├── docs ├── deploying │ ├── debian.md │ ├── nixos.md │ ├── docker-compose.yml │ ├── docker-compose.override.yml │ └── docker-compose.for-traefik.yml ├── deploying.md ├── administration.md ├── introduction.md ├── SUMMARY.md ├── turn.md ├── faq.md ├── administration │ └── media.md ├── appservices.md └── delegation.md ├── .cargo └── config.toml ├── .envrc ├── rustfmt.toml ├── .gitlab ├── route-map.yml ├── CODEOWNERS ├── merge_request_templates │ └── MR.md └── issue_templates │ ├── Feature Request.md │ └── Bug Report.md ├── src ├── api │ ├── mod.rs │ ├── client_server │ │ ├── thirdparty.rs │ │ ├── well_known.rs │ │ ├── openid.rs │ │ ├── capabilities.rs │ │ ├── filter.rs │ │ ├── typing.rs │ │ ├── threads.rs │ │ ├── mod.rs │ │ ├── unversioned.rs │ │ ├── redact.rs │ │ ├── voip.rs │ │ ├── space.rs │ │ ├── report.rs │ │ ├── appservice.rs │ │ ├── relations.rs │ │ ├── presence.rs │ │ ├── user_directory.rs │ │ ├── alias.rs │ │ ├── to_device.rs │ │ └── tag.rs │ ├── ruma_wrapper │ │ └── mod.rs │ └── appservice_server.rs ├── database │ ├── key_value │ │ ├── rooms │ │ │ ├── edus │ │ │ │ └── mod.rs │ │ │ ├── directory.rs │ │ │ ├── outlier.rs │ │ │ ├── mod.rs │ │ │ ├── metadata.rs │ │ │ ├── auth_chain.rs │ │ │ ├── lazy_load.rs │ │ │ ├── state_compressor.rs │ │ │ ├── state.rs │ │ │ ├── threads.rs │ │ │ ├── search.rs │ │ │ ├── alias.rs │ │ │ └── pdu_metadata.rs │ │ ├── mod.rs │ │ ├── transaction_ids.rs │ │ ├── appservice.rs │ │ ├── pusher.rs │ │ └── uiaa.rs │ ├── abstraction │ │ └── watchers.rs │ └── abstraction.rs ├── service │ ├── rooms │ │ ├── edus │ │ │ ├── mod.rs │ │ │ ├── read_receipt │ │ │ │ ├── data.rs │ │ │ │ └── mod.rs │ │ │ └── presence │ │ │ │ └── data.rs │ │ ├── auth_chain │ │ │ └── data.rs │ │ ├── metadata │ │ │ ├── data.rs │ │ │ └── mod.rs │ │ ├── outlier │ │ │ ├── data.rs │ │ │ └── mod.rs │ │ ├── state_compressor │ │ │ └── data.rs │ │ ├── search │ │ │ ├── data.rs │ │ │ └── mod.rs │ │ ├── directory │ │ │ ├── data.rs │ │ │ └── mod.rs │ │ ├── threads │ │ │ └── data.rs │ 
│ ├── lazy_loading │ │ │ ├── data.rs │ │ │ └── mod.rs │ │ ├── pdu_metadata │ │ │ └── data.rs │ │ ├── alias │ │ │ └── data.rs │ │ ├── user │ │ │ ├── data.rs │ │ │ └── mod.rs │ │ ├── short │ │ │ ├── data.rs │ │ │ └── mod.rs │ │ ├── state │ │ │ └── data.rs │ │ ├── mod.rs │ │ ├── state_accessor │ │ │ └── data.rs │ │ └── timeline │ │ │ └── data.rs │ ├── transaction_ids │ │ ├── data.rs │ │ └── mod.rs │ ├── pusher │ │ └── data.rs │ ├── appservice │ │ └── data.rs │ ├── uiaa │ │ └── data.rs │ ├── sending │ │ └── data.rs │ ├── account_data │ │ ├── data.rs │ │ └── mod.rs │ ├── key_backups │ │ ├── data.rs │ │ └── mod.rs │ └── globals │ │ └── data.rs ├── clap.rs └── lib.rs ├── .gitea └── PULL_REQUEST_TEMPLATE.md ├── typos.toml ├── .editorconfig ├── .vscode ├── extensions.json └── launch.json ├── complement ├── README.md ├── Caddyfile └── Dockerfile ├── .dockerignore ├── debian ├── config ├── postrm ├── templates ├── matrix-conduit.service ├── README.md └── postinst ├── default.nix ├── .github └── ISSUE_TEMPLATE │ └── Issue.md ├── tests ├── test-config.toml └── sytest │ ├── sytest-blacklist │ └── show-expected-fail-tests.sh ├── book.toml ├── rust-toolchain.toml ├── nix ├── pkgs │ ├── book │ │ └── default.nix │ ├── oci-image │ │ └── default.nix │ └── default │ │ ├── default.nix │ │ └── cross-compilation-env.nix └── shell.nix ├── taplo.toml ├── bin ├── complement └── nix-build-and-cache ├── .gitignore ├── engage.toml ├── conduit-example.toml ├── README.md └── flake.nix /docs/deploying/debian.md: -------------------------------------------------------------------------------- 1 | {{#include ../../debian/README.md}} 2 | -------------------------------------------------------------------------------- /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [env] 2 | RUMA_UNSTABLE_EXHAUSTIVE_TYPES = "1" 3 | -------------------------------------------------------------------------------- /.envrc: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | use flake 4 | 5 | PATH_add bin 6 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | imports_granularity = "Crate" 2 | unstable_features = true 3 | -------------------------------------------------------------------------------- /docs/deploying.md: -------------------------------------------------------------------------------- 1 | # Deploying 2 | 3 | This chapter describes various ways to deploy Conduit. 4 | -------------------------------------------------------------------------------- /.gitlab/route-map.yml: -------------------------------------------------------------------------------- 1 | # Docs: Map markdown to html files 2 | - source: /docs/(.+)\.md/ 3 | public: '\1.html' -------------------------------------------------------------------------------- /src/api/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod appservice_server; 2 | pub mod client_server; 3 | pub mod ruma_wrapper; 4 | pub mod server_server; 5 | -------------------------------------------------------------------------------- /.gitea/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | - [ ] I agree to release my code and all other changes of this PR under the Apache-2.0 license 2 | -------------------------------------------------------------------------------- /.gitlab/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Nix things 2 | .envrc @CobaltCause 3 | flake.lock @CobaltCause 4 | flake.nix @CobaltCause 5 | nix/ @CobaltCause 6 | -------------------------------------------------------------------------------- /docs/administration.md: 
-------------------------------------------------------------------------------- 1 | # Administration 2 | 3 | This chapter describes how to perform tasks you may want to do while running Conduit 4 | -------------------------------------------------------------------------------- /typos.toml: -------------------------------------------------------------------------------- 1 | [files] 2 | # From my understanding, they are automatically generated 3 | # Plus, systest hasn't been used in who knows how long 4 | extend-exclude = ["tests/**"] 5 | -------------------------------------------------------------------------------- /src/database/key_value/rooms/edus/mod.rs: -------------------------------------------------------------------------------- 1 | mod presence; 2 | mod read_receipt; 3 | 4 | use crate::{database::KeyValueDatabase, service}; 5 | 6 | impl service::rooms::edus::Data for KeyValueDatabase {} 7 | -------------------------------------------------------------------------------- /src/database/key_value/mod.rs: -------------------------------------------------------------------------------- 1 | mod account_data; 2 | //mod admin; 3 | mod appservice; 4 | mod globals; 5 | mod key_backups; 6 | pub(super) mod media; 7 | //mod pdu; 8 | mod pusher; 9 | mod rooms; 10 | mod sending; 11 | mod transaction_ids; 12 | mod uiaa; 13 | mod users; 14 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # EditorConfig is awesome: https://EditorConfig.org 2 | 3 | root = true 4 | 5 | [*] 6 | charset = utf-8 7 | end_of_line = lf 8 | tab_width = 4 9 | indent_size = 4 10 | indent_style = space 11 | insert_final_newline = true 12 | max_line_length = 120 13 | 14 | [*.nix] 15 | indent_size = 2 16 | -------------------------------------------------------------------------------- /.gitlab/merge_request_templates/MR.md: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | ----------------------------------------------------------------------------- 5 | 6 | - [ ] I ran `cargo fmt` and `cargo test` 7 | - [ ] I agree to release my code and all other changes of this MR under the Apache-2.0 license 8 | 9 | -------------------------------------------------------------------------------- /.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | "recommendations": [ 3 | "rust-lang.rust-analyzer", 4 | "bungcip.better-toml", 5 | "ms-azuretools.vscode-docker", 6 | "eamodio.gitlens", 7 | "fill-labs.dependi", 8 | "vadimcn.vscode-lldb", 9 | "timonwong.shellcheck" 10 | ] 11 | } -------------------------------------------------------------------------------- /src/service/rooms/edus/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod presence; 2 | pub mod read_receipt; 3 | pub mod typing; 4 | 5 | pub trait Data: presence::Data + read_receipt::Data + 'static {} 6 | 7 | pub struct Service { 8 | pub presence: presence::Service, 9 | pub read_receipt: read_receipt::Service, 10 | pub typing: typing::Service, 11 | } 12 | -------------------------------------------------------------------------------- /complement/README.md: -------------------------------------------------------------------------------- 1 | # Complement 2 | 3 | ## What's that? 4 | 5 | Have a look at [its repository](https://github.com/matrix-org/complement). 6 | 7 | ## How do I use it with Conduit? 8 | 9 | The script at [`../bin/complement`](../bin/complement) has automation for this. 10 | It takes a few command line arguments, you can read the script to find out what 11 | those are. 
12 | -------------------------------------------------------------------------------- /docs/introduction.md: -------------------------------------------------------------------------------- 1 | # Conduit 2 | 3 | {{#include ../README.md:catchphrase}} 4 | 5 | {{#include ../README.md:body}} 6 | 7 | #### How can I deploy my own? 8 | 9 | - [Deployment options](deploying.md) 10 | 11 | If you want to connect an Appservice to Conduit, take a look at the [appservices documentation](appservices.md). 12 | 13 | {{#include ../README.md:footer}} 14 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # Local build and dev artifacts 2 | target 3 | tests 4 | 5 | # Docker files 6 | Dockerfile* 7 | docker-compose* 8 | 9 | # IDE files 10 | .vscode 11 | .idea 12 | *.iml 13 | 14 | # Git folder 15 | .git 16 | .gitea 17 | .gitlab 18 | .github 19 | 20 | # Dot files 21 | .env 22 | .gitignore 23 | 24 | # Toml files 25 | rustfmt.toml 26 | 27 | # Documentation 28 | #*.md 29 | -------------------------------------------------------------------------------- /debian/config: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | # Source debconf library. 5 | . /usr/share/debconf/confmodule 6 | 7 | # Ask for the Matrix homeserver name, address and port. 
8 | db_input high matrix-conduit/hostname || true 9 | db_go 10 | 11 | db_input low matrix-conduit/address || true 12 | db_go 13 | 14 | db_input medium matrix-conduit/port || true 15 | db_go 16 | 17 | exit 0 18 | -------------------------------------------------------------------------------- /default.nix: -------------------------------------------------------------------------------- 1 | (import 2 | ( 3 | let lock = builtins.fromJSON (builtins.readFile ./flake.lock); in 4 | fetchTarball { 5 | url = lock.nodes.flake-compat.locked.url or "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz"; 6 | sha256 = lock.nodes.flake-compat.locked.narHash; 7 | } 8 | ) 9 | { src = ./.; } 10 | ).defaultNix 11 | -------------------------------------------------------------------------------- /.gitlab/issue_templates/Feature Request.md: -------------------------------------------------------------------------------- 1 | 5 | 6 | 7 | ### Is your feature request related to a problem? Please describe. 
8 | 9 | 10 | 11 | 12 | ### Describe the solution you'd like 13 | 14 | 15 | 16 | 17 | /label ~conduit 18 | -------------------------------------------------------------------------------- /src/service/rooms/auth_chain/data.rs: -------------------------------------------------------------------------------- 1 | use crate::Result; 2 | use std::{collections::HashSet, sync::Arc}; 3 | 4 | pub trait Data: Send + Sync { 5 | fn get_cached_eventid_authchain( 6 | &self, 7 | shorteventid: &[u64], 8 | ) -> Result>>>; 9 | fn cache_auth_chain(&self, shorteventid: Vec, auth_chain: Arc>) 10 | -> Result<()>; 11 | } 12 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/Issue.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "Issue with / Feature Request for Conduit" 3 | about: "Please file issues on GitLab: https://gitlab.com/famedly/conduit/-/issues/new" 4 | title: "CLOSE ME" 5 | --- 6 | 7 | 8 | 9 | **⚠️ Conduit development does not happen on GitHub. 
Issues opened here will not be addressed** 10 | 11 | Please open issues on GitLab: https://gitlab.com/famedly/conduit/-/issues/new 12 | -------------------------------------------------------------------------------- /src/service/rooms/metadata/data.rs: -------------------------------------------------------------------------------- 1 | use crate::Result; 2 | use ruma::{OwnedRoomId, RoomId}; 3 | 4 | pub trait Data: Send + Sync { 5 | fn exists(&self, room_id: &RoomId) -> Result; 6 | fn iter_ids<'a>(&'a self) -> Box> + 'a>; 7 | fn is_disabled(&self, room_id: &RoomId) -> Result; 8 | fn disable_room(&self, room_id: &RoomId, disabled: bool) -> Result<()>; 9 | } 10 | -------------------------------------------------------------------------------- /src/service/rooms/outlier/data.rs: -------------------------------------------------------------------------------- 1 | use ruma::{CanonicalJsonObject, EventId}; 2 | 3 | use crate::{PduEvent, Result}; 4 | 5 | pub trait Data: Send + Sync { 6 | fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result>; 7 | fn get_outlier_pdu(&self, event_id: &EventId) -> Result>; 8 | fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()>; 9 | } 10 | -------------------------------------------------------------------------------- /docs/SUMMARY.md: -------------------------------------------------------------------------------- 1 | # Summary 2 | 3 | - [Introduction](introduction.md) 4 | 5 | - [Configuration](configuration.md) 6 | - [Delegation](delegation.md) 7 | - [Deploying](deploying.md) 8 | - [Generic](deploying/generic.md) 9 | - [Debian](deploying/debian.md) 10 | - [Docker](deploying/docker.md) 11 | - [NixOS](deploying/nixos.md) 12 | - [Administration](administration.md) 13 | - [Media](administration/media.md) 14 | - [TURN](turn.md) 15 | - [Appservices](appservices.md) 16 | - [FAQ](faq.md) 17 | -------------------------------------------------------------------------------- /tests/test-config.toml: 
-------------------------------------------------------------------------------- 1 | [global] 2 | 3 | # Server runs in same container as tests do, so localhost is fine 4 | server_name = "localhost" 5 | 6 | # With a bit of luck /tmp is a RAM disk, so that the file system does not become the bottleneck while testing 7 | database_path = "/tmp" 8 | 9 | # All the other settings are left at their defaults: 10 | address = "127.0.0.1" 11 | allow_registration = true 12 | max_request_size = 20_000_000 13 | port = 6167 14 | proxy = "none" 15 | trusted_servers = ["matrix.org"] 16 | -------------------------------------------------------------------------------- /src/service/rooms/state_compressor/data.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashSet, sync::Arc}; 2 | 3 | use super::CompressedStateEvent; 4 | use crate::Result; 5 | 6 | pub struct StateDiff { 7 | pub parent: Option, 8 | pub added: Arc>, 9 | pub removed: Arc>, 10 | } 11 | 12 | pub trait Data: Send + Sync { 13 | fn get_statediff(&self, shortstatehash: u64) -> Result; 14 | fn save_statediff(&self, shortstatehash: u64, diff: StateDiff) -> Result<()>; 15 | } 16 | -------------------------------------------------------------------------------- /complement/Caddyfile: -------------------------------------------------------------------------------- 1 | { 2 | log default { 3 | level WARN 4 | } 5 | https_port 8448 6 | default_sni your.server.name 7 | local_certs 8 | pki { 9 | ca local { 10 | name "Complement CA" 11 | root { 12 | cert /complement/ca/ca.crt 13 | key /complement/ca/ca.key 14 | } 15 | intermediate { 16 | cert /complement/ca/ca.crt 17 | key /complement/ca/ca.key 18 | } 19 | } 20 | } 21 | } 22 | 23 | your.server.name 24 | 25 | reverse_proxy 127.0.0.1:8008 26 | tls { 27 | issuer internal { 28 | ca local 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /src/service/transaction_ids/data.rs: 
-------------------------------------------------------------------------------- 1 | use crate::Result; 2 | use ruma::{DeviceId, TransactionId, UserId}; 3 | 4 | pub trait Data: Send + Sync { 5 | fn add_txnid( 6 | &self, 7 | user_id: &UserId, 8 | device_id: Option<&DeviceId>, 9 | txn_id: &TransactionId, 10 | data: &[u8], 11 | ) -> Result<()>; 12 | 13 | fn existing_txnid( 14 | &self, 15 | user_id: &UserId, 16 | device_id: Option<&DeviceId>, 17 | txn_id: &TransactionId, 18 | ) -> Result>>; 19 | } 20 | -------------------------------------------------------------------------------- /src/api/client_server/thirdparty.rs: -------------------------------------------------------------------------------- 1 | use crate::{Result, Ruma}; 2 | use ruma::api::client::thirdparty::get_protocols; 3 | 4 | use std::collections::BTreeMap; 5 | 6 | /// # `GET /_matrix/client/r0/thirdparty/protocols` 7 | /// 8 | /// TODO: Fetches all metadata about protocols supported by the homeserver. 9 | pub async fn get_protocols_route( 10 | _body: Ruma, 11 | ) -> Result { 12 | // TODO 13 | Ok(get_protocols::v3::Response { 14 | protocols: BTreeMap::new(), 15 | }) 16 | } 17 | -------------------------------------------------------------------------------- /src/service/rooms/search/data.rs: -------------------------------------------------------------------------------- 1 | use crate::Result; 2 | use ruma::RoomId; 3 | 4 | pub trait Data: Send + Sync { 5 | fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>; 6 | 7 | fn deindex_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>; 8 | 9 | #[allow(clippy::type_complexity)] 10 | fn search_pdus<'a>( 11 | &'a self, 12 | room_id: &RoomId, 13 | search_string: &str, 14 | ) -> Result> + 'a>, Vec)>>; 15 | } 16 | -------------------------------------------------------------------------------- /src/service/pusher/data.rs: -------------------------------------------------------------------------------- 1 | 
use crate::Result; 2 | use ruma::{ 3 | api::client::push::{set_pusher, Pusher}, 4 | UserId, 5 | }; 6 | 7 | pub trait Data: Send + Sync { 8 | fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::PusherAction) -> Result<()>; 9 | 10 | fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result>; 11 | 12 | fn get_pushers(&self, sender: &UserId) -> Result>; 13 | 14 | fn get_pushkeys<'a>(&'a self, sender: &UserId) 15 | -> Box> + 'a>; 16 | } 17 | -------------------------------------------------------------------------------- /book.toml: -------------------------------------------------------------------------------- 1 | [book] 2 | description = "Conduit is a simple, fast and reliable chat server for the Matrix protocol" 3 | language = "en" 4 | multilingual = false 5 | src = "docs" 6 | title = "Conduit" 7 | 8 | [build] 9 | build-dir = "public" 10 | create-missing = true 11 | 12 | [output.html] 13 | edit-url-template = "https://gitlab.com/famedly/conduit/-/edit/next/{path}" 14 | git-repository-icon = "fa-git-square" 15 | git-repository-url = "https://gitlab.com/famedly/conduit" 16 | 17 | [output.html.search] 18 | limit-results = 15 19 | 20 | [output.html.code.hidelines] 21 | json = "~" 22 | -------------------------------------------------------------------------------- /tests/sytest/sytest-blacklist: -------------------------------------------------------------------------------- 1 | # This test checks for a room-alias key in the response which is not in the spec, we must add it back in whitelist when https://github.com/matrix-org/sytest/pull/880 is merged 2 | POST /createRoom makes a public room 3 | # These fails because they use a endpoint which is not in the spec, we must add them back in whitelist when https://github.com/matrix-org/sytest/issues/878 is closed 4 | POST /createRoom makes a room with a name 5 | POST /createRoom makes a room with a topic 6 | Can /sync newly created room 7 | POST /createRoom ignores attempts to set the room version via 
creation_content -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | # This is the authoritiative configuration of this project's Rust toolchain. 2 | # 3 | # Other files that need upkeep when this changes: 4 | # 5 | # * `Cargo.toml` 6 | # * `flake.nix` 7 | # 8 | # Search in those files for `rust-toolchain.toml` to find the relevant places. 9 | # If you're having trouble making the relevant changes, bug a maintainer. 10 | 11 | [toolchain] 12 | channel = "1.85.0" 13 | components = [ 14 | # For rust-analyzer 15 | "rust-src", 16 | ] 17 | targets = [ 18 | "aarch64-unknown-linux-musl", 19 | "x86_64-unknown-linux-gnu", 20 | "x86_64-unknown-linux-musl", 21 | ] 22 | -------------------------------------------------------------------------------- /nix/pkgs/book/default.nix: -------------------------------------------------------------------------------- 1 | # Keep sorted 2 | { default 3 | , inputs 4 | , mdbook 5 | , stdenv 6 | }: 7 | 8 | stdenv.mkDerivation { 9 | pname = "${default.pname}-book"; 10 | version = default.version; 11 | 12 | 13 | src = let filter = inputs.nix-filter.lib; in filter { 14 | root = inputs.self; 15 | 16 | # Keep sorted 17 | include = [ 18 | "book.toml" 19 | "conduit-example.toml" 20 | "debian/README.md" 21 | "docs" 22 | "README.md" 23 | ]; 24 | }; 25 | 26 | nativeBuildInputs = [ 27 | mdbook 28 | ]; 29 | 30 | buildPhase = '' 31 | mdbook build 32 | mv public $out 33 | ''; 34 | } 35 | -------------------------------------------------------------------------------- /src/api/client_server/well_known.rs: -------------------------------------------------------------------------------- 1 | use ruma::api::client::discovery::discover_homeserver::{self, HomeserverInfo}; 2 | 3 | use crate::{services, Result, Ruma}; 4 | 5 | /// # `GET /.well-known/matrix/client` 6 | /// 7 | /// Returns the client server discovery information. 
8 | pub async fn well_known_client( 9 | _body: Ruma, 10 | ) -> Result { 11 | let client_url = services().globals.well_known_client(); 12 | 13 | Ok(discover_homeserver::Response { 14 | homeserver: HomeserverInfo { 15 | base_url: client_url.clone(), 16 | }, 17 | identity_server: None, 18 | }) 19 | } 20 | -------------------------------------------------------------------------------- /src/service/rooms/directory/data.rs: -------------------------------------------------------------------------------- 1 | use crate::Result; 2 | use ruma::{OwnedRoomId, RoomId}; 3 | 4 | pub trait Data: Send + Sync { 5 | /// Adds the room to the public room directory 6 | fn set_public(&self, room_id: &RoomId) -> Result<()>; 7 | 8 | /// Removes the room from the public room directory. 9 | fn set_not_public(&self, room_id: &RoomId) -> Result<()>; 10 | 11 | /// Returns true if the room is in the public room directory. 12 | fn is_public_room(&self, room_id: &RoomId) -> Result; 13 | 14 | /// Returns the unsorted public room directory 15 | fn public_rooms<'a>(&'a self) -> Box> + 'a>; 16 | } 17 | -------------------------------------------------------------------------------- /.gitlab/issue_templates/Bug Report.md: -------------------------------------------------------------------------------- 1 | 8 | 9 | ### Description 10 | 11 | 12 | ### System Configuration 13 | 14 | 15 | Conduit Version: 16 | Database backend (default is sqlite): sqlite 17 | 18 | 19 | /label ~conduit 20 | -------------------------------------------------------------------------------- /src/service/rooms/threads/data.rs: -------------------------------------------------------------------------------- 1 | use crate::{PduEvent, Result}; 2 | use ruma::{api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, UserId}; 3 | 4 | pub trait Data: Send + Sync { 5 | #[allow(clippy::type_complexity)] 6 | fn threads_until<'a>( 7 | &'a self, 8 | user_id: &'a UserId, 9 | room_id: &'a RoomId, 10 | until: u64, 11 | 
include: &'a IncludeThreads, 12 | ) -> Result> + 'a>>; 13 | 14 | fn update_participants(&self, root_id: &[u8], participants: &[OwnedUserId]) -> Result<()>; 15 | fn get_participants(&self, root_id: &[u8]) -> Result>>; 16 | } 17 | -------------------------------------------------------------------------------- /taplo.toml: -------------------------------------------------------------------------------- 1 | exclude = [".**/*.toml"] 2 | include = ["**/*.toml"] 3 | [formatting] 4 | reorder_arrays = true 5 | reorder_keys = true 6 | 7 | # Prevent breaking command and argument order 8 | [[rule]] 9 | include = ["engage.toml"] 10 | # https://github.com/tamasfe/taplo/issues/608 11 | #keys = ["interpreter"] 12 | 13 | [rule.formatting] 14 | reorder_arrays = false 15 | 16 | # Prevent breaking license file order 17 | [[rule]] 18 | include = ["Cargo.toml"] 19 | # https://github.com/tamasfe/taplo/issues/608 20 | # keys = ["package.metadata.deb.license-file", "package.metadata.deb.assets"] 21 | keys = ["package.metadata.deb", "package.metadata.deb.assets"] 22 | 23 | [rule.formatting] 24 | reorder_arrays = false 25 | -------------------------------------------------------------------------------- /debian/postrm: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | . /usr/share/debconf/confmodule 5 | 6 | CONDUIT_CONFIG_PATH=/etc/matrix-conduit 7 | CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit 8 | 9 | case $1 in 10 | purge) 11 | # Remove debconf changes from the db 12 | db_purge 13 | 14 | # Per https://www.debian.org/doc/debian-policy/ch-files.html#behavior 15 | # "configuration files must be preserved when the package is removed, and 16 | # only deleted when the package is purged." 
17 | if [ -d "$CONDUIT_CONFIG_PATH" ]; then 18 | rm -r "$CONDUIT_CONFIG_PATH" 19 | fi 20 | 21 | if [ -d "$CONDUIT_DATABASE_PATH" ]; then 22 | rm -r "$CONDUIT_DATABASE_PATH" 23 | fi 24 | ;; 25 | esac 26 | 27 | #DEBHELPER# 28 | -------------------------------------------------------------------------------- /src/service/rooms/lazy_loading/data.rs: -------------------------------------------------------------------------------- 1 | use crate::Result; 2 | use ruma::{DeviceId, RoomId, UserId}; 3 | 4 | pub trait Data: Send + Sync { 5 | fn lazy_load_was_sent_before( 6 | &self, 7 | user_id: &UserId, 8 | device_id: &DeviceId, 9 | room_id: &RoomId, 10 | ll_user: &UserId, 11 | ) -> Result; 12 | 13 | fn lazy_load_confirm_delivery( 14 | &self, 15 | user_id: &UserId, 16 | device_id: &DeviceId, 17 | room_id: &RoomId, 18 | confirmed_user_ids: &mut dyn Iterator, 19 | ) -> Result<()>; 20 | 21 | fn lazy_load_reset( 22 | &self, 23 | user_id: &UserId, 24 | device_id: &DeviceId, 25 | room_id: &RoomId, 26 | ) -> Result<()>; 27 | } 28 | -------------------------------------------------------------------------------- /docs/deploying/nixos.md: -------------------------------------------------------------------------------- 1 | # Conduit for NixOS 2 | 3 | Conduit can be acquired by Nix from various places: 4 | 5 | * The `flake.nix` at the root of the repo 6 | * The `default.nix` at the root of the repo 7 | * From Nixpkgs 8 | 9 | The `flake.nix` and `default.nix` do not (currently) provide a NixOS module, so 10 | (for now) [`services.matrix-conduit`][module] from Nixpkgs should be used to 11 | configure Conduit. 12 | 13 | If you want to run the latest code, you should get Conduit from the `flake.nix` 14 | or `default.nix` and set [`services.matrix-conduit.package`][package] 15 | appropriately. 
16 | 17 | [module]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit 18 | [package]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit.package 19 | -------------------------------------------------------------------------------- /src/service/appservice/data.rs: -------------------------------------------------------------------------------- 1 | use ruma::api::appservice::Registration; 2 | 3 | use crate::Result; 4 | 5 | pub trait Data: Send + Sync { 6 | /// Registers an appservice and returns the ID to the caller 7 | fn register_appservice(&self, yaml: Registration) -> Result; 8 | 9 | /// Remove an appservice registration 10 | /// 11 | /// # Arguments 12 | /// 13 | /// * `service_name` - the name you send to register the service previously 14 | fn unregister_appservice(&self, service_name: &str) -> Result<()>; 15 | 16 | fn get_registration(&self, id: &str) -> Result>; 17 | 18 | fn iter_ids<'a>(&'a self) -> Result> + 'a>>; 19 | 20 | fn all(&self) -> Result>; 21 | } 22 | -------------------------------------------------------------------------------- /src/service/transaction_ids/mod.rs: -------------------------------------------------------------------------------- 1 | mod data; 2 | 3 | pub use data::Data; 4 | 5 | use crate::Result; 6 | use ruma::{DeviceId, TransactionId, UserId}; 7 | 8 | pub struct Service { 9 | pub db: &'static dyn Data, 10 | } 11 | 12 | impl Service { 13 | pub fn add_txnid( 14 | &self, 15 | user_id: &UserId, 16 | device_id: Option<&DeviceId>, 17 | txn_id: &TransactionId, 18 | data: &[u8], 19 | ) -> Result<()> { 20 | self.db.add_txnid(user_id, device_id, txn_id, data) 21 | } 22 | 23 | pub fn existing_txnid( 24 | &self, 25 | user_id: &UserId, 26 | device_id: Option<&DeviceId>, 27 | txn_id: &TransactionId, 28 | ) -> Result>> { 29 | self.db.existing_txnid(user_id, device_id, txn_id) 30 | } 31 | } 32 | -------------------------------------------------------------------------------- 
/debian/templates: -------------------------------------------------------------------------------- 1 | Template: matrix-conduit/hostname 2 | Type: string 3 | Default: localhost 4 | Description: The server (host)name of the Matrix homeserver 5 | This is the hostname the homeserver will be reachable at via a client. 6 | . 7 | If set to "localhost", you can connect with a client locally and clients 8 | from other hosts and also other homeservers will not be able to reach you! 9 | 10 | Template: matrix-conduit/address 11 | Type: string 12 | Default: 127.0.0.1 13 | Description: The listen address of the Matrix homeserver 14 | This is the address the homeserver will listen on. Leave it set to 127.0.0.1 15 | when using a reverse proxy. 16 | 17 | Template: matrix-conduit/port 18 | Type: string 19 | Default: 6167 20 | Description: The port of the Matrix homeserver 21 | This port is most often just accessed by a reverse proxy. 22 | -------------------------------------------------------------------------------- /src/service/rooms/metadata/mod.rs: -------------------------------------------------------------------------------- 1 | mod data; 2 | 3 | pub use data::Data; 4 | use ruma::{OwnedRoomId, RoomId}; 5 | 6 | use crate::Result; 7 | 8 | pub struct Service { 9 | pub db: &'static dyn Data, 10 | } 11 | 12 | impl Service { 13 | /// Checks if a room exists. 
14 | #[tracing::instrument(skip(self))] 15 | pub fn exists(&self, room_id: &RoomId) -> Result { 16 | self.db.exists(room_id) 17 | } 18 | 19 | pub fn iter_ids<'a>(&'a self) -> Box> + 'a> { 20 | self.db.iter_ids() 21 | } 22 | 23 | pub fn is_disabled(&self, room_id: &RoomId) -> Result { 24 | self.db.is_disabled(room_id) 25 | } 26 | 27 | pub fn disable_room(&self, room_id: &RoomId, disabled: bool) -> Result<()> { 28 | self.db.disable_room(room_id, disabled) 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /src/clap.rs: -------------------------------------------------------------------------------- 1 | //! Integration with `clap` 2 | 3 | use clap::Parser; 4 | 5 | /// Returns the current version of the crate with extra info if supplied 6 | /// 7 | /// Set the environment variable `CONDUIT_VERSION_EXTRA` to any UTF-8 string to 8 | /// include it in parenthesis after the SemVer version. A common value are git 9 | /// commit hashes. 10 | fn version() -> String { 11 | let cargo_pkg_version = env!("CARGO_PKG_VERSION"); 12 | 13 | match option_env!("CONDUIT_VERSION_EXTRA") { 14 | Some(x) => format!("{} ({})", cargo_pkg_version, x), 15 | None => cargo_pkg_version.to_owned(), 16 | } 17 | } 18 | 19 | /// Command line arguments 20 | #[derive(Parser)] 21 | #[clap(about, version = version())] 22 | pub struct Args {} 23 | 24 | /// Parse command line arguments into structured data 25 | pub fn parse() -> Args { 26 | Args::parse() 27 | } 28 | -------------------------------------------------------------------------------- /nix/pkgs/oci-image/default.nix: -------------------------------------------------------------------------------- 1 | # Keep sorted 2 | { default 3 | , dockerTools 4 | , lib 5 | , pkgs 6 | }: 7 | let 8 | # See https://github.com/krallin/tini/pull/223 9 | tini = pkgs.tini.overrideAttrs { 10 | patches = [ (pkgs.fetchpatch { 11 | url = "https://patch-diff.githubusercontent.com/raw/krallin/tini/pull/223.patch"; 12 | hash = 
"sha256-i6xcf+qpjD+7ZQY3ueiDaxO4+UA2LutLCZLNmT+ji1s="; 13 | }) 14 | ]; 15 | }; 16 | in 17 | dockerTools.buildImage { 18 | name = default.pname; 19 | tag = "next"; 20 | copyToRoot = [ 21 | dockerTools.caCertificates 22 | ]; 23 | config = { 24 | # Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT) 25 | # are handled as expected 26 | Entrypoint = [ 27 | "${lib.getExe' tini "tini"}" 28 | "--" 29 | ]; 30 | Cmd = [ 31 | "${lib.getExe default}" 32 | ]; 33 | }; 34 | } 35 | -------------------------------------------------------------------------------- /src/service/rooms/pdu_metadata/data.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use crate::{service::rooms::timeline::PduCount, PduEvent, Result}; 4 | use ruma::{EventId, RoomId, UserId}; 5 | 6 | pub trait Data: Send + Sync { 7 | fn add_relation(&self, from: u64, to: u64) -> Result<()>; 8 | #[allow(clippy::type_complexity)] 9 | fn relations_until<'a>( 10 | &'a self, 11 | user_id: &'a UserId, 12 | room_id: u64, 13 | target: u64, 14 | until: PduCount, 15 | ) -> Result> + 'a>>; 16 | fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()>; 17 | fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result; 18 | fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()>; 19 | fn is_event_soft_failed(&self, event_id: &EventId) -> Result; 20 | } 21 | -------------------------------------------------------------------------------- /src/service/rooms/directory/mod.rs: -------------------------------------------------------------------------------- 1 | mod data; 2 | 3 | pub use data::Data; 4 | use ruma::{OwnedRoomId, RoomId}; 5 | 6 | use crate::Result; 7 | 8 | pub struct Service { 9 | pub db: &'static dyn Data, 10 | } 11 | 12 | impl Service { 13 | #[tracing::instrument(skip(self))] 14 | pub fn set_public(&self, room_id: &RoomId) -> Result<()> { 15 | self.db.set_public(room_id) 16 | } 17 | 18 | 
mod data;

pub use data::Data;
use ruma::{CanonicalJsonObject, EventId};

use crate::{PduEvent, Result};

/// Service layer for PDUs stored as outliers (events kept outside the
/// room's timeline/state).
pub struct Service {
    // Storage backend; a static trait object chosen at startup.
    pub db: &'static dyn Data,
}

impl Service {
    /// Returns the pdu from the outlier tree as raw canonical JSON.
    pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> {
        self.db.get_outlier_pdu_json(event_id)
    }

    /// Returns the pdu from the outlier tree, deserialized into a `PduEvent`.
    pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> {
        self.db.get_outlier_pdu(event_id)
    }

    /// Append the PDU as an outlier.
    #[tracing::instrument(skip(self, pdu))]
    pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> {
        self.db.add_pdu_outlier(event_id, pdu)
    }
}
# If you have your TURN server configured to use a username and password,
# you can provide that information here too. In this case, comment out `turn_secret` above!
use crate::Result;
use ruma::{api::client::uiaa::UiaaInfo, CanonicalJsonValue, DeviceId, UserId};

/// Storage backend for User-Interactive Authentication (UIAA) state.
pub trait Data: Send + Sync {
    /// Stores the original request body for a UIAA `session` — presumably so
    /// it can be replayed once the auth flow completes; TODO confirm against
    /// callers.
    fn set_uiaa_request(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
        session: &str,
        request: &CanonicalJsonValue,
    ) -> Result<()>;

    /// Fetches the request stored by `set_uiaa_request`; `None` if nothing
    /// was recorded for this `(user, device, session)` triple.
    fn get_uiaa_request(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
        session: &str,
    ) -> Option;

    /// Creates or updates the auth-flow state for a session. A `None`
    /// `uiaainfo` presumably clears the session — TODO confirm against the
    /// implementation.
    fn update_uiaa_session(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
        session: &str,
        uiaainfo: Option<&UiaaInfo>,
    ) -> Result<()>;

    /// Retrieves the current auth-flow state for a session.
    fn get_uiaa_session(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
        session: &str,
    ) -> Result;
}
#!/usr/bin/env bash

# Build the Complement test image for Conduit and run the Complement suite
# against it, writing raw `go test -json` output and a normalized results
# file for easy diffing between runs.

set -euo pipefail

# Path to Complement's source code
COMPLEMENT_SRC="$1"

# A `.jsonl` file to write test logs to
LOG_FILE="$2"

# A `.jsonl` file to write test results to
RESULTS_FILE="$3"

OCI_IMAGE="complement-conduit:dev"

# Build the image from the repository root so the Dockerfile's COPY paths
# resolve correctly regardless of the caller's working directory.
env \
    -C "$(git rev-parse --show-toplevel)" \
    docker build \
    --tag "$OCI_IMAGE" \
    --file complement/Dockerfile \
    .

# It's okay (likely, even) that `go test` exits nonzero
set +o pipefail
env \
    -C "$COMPLEMENT_SRC" \
    COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \
    go test -json ./tests | tee "$LOG_FILE"
set -o pipefail

# Post-process the results into an easy-to-compare format. jq reads the log
# file directly (no need to pipe it through `cat`).
jq -c '
    select(
        (.Action == "pass" or .Action == "fail" or .Action == "skip")
        and .Test != null
    ) | {Test: .Test, Action: .Action}
' "$LOG_FILE" | sort > "$RESULTS_FILE"
6 | fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()>; 7 | 8 | /// Finds the user who assigned the given alias to a room 9 | fn who_created_alias(&self, alias: &RoomAliasId) -> Result>; 10 | 11 | /// Forgets about an alias. Returns an error if the alias did not exist. 12 | fn remove_alias(&self, alias: &RoomAliasId) -> Result<()>; 13 | 14 | /// Looks up the roomid for the given alias. 15 | fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result>; 16 | 17 | /// Returns all local aliases that point to the given room 18 | fn local_aliases_for_room<'a>( 19 | &'a self, 20 | room_id: &RoomId, 21 | ) -> Box> + 'a>; 22 | } 23 | -------------------------------------------------------------------------------- /src/service/rooms/user/data.rs: -------------------------------------------------------------------------------- 1 | use crate::Result; 2 | use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; 3 | 4 | pub trait Data: Send + Sync { 5 | fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; 6 | 7 | fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result; 8 | 9 | fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result; 10 | 11 | // Returns the count at which the last reset_notification_counts was called 12 | fn last_notification_read(&self, user_id: &UserId, room_id: &RoomId) -> Result; 13 | 14 | fn associate_token_shortstatehash( 15 | &self, 16 | room_id: &RoomId, 17 | token: u64, 18 | shortstatehash: u64, 19 | ) -> Result<()>; 20 | 21 | fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result>; 22 | 23 | fn get_shared_rooms<'a>( 24 | &'a self, 25 | users: Vec, 26 | ) -> Result> + 'a>>; 27 | } 28 | -------------------------------------------------------------------------------- /src/service/rooms/short/data.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use crate::Result; 
use ruma::{OwnedRoomId, RoomId};

use crate::{database::KeyValueDatabase, service, utils, Error, Result};

impl service::rooms::directory::Data for KeyValueDatabase {
    /// Marks a room as publicly listed. Only key presence matters, so the
    /// value stored under the room-id key is empty.
    fn set_public(&self, room_id: &RoomId) -> Result<()> {
        self.publicroomids.insert(room_id.as_bytes(), &[])
    }

    /// Removes a room from the public directory.
    fn set_not_public(&self, room_id: &RoomId) -> Result<()> {
        self.publicroomids.remove(room_id.as_bytes())
    }

    /// Whether the room is currently in the public directory
    /// (i.e. its key exists in `publicroomids`).
    fn is_public_room(&self, room_id: &RoomId) -> Result {
        Ok(self.publicroomids.get(room_id.as_bytes())?.is_some())
    }

    /// Iterates over all public rooms, decoding each stored key back into a
    /// room ID. Corrupt entries (non-UTF-8 or unparsable IDs) are yielded as
    /// `Err` items rather than skipped, so callers see database corruption.
    fn public_rooms<'a>(&'a self) -> Box> + 'a> {
        Box::new(self.publicroomids.iter().map(|(bytes, _)| {
            RoomId::parse(
                utils::string_from_bytes(&bytes).map_err(|_| {
                    Error::bad_database("Room ID in publicroomids is invalid unicode.")
                })?,
            )
            .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid."))
        }))
    }
}
| pub sender_device: Option, 16 | pub sender_servername: Option, 17 | // This is None when body is not a valid string 18 | pub json_body: Option, 19 | pub appservice_info: Option, 20 | } 21 | 22 | impl Deref for Ruma { 23 | type Target = T; 24 | 25 | fn deref(&self) -> &Self::Target { 26 | &self.body 27 | } 28 | } 29 | 30 | #[derive(Clone)] 31 | pub struct RumaResponse(pub T); 32 | 33 | impl From for RumaResponse { 34 | fn from(t: T) -> Self { 35 | Self(t) 36 | } 37 | } 38 | 39 | impl From for RumaResponse { 40 | fn from(t: Error) -> Self { 41 | t.to_response() 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /src/database/key_value/rooms/outlier.rs: -------------------------------------------------------------------------------- 1 | use ruma::{CanonicalJsonObject, EventId}; 2 | 3 | use crate::{database::KeyValueDatabase, service, Error, PduEvent, Result}; 4 | 5 | impl service::rooms::outlier::Data for KeyValueDatabase { 6 | fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { 7 | self.eventid_outlierpdu 8 | .get(event_id.as_bytes())? 9 | .map_or(Ok(None), |pdu| { 10 | serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) 11 | }) 12 | } 13 | 14 | fn get_outlier_pdu(&self, event_id: &EventId) -> Result> { 15 | self.eventid_outlierpdu 16 | .get(event_id.as_bytes())? 
use crate::{services, Result, Ruma};
use ruma::api::client::discovery::get_capabilities::{
    self,
    v3::{Capabilities, RoomVersionStability, RoomVersionsCapability},
};
use std::collections::BTreeMap;

/// # `GET /_matrix/client/r0/capabilities`
///
/// Get information on the supported feature set and other relevant capabilities of this server.
pub async fn get_capabilities_route(
    _body: Ruma,
) -> Result {
    let mut available = BTreeMap::new();
    // Insert unstable versions first: if a version appears in both sets, the
    // second loop's insert overwrites the entry, so it is reported as Stable.
    for room_version in &services().globals.unstable_room_versions {
        available.insert(room_version.clone(), RoomVersionStability::Unstable);
    }
    for room_version in &services().globals.stable_room_versions {
        available.insert(room_version.clone(), RoomVersionStability::Stable);
    }

    let mut capabilities = Capabilities::new();
    capabilities.room_versions = RoomVersionsCapability {
        default: services().globals.default_room_version(),
        available,
    };

    Ok(get_capabilities::v3::Response { capabilities })
}
pub mod api;
pub mod clap;
mod config;
mod database;
mod service;
mod utils;

// Not async due to services() being used in many closures, and async closures are not stable as of writing
// This is the case for every other occurrence of sync Mutex/RwLock, except for database related ones, where
// the current maintainer (Timo) has asked to not modify those
use std::{
    collections::BTreeSet,
    sync::{LazyLock, RwLock},
};

pub use api::ruma_wrapper::{Ruma, RumaResponse};
pub use config::Config;
pub use database::KeyValueDatabase;
use ruma::api::{MatrixVersion, SupportedVersions};
pub use service::{pdu::PduEvent, Services};
pub use utils::error::{Error, Result};

// Global service registry: starts as `None` and is expected to be set once
// during startup before `services()` is first called.
pub static SERVICES: RwLock> = RwLock::new(None);

// Matrix spec versions (and unstable features) this server advertises.
pub static SUPPORTED_VERSIONS: LazyLock = LazyLock::new(|| SupportedVersions {
    versions: BTreeSet::from_iter([MatrixVersion::V1_13]),
    features: BTreeSet::new(),
});

/// Returns the global `Services` instance.
///
/// # Panics
///
/// Panics if the lock is poisoned (the `unwrap`) or if called before
/// `SERVICES` has been initialized (the `expect`).
pub fn services() -> &'static Services {
    SERVICES
        .read()
        .unwrap()
        .expect("SERVICES should be initialized when this is called")
}
attic -c xargs \ 35 | attic push conduit <<< "${cache[*]}" 36 | ) 37 | 38 | else 39 | echo "\$ATTIC_TOKEN is unset, skipping uploading to the binary cache" 40 | fi 41 | -------------------------------------------------------------------------------- /src/database/key_value/rooms/mod.rs: -------------------------------------------------------------------------------- 1 | mod alias; 2 | mod auth_chain; 3 | mod directory; 4 | mod edus; 5 | mod lazy_load; 6 | mod metadata; 7 | mod outlier; 8 | mod pdu_metadata; 9 | mod search; 10 | mod short; 11 | mod state; 12 | mod state_accessor; 13 | mod state_cache; 14 | mod state_compressor; 15 | mod threads; 16 | mod timeline; 17 | mod user; 18 | 19 | use ruma::{RoomId, UserId}; 20 | 21 | use crate::{database::KeyValueDatabase, service}; 22 | 23 | impl service::rooms::Data for KeyValueDatabase {} 24 | 25 | /// Constructs roomuser_id and userroom_id respectively in byte form 26 | fn get_room_and_user_byte_ids(room_id: &RoomId, user_id: &UserId) -> (Vec, Vec) { 27 | ( 28 | get_roomuser_id_bytes(room_id, user_id), 29 | get_userroom_id_bytes(user_id, room_id), 30 | ) 31 | } 32 | 33 | fn get_roomuser_id_bytes(room_id: &RoomId, user_id: &UserId) -> Vec { 34 | let mut roomuser_id = room_id.as_bytes().to_vec(); 35 | roomuser_id.push(0xff); 36 | roomuser_id.extend_from_slice(user_id.as_bytes()); 37 | roomuser_id 38 | } 39 | 40 | fn get_userroom_id_bytes(user_id: &UserId, room_id: &RoomId) -> Vec { 41 | let mut userroom_id = user_id.as_bytes().to_vec(); 42 | userroom_id.push(0xff); 43 | userroom_id.extend_from_slice(room_id.as_bytes()); 44 | userroom_id 45 | } 46 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # CMake 2 | cmake-build-*/ 3 | 4 | # IntelliJ 5 | .idea/ 6 | out/ 7 | *.iml 8 | modules.xml 9 | *.ipr 10 | 11 | # mpeltonen/sbt-idea plugin 12 | .idea_modules/ 13 | 14 | # Linux backup files 15 | *~ 16 
| 17 | # temporary files which can be created if a process still has a handle open of a deleted file 18 | .fuse_hidden* 19 | 20 | # KDE directory preferences 21 | .directory 22 | 23 | # Linux trash folder which might appear on any partition or disk 24 | .Trash-* 25 | 26 | # .nfs files are created when an open file is removed but is still being accessed 27 | .nfs* 28 | 29 | # Rust 30 | /target/ 31 | 32 | ### vscode ### 33 | .vscode/* 34 | !.vscode/tasks.json 35 | !.vscode/launch.json 36 | !.vscode/extensions.json 37 | *.code-workspace 38 | 39 | ### Windows ### 40 | # Windows thumbnail cache files 41 | Thumbs.db 42 | Thumbs.db:encryptable 43 | ehthumbs.db 44 | ehthumbs_vista.db 45 | 46 | # Dump file 47 | *.stackdump 48 | 49 | # Folder config file 50 | [Dd]esktop.ini 51 | 52 | # Recycle Bin used on file shares 53 | $RECYCLE.BIN/ 54 | 55 | # Windows shortcuts 56 | *.lnk 57 | 58 | # Conduit 59 | conduit.toml 60 | conduit.db 61 | 62 | # Etc. 63 | **/*.rs.bk 64 | cached_target 65 | 66 | # Nix artifacts 67 | /result* 68 | 69 | # Direnv cache 70 | /.direnv 71 | 72 | # Gitlab CI cache 73 | /.gitlab-ci.d 74 | 75 | # mdbook output 76 | public/ -------------------------------------------------------------------------------- /src/api/client_server/filter.rs: -------------------------------------------------------------------------------- 1 | use crate::{services, Error, Result, Ruma}; 2 | use ruma::api::client::{ 3 | error::ErrorKind, 4 | filter::{create_filter, get_filter}, 5 | }; 6 | 7 | /// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}` 8 | /// 9 | /// Loads a filter that was previously created. 10 | /// 11 | /// - A user can only access their own filters 12 | pub async fn get_filter_route( 13 | body: Ruma, 14 | ) -> Result { 15 | let sender_user = body.sender_user.as_ref().expect("user is authenticated"); 16 | let filter = match services().users.get_filter(sender_user, &body.filter_id)? 
{ 17 | Some(filter) => filter, 18 | None => return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found.")), 19 | }; 20 | 21 | Ok(get_filter::v3::Response::new(filter)) 22 | } 23 | 24 | /// # `PUT /_matrix/client/r0/user/{userId}/filter` 25 | /// 26 | /// Creates a new filter to be used by other endpoints. 27 | pub async fn create_filter_route( 28 | body: Ruma, 29 | ) -> Result { 30 | let sender_user = body.sender_user.as_ref().expect("user is authenticated"); 31 | Ok(create_filter::v3::Response::new( 32 | services().users.create_filter(sender_user, &body.filter)?, 33 | )) 34 | } 35 | -------------------------------------------------------------------------------- /src/database/key_value/transaction_ids.rs: -------------------------------------------------------------------------------- 1 | use ruma::{DeviceId, TransactionId, UserId}; 2 | 3 | use crate::{database::KeyValueDatabase, service, Result}; 4 | 5 | impl service::transaction_ids::Data for KeyValueDatabase { 6 | fn add_txnid( 7 | &self, 8 | user_id: &UserId, 9 | device_id: Option<&DeviceId>, 10 | txn_id: &TransactionId, 11 | data: &[u8], 12 | ) -> Result<()> { 13 | let mut key = user_id.as_bytes().to_vec(); 14 | key.push(0xff); 15 | key.extend_from_slice(device_id.map(|d| d.as_bytes()).unwrap_or_default()); 16 | key.push(0xff); 17 | key.extend_from_slice(txn_id.as_bytes()); 18 | 19 | self.userdevicetxnid_response.insert(&key, data)?; 20 | 21 | Ok(()) 22 | } 23 | 24 | fn existing_txnid( 25 | &self, 26 | user_id: &UserId, 27 | device_id: Option<&DeviceId>, 28 | txn_id: &TransactionId, 29 | ) -> Result>> { 30 | let mut key = user_id.as_bytes().to_vec(); 31 | key.push(0xff); 32 | key.extend_from_slice(device_id.map(|d| d.as_bytes()).unwrap_or_default()); 33 | key.push(0xff); 34 | key.extend_from_slice(txn_id.as_bytes()); 35 | 36 | // If there's no entry, this is a new transaction 37 | self.userdevicetxnid_response.get(&key) 38 | } 39 | } 40 | 
-------------------------------------------------------------------------------- /src/service/rooms/state/data.rs: -------------------------------------------------------------------------------- 1 | use crate::Result; 2 | use ruma::{EventId, OwnedEventId, RoomId}; 3 | use std::{collections::HashSet, sync::Arc}; 4 | use tokio::sync::MutexGuard; 5 | 6 | pub trait Data: Send + Sync { 7 | /// Returns the last state hash key added to the db for the given room. 8 | fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result>; 9 | 10 | /// Set the state hash to a new version, but does not update state_cache. 11 | fn set_room_state( 12 | &self, 13 | room_id: &RoomId, 14 | new_shortstatehash: u64, 15 | _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex 16 | ) -> Result<()>; 17 | 18 | /// Associates a state with an event. 19 | fn set_event_state(&self, shorteventid: u64, shortstatehash: u64) -> Result<()>; 20 | 21 | /// Returns all events we would send as the prev_events of the next event. 22 | fn get_forward_extremities(&self, room_id: &RoomId) -> Result>>; 23 | 24 | /// Replace the forward extremities of the room. 
use ruma::ServerName;

use crate::Result;

use super::{OutgoingKind, SendingEventType};

/// Storage backend for the outgoing-event sending queue.
pub trait Data: Send + Sync {
    /// Iterates over all requests currently marked active (in flight),
    /// yielding the storage key alongside the destination kind and event.
    #[allow(clippy::type_complexity)]
    fn active_requests<'a>(
        &'a self,
    ) -> Box, OutgoingKind, SendingEventType)>> + 'a>;

    /// Like `active_requests`, but restricted to one destination.
    fn active_requests_for<'a>(
        &'a self,
        outgoing_kind: &OutgoingKind,
    ) -> Box, SendingEventType)>> + 'a>;

    /// Removes a single active request by its storage key.
    fn delete_active_request(&self, key: Vec) -> Result<()>;

    /// Removes all active requests for one destination.
    fn delete_all_active_requests_for(&self, outgoing_kind: &OutgoingKind) -> Result<()>;

    /// Removes all requests (active and queued, presumably) for one
    /// destination — TODO confirm against the implementation.
    fn delete_all_requests_for(&self, outgoing_kind: &OutgoingKind) -> Result<()>;

    /// Enqueues a batch of requests, returning the storage keys assigned to
    /// them.
    fn queue_requests(
        &self,
        requests: &[(&OutgoingKind, SendingEventType)],
    ) -> Result>>;

    /// Iterates over the queued (not yet active) requests for one
    /// destination.
    fn queued_requests<'a>(
        &'a self,
        outgoing_kind: &OutgoingKind,
    ) -> Box)>> + 'a>;

    /// Moves the given events (identified by their storage keys) from the
    /// queue into the active set.
    fn mark_as_active(&self, events: &[(SendingEventType, Vec)]) -> Result<()>;

    /// Records the latest EDU count delivered to `server_name`.
    fn set_latest_educount(&self, server_name: &ServerName, educount: u64) -> Result<()>;

    /// Returns the latest EDU count recorded for `server_name`.
    fn get_latest_educount(&self, server_name: &ServerName) -> Result;
}
6 | fn readreceipt_update( 7 | &self, 8 | user_id: &UserId, 9 | room_id: &RoomId, 10 | event: ReceiptEvent, 11 | ) -> Result<()>; 12 | 13 | /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. 14 | #[allow(clippy::type_complexity)] 15 | fn readreceipts_since<'a>( 16 | &'a self, 17 | room_id: &RoomId, 18 | since: u64, 19 | ) -> Box< 20 | dyn Iterator< 21 | Item = Result<( 22 | OwnedUserId, 23 | u64, 24 | Raw, 25 | )>, 26 | > + 'a, 27 | >; 28 | 29 | /// Sets a private read marker at `count`. 30 | fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()>; 31 | 32 | /// Returns the private read marker. 33 | fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result>; 34 | 35 | /// Returns the count of the last typing update in this room. 36 | fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result; 37 | } 38 | -------------------------------------------------------------------------------- /src/service/account_data/data.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use crate::Result; 4 | use ruma::{ 5 | events::{AnyGlobalAccountDataEvent, AnyRoomAccountDataEvent, RoomAccountDataEventType}, 6 | serde::Raw, 7 | RoomId, UserId, 8 | }; 9 | 10 | pub trait Data: Send + Sync { 11 | /// Places one event in the account data of the user and removes the previous entry. 12 | fn update( 13 | &self, 14 | room_id: Option<&RoomId>, 15 | user_id: &UserId, 16 | event_type: RoomAccountDataEventType, 17 | data: &serde_json::Value, 18 | ) -> Result<()>; 19 | 20 | /// Searches the account data for a specific kind. 21 | fn get( 22 | &self, 23 | room_id: Option<&RoomId>, 24 | user_id: &UserId, 25 | kind: RoomAccountDataEventType, 26 | ) -> Result>>; 27 | 28 | /// Returns all changes to the global account data that happened after `since`. 
29 | fn global_changes_since( 30 | &self, 31 | user_id: &UserId, 32 | since: u64, 33 | ) -> Result>>; 34 | 35 | /// Returns all changes to the room account data that happened after `since`. 36 | fn room_changes_since( 37 | &self, 38 | room_id: &RoomId, 39 | user_id: &UserId, 40 | since: u64, 41 | ) -> Result>>; 42 | } 43 | -------------------------------------------------------------------------------- /src/api/client_server/typing.rs: -------------------------------------------------------------------------------- 1 | use crate::{services, utils, Error, Result, Ruma}; 2 | use ruma::api::client::{error::ErrorKind, typing::create_typing_event}; 3 | 4 | /// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` 5 | /// 6 | /// Sets the typing state of the sender user. 7 | pub async fn create_typing_event_route( 8 | body: Ruma, 9 | ) -> Result { 10 | use create_typing_event::v3::Typing; 11 | 12 | let sender_user = body.sender_user.as_ref().expect("user is authenticated"); 13 | 14 | if !services() 15 | .rooms 16 | .state_cache 17 | .is_joined(sender_user, &body.room_id)? 
18 | { 19 | return Err(Error::BadRequest( 20 | ErrorKind::forbidden(), 21 | "You are not in this room.", 22 | )); 23 | } 24 | 25 | if let Typing::Yes(duration) = body.state { 26 | services() 27 | .rooms 28 | .edus 29 | .typing 30 | .typing_add( 31 | sender_user, 32 | &body.room_id, 33 | duration.as_millis() as u64 + utils::millis_since_unix_epoch(), 34 | ) 35 | .await?; 36 | } else { 37 | services() 38 | .rooms 39 | .edus 40 | .typing 41 | .typing_remove(sender_user, &body.room_id) 42 | .await?; 43 | } 44 | 45 | Ok(create_typing_event::v3::Response {}) 46 | } 47 | -------------------------------------------------------------------------------- /src/service/rooms/edus/presence/data.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use crate::Result; 4 | use ruma::{events::presence::PresenceEvent, OwnedUserId, RoomId, UserId}; 5 | 6 | pub trait Data: Send + Sync { 7 | /// Adds a presence event which will be saved until a new event replaces it. 8 | /// 9 | /// Note: This method takes a RoomId because presence updates are always bound to rooms to 10 | /// make sure users outside these rooms can't see them. 11 | fn update_presence( 12 | &self, 13 | user_id: &UserId, 14 | room_id: &RoomId, 15 | presence: PresenceEvent, 16 | ) -> Result<()>; 17 | 18 | /// Resets the presence timeout, so the user will stay in their current presence state. 19 | fn ping_presence(&self, user_id: &UserId) -> Result<()>; 20 | 21 | /// Returns the timestamp of the last presence update of this user in millis since the unix epoch. 22 | fn last_presence_update(&self, user_id: &UserId) -> Result>; 23 | 24 | /// Returns the presence event with correct last_active_ago. 25 | fn get_presence_event( 26 | &self, 27 | room_id: &RoomId, 28 | user_id: &UserId, 29 | count: u64, 30 | ) -> Result>; 31 | 32 | /// Returns the most recent presence updates that happened after the event with id `since`. 
33 | fn presence_since( 34 | &self, 35 | room_id: &RoomId, 36 | since: u64, 37 | ) -> Result>; 38 | } 39 | -------------------------------------------------------------------------------- /nix/shell.nix: -------------------------------------------------------------------------------- 1 | # Keep sorted 2 | { cargo-deb 3 | , default 4 | , engage 5 | , go 6 | , inputs 7 | , jq 8 | , lychee 9 | , mdbook 10 | , mkShell 11 | , olm 12 | , system 13 | , taplo 14 | , toolchain 15 | , typos 16 | }: 17 | 18 | mkShell { 19 | env = default.env // { 20 | # Rust Analyzer needs to be able to find the path to default crate 21 | # sources, and it can read this environment variable to do so. The 22 | # `rust-src` component is required in order for this to work. 23 | RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library"; 24 | }; 25 | 26 | # Development tools 27 | nativeBuildInputs = [ 28 | # Always use nightly rustfmt because most of its options are unstable 29 | # 30 | # This needs to come before `toolchain` in this list, otherwise 31 | # `$PATH` will have stable rustfmt instead. 
32 | inputs.fenix.packages.${system}.latest.rustfmt 33 | 34 | # rust itself 35 | toolchain 36 | 37 | # CI tests 38 | engage 39 | 40 | # format toml files 41 | taplo 42 | 43 | # Needed for producing Debian packages 44 | cargo-deb 45 | 46 | # Needed for our script for Complement 47 | jq 48 | 49 | # Needed for Complement 50 | go 51 | olm 52 | 56 | # Needed for finding broken markdown links 57 | lychee 58 | 59 | # Needed for checking for typos 60 | typos 61 | 62 | # Useful for editing the book locally 63 | mdbook 64 | ] ++ default.nativeBuildInputs ; 65 | } 66 | -------------------------------------------------------------------------------- /debian/README.md: -------------------------------------------------------------------------------- 1 | Conduit for Debian 2 | ================== 3 | 4 | Installation 5 | ------------ 6 | 7 | For information about downloading, building and deploying the Debian package, see 8 | the "Installing Conduit" section in the Deploying docs. 9 | All following sections until "Setting up the Reverse Proxy" can be ignored because 10 | this is handled automatically by the packaging. 11 | 12 | Configuration 13 | ------------- 14 | 15 | When installed, Debconf generates the configuration of the homeserver 16 | (host)name, the address and port it listens on. This configuration ends up in 17 | `/etc/matrix-conduit/conduit.toml`. 18 | 19 | You can tweak more detailed settings by uncommenting and setting the variables 20 | in `/etc/matrix-conduit/conduit.toml`. This involves settings such as the maximum 21 | file size for download/upload, enabling federation, etc. 22 | 23 | Running 24 | ------- 25 | 26 | The package uses the `matrix-conduit.service` systemd unit file to start and 27 | stop Conduit. It loads the configuration file mentioned above to set up the 28 | environment before running the server.
29 | 30 | This package assumes by default that Conduit will be placed behind a reverse 31 | proxy such as Apache or nginx. This default deployment entails just listening 32 | on `127.0.0.1` and the free port `6167` and is reachable via a client using the URL 33 | <http://localhost:6167>. 34 | 35 | At a later stage this packaging may also support setting up TLS and running 36 | stand-alone. In this case, however, you need to set up some certificates and 37 | renewal, for it to work properly. 38 | -------------------------------------------------------------------------------- /src/service/rooms/user/mod.rs: -------------------------------------------------------------------------------- 1 | mod data; 2 | 3 | pub use data::Data; 4 | use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; 5 | 6 | use crate::Result; 7 | 8 | pub struct Service { 9 | pub db: &'static dyn Data, 10 | } 11 | 12 | impl Service { 13 | pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { 14 | self.db.reset_notification_counts(user_id, room_id) 15 | } 16 | 17 | pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { 18 | self.db.notification_count(user_id, room_id) 19 | } 20 | 21 | pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { 22 | self.db.highlight_count(user_id, room_id) 23 | } 24 | 25 | pub fn last_notification_read(&self, user_id: &UserId, room_id: &RoomId) -> Result { 26 | self.db.last_notification_read(user_id, room_id) 27 | } 28 | 29 | pub fn associate_token_shortstatehash( 30 | &self, 31 | room_id: &RoomId, 32 | token: u64, 33 | shortstatehash: u64, 34 | ) -> Result<()> { 35 | self.db 36 | .associate_token_shortstatehash(room_id, token, shortstatehash) 37 | } 38 | 39 | pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { 40 | self.db.get_token_shortstatehash(room_id, token) 41 | } 42 | 43 | pub fn get_shared_rooms( 44 | &self, 45 | users: Vec, 46 | ) -> Result>> { 47 | 
self.db.get_shared_rooms(users) 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /docs/faq.md: -------------------------------------------------------------------------------- 1 | # FAQ 2 | 3 | Here are some of the most frequently asked questions about Conduit, and their answers. 4 | 5 | ## Why do I get a `M_INCOMPATIBLE_ROOM_VERSION` error when trying to join some rooms? 6 | 7 | Conduit doesn't support room versions 1 and 2 at all, and doesn't properly support versions 3-5 currently. You can track the progress of adding support [here](https://gitlab.com/famedly/conduit/-/issues/433). 8 | 9 | ## How do I back up my server? 10 | 11 | Backing up your Conduit server is very easy. 12 | You can simply stop Conduit, make a copy or file system snapshot of the database directory, then start Conduit again. 13 | 14 | > **Note**: When using a file system snapshot, it is not required that you stop the server, but it is still recommended as it is the safest option and should ensure your database is not left in an inconsistent state. 15 | 16 | ## How do I set up simplified sliding sync? 17 | 18 | You don't need to! If your Conduit instance is reachable, simplified sliding sync should work right out of the box, no delegation required. 19 | 20 | ## Can I migrate from Synapse to Conduit? 21 | 22 | Not really. You can reuse the domain of your current server with Conduit, but you will not be able to migrate accounts automatically. Rooms that were federated can be re-joined via the other participating servers, however media and the like may be deleted from remote servers after some time, and hence might not be recoverable. 23 | 24 | ## How do I make someone an admin? 25 | 26 | Simply invite them to the admin room. Once joined, they can administer the server by interacting with the `@conduit:<server_name>` user.
28 | -------------------------------------------------------------------------------- /src/api/client_server/threads.rs: -------------------------------------------------------------------------------- 1 | use ruma::api::client::{error::ErrorKind, threads::get_threads}; 2 | 3 | use crate::{services, Error, Result, Ruma}; 4 | 5 | /// # `GET /_matrix/client/r0/rooms/{roomId}/threads` 6 | pub async fn get_threads_route( 7 | body: Ruma, 8 | ) -> Result { 9 | let sender_user = body.sender_user.as_ref().expect("user is authenticated"); 10 | 11 | // Use limit or else 10, with maximum 100 12 | let limit = body 13 | .limit 14 | .and_then(|l| l.try_into().ok()) 15 | .unwrap_or(10) 16 | .min(100); 17 | 18 | let from = if let Some(from) = &body.from { 19 | from.parse() 20 | .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, ""))? 21 | } else { 22 | u64::MAX 23 | }; 24 | 25 | let threads = services() 26 | .rooms 27 | .threads 28 | .threads_until(sender_user, &body.room_id, from, &body.include)? 
29 | .take(limit) 30 | .filter_map(|r| r.ok()) 31 | .filter(|(_, pdu)| { 32 | services() 33 | .rooms 34 | .state_accessor 35 | .user_can_see_event(sender_user, &body.room_id, &pdu.event_id) 36 | .unwrap_or(false) 37 | }) 38 | .collect::>(); 39 | 40 | let next_batch = threads.last().map(|(count, _)| count.to_string()); 41 | 42 | Ok(get_threads::v1::Response { 43 | chunk: threads 44 | .into_iter() 45 | .map(|(_, pdu)| pdu.to_room_event()) 46 | .collect(), 47 | next_batch, 48 | }) 49 | } 50 | -------------------------------------------------------------------------------- /src/database/key_value/rooms/metadata.rs: -------------------------------------------------------------------------------- 1 | use ruma::{OwnedRoomId, RoomId}; 2 | 3 | use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; 4 | 5 | impl service::rooms::metadata::Data for KeyValueDatabase { 6 | fn exists(&self, room_id: &RoomId) -> Result { 7 | let prefix = match services().rooms.short.get_shortroomid(room_id)? { 8 | Some(b) => b.to_be_bytes().to_vec(), 9 | None => return Ok(false), 10 | }; 11 | 12 | // Look for PDUs in that room. 
13 | Ok(self 14 | .pduid_pdu 15 | .iter_from(&prefix, false) 16 | .next() 17 | .filter(|(k, _)| k.starts_with(&prefix)) 18 | .is_some()) 19 | } 20 | 21 | fn iter_ids<'a>(&'a self) -> Box> + 'a> { 22 | Box::new(self.roomid_shortroomid.iter().map(|(bytes, _)| { 23 | RoomId::parse( 24 | utils::string_from_bytes(&bytes).map_err(|_| { 25 | Error::bad_database("Room ID in publicroomids is invalid unicode.") 26 | })?, 27 | ) 28 | .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) 29 | })) 30 | } 31 | 32 | fn is_disabled(&self, room_id: &RoomId) -> Result { 33 | Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) 34 | } 35 | 36 | fn disable_room(&self, room_id: &RoomId, disabled: bool) -> Result<()> { 37 | if disabled { 38 | self.disabledroomids.insert(room_id.as_bytes(), &[])?; 39 | } else { 40 | self.disabledroomids.remove(room_id.as_bytes())?; 41 | } 42 | 43 | Ok(()) 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /complement/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rust:1.85.0 2 | 3 | WORKDIR /workdir 4 | 5 | RUN apt-get update && apt-get install -y --no-install-recommends \ 6 | libclang-dev 7 | 8 | COPY Cargo.toml Cargo.toml 9 | COPY Cargo.lock Cargo.lock 10 | COPY src src 11 | COPY .cargo .cargo 12 | RUN cargo build --release \ 13 | && mv target/release/conduit conduit \ 14 | && rm -rf target 15 | 16 | # Install caddy 17 | RUN apt-get update \ 18 | && apt-get install -y \ 19 | debian-keyring \ 20 | debian-archive-keyring \ 21 | apt-transport-https \ 22 | curl \ 23 | && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/gpg.key' \ 24 | | gpg --dearmor -o /usr/share/keyrings/caddy-testing-archive-keyring.gpg \ 25 | && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/debian.deb.txt' \ 26 | | tee /etc/apt/sources.list.d/caddy-testing.list \ 27 | && apt-get update \ 28 | && apt-get install -y caddy 29 | 30 | 
COPY conduit-example.toml conduit.toml 31 | COPY complement/Caddyfile Caddyfile 32 | 33 | ENV SERVER_NAME=localhost 34 | ENV CONDUIT_CONFIG=/workdir/conduit.toml 35 | 36 | RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml 37 | RUN echo "log = \"warn,_=off,sled=off\"" >> conduit.toml 38 | RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml 39 | RUN sed -i "s/registration_token = \"\"//g" conduit.toml 40 | RUN sed -i "s/allow_check_for_updates = true/allow_check_for_updates = false/g" conduit.toml 41 | 42 | EXPOSE 8008 8448 43 | 44 | CMD uname -a && \ 45 | sed -i "s/#server_name = \"your.server.name\"/server_name = \"${SERVER_NAME}\"/g" conduit.toml && \ 46 | sed -i "s/your.server.name/${SERVER_NAME}/g" Caddyfile && \ 47 | caddy start > /dev/null && \ 48 | /workdir/conduit 49 | -------------------------------------------------------------------------------- /src/api/client_server/mod.rs: -------------------------------------------------------------------------------- 1 | mod account; 2 | mod alias; 3 | mod appservice; 4 | mod backup; 5 | mod capabilities; 6 | mod config; 7 | mod context; 8 | mod device; 9 | mod directory; 10 | mod filter; 11 | mod keys; 12 | pub mod media; 13 | mod membership; 14 | mod message; 15 | mod openid; 16 | mod presence; 17 | mod profile; 18 | mod push; 19 | mod read_marker; 20 | mod redact; 21 | mod relations; 22 | mod report; 23 | mod room; 24 | mod search; 25 | mod session; 26 | mod space; 27 | mod state; 28 | mod sync; 29 | mod tag; 30 | mod thirdparty; 31 | mod threads; 32 | mod to_device; 33 | mod typing; 34 | mod unversioned; 35 | mod user_directory; 36 | mod voip; 37 | mod well_known; 38 | 39 | pub use account::*; 40 | pub use alias::*; 41 | pub use appservice::*; 42 | pub use backup::*; 43 | pub use capabilities::*; 44 | pub use config::*; 45 | pub use context::*; 46 | pub use device::*; 47 | pub use directory::*; 48 | pub use filter::*; 49 | pub use keys::*; 50 | pub use media::*; 51 | pub use 
membership::*; 52 | pub use message::*; 53 | pub use openid::*; 54 | pub use presence::*; 55 | pub use profile::*; 56 | pub use push::*; 57 | pub use read_marker::*; 58 | pub use redact::*; 59 | pub use relations::*; 60 | pub use report::*; 61 | pub use room::*; 62 | pub use search::*; 63 | pub use session::*; 64 | pub use space::*; 65 | pub use state::*; 66 | pub use sync::*; 67 | pub use tag::*; 68 | pub use thirdparty::*; 69 | pub use threads::*; 70 | pub use to_device::*; 71 | pub use typing::*; 72 | pub use unversioned::*; 73 | pub use user_directory::*; 74 | pub use voip::*; 75 | pub use well_known::*; 76 | 77 | pub const DEVICE_ID_LENGTH: usize = 10; 78 | pub const TOKEN_LENGTH: usize = 32; 79 | pub const SESSION_ID_LENGTH: usize = 32; 80 | pub const AUTO_GEN_PASSWORD_LENGTH: usize = 15; 81 | -------------------------------------------------------------------------------- /src/database/abstraction/watchers.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::{hash_map, HashMap}, 3 | future::Future, 4 | pin::Pin, 5 | sync::RwLock, 6 | }; 7 | use tokio::sync::watch; 8 | 9 | #[derive(Default)] 10 | pub(super) struct Watchers { 11 | #[allow(clippy::type_complexity)] 12 | watchers: RwLock, (watch::Sender<()>, watch::Receiver<()>)>>, 13 | } 14 | 15 | impl Watchers { 16 | pub(super) fn watch<'a>( 17 | &'a self, 18 | prefix: &[u8], 19 | ) -> Pin + Send + 'a>> { 20 | let mut rx = match self.watchers.write().unwrap().entry(prefix.to_vec()) { 21 | hash_map::Entry::Occupied(o) => o.get().1.clone(), 22 | hash_map::Entry::Vacant(v) => { 23 | let (tx, rx) = watch::channel(()); 24 | v.insert((tx, rx.clone())); 25 | rx 26 | } 27 | }; 28 | 29 | Box::pin(async move { 30 | // Tx is never destroyed 31 | rx.changed().await.unwrap(); 32 | }) 33 | } 34 | pub(super) fn wake(&self, key: &[u8]) { 35 | let watchers = self.watchers.read().unwrap(); 36 | let mut triggered = Vec::new(); 37 | 38 | for length in 0..=key.len() { 
39 | if watchers.contains_key(&key[..length]) { 40 | triggered.push(&key[..length]); 41 | } 42 | } 43 | 44 | drop(watchers); 45 | 46 | if !triggered.is_empty() { 47 | let mut watchers = self.watchers.write().unwrap(); 48 | for prefix in triggered { 49 | if let Some(tx) = watchers.remove(prefix) { 50 | let _ = tx.0.send(()); 51 | } 52 | } 53 | }; 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /src/service/rooms/short/mod.rs: -------------------------------------------------------------------------------- 1 | mod data; 2 | use std::sync::Arc; 3 | 4 | pub use data::Data; 5 | use ruma::{events::StateEventType, EventId, RoomId}; 6 | 7 | use crate::Result; 8 | 9 | pub struct Service { 10 | pub db: &'static dyn Data, 11 | } 12 | 13 | impl Service { 14 | pub fn get_or_create_shorteventid(&self, event_id: &EventId) -> Result { 15 | self.db.get_or_create_shorteventid(event_id) 16 | } 17 | 18 | pub fn get_shortstatekey( 19 | &self, 20 | event_type: &StateEventType, 21 | state_key: &str, 22 | ) -> Result> { 23 | self.db.get_shortstatekey(event_type, state_key) 24 | } 25 | 26 | pub fn get_or_create_shortstatekey( 27 | &self, 28 | event_type: &StateEventType, 29 | state_key: &str, 30 | ) -> Result { 31 | self.db.get_or_create_shortstatekey(event_type, state_key) 32 | } 33 | 34 | pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { 35 | self.db.get_eventid_from_short(shorteventid) 36 | } 37 | 38 | pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { 39 | self.db.get_statekey_from_short(shortstatekey) 40 | } 41 | 42 | /// Returns (shortstatehash, already_existed) 43 | pub fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> Result<(u64, bool)> { 44 | self.db.get_or_create_shortstatehash(state_hash) 45 | } 46 | 47 | pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { 48 | self.db.get_shortroomid(room_id) 49 | } 50 | 51 | pub fn 
get_or_create_shortroomid(&self, room_id: &RoomId) -> Result { 52 | self.db.get_or_create_shortroomid(room_id) 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /src/service/rooms/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod alias; 2 | pub mod auth_chain; 3 | pub mod directory; 4 | pub mod edus; 5 | pub mod event_handler; 6 | pub mod helpers; 7 | pub mod lazy_loading; 8 | pub mod metadata; 9 | pub mod outlier; 10 | pub mod pdu_metadata; 11 | pub mod search; 12 | pub mod short; 13 | pub mod spaces; 14 | pub mod state; 15 | pub mod state_accessor; 16 | pub mod state_cache; 17 | pub mod state_compressor; 18 | pub mod threads; 19 | pub mod timeline; 20 | pub mod user; 21 | 22 | pub trait Data: 23 | alias::Data 24 | + auth_chain::Data 25 | + directory::Data 26 | + edus::Data 27 | + lazy_loading::Data 28 | + metadata::Data 29 | + outlier::Data 30 | + pdu_metadata::Data 31 | + search::Data 32 | + short::Data 33 | + state::Data 34 | + state_accessor::Data 35 | + state_cache::Data 36 | + state_compressor::Data 37 | + timeline::Data 38 | + threads::Data 39 | + user::Data 40 | { 41 | } 42 | 43 | pub struct Service { 44 | pub alias: alias::Service, 45 | pub auth_chain: auth_chain::Service, 46 | pub directory: directory::Service, 47 | pub edus: edus::Service, 48 | pub event_handler: event_handler::Service, 49 | pub helpers: helpers::Service, 50 | pub lazy_loading: lazy_loading::Service, 51 | pub metadata: metadata::Service, 52 | pub outlier: outlier::Service, 53 | pub pdu_metadata: pdu_metadata::Service, 54 | pub search: search::Service, 55 | pub short: short::Service, 56 | pub state: state::Service, 57 | pub state_accessor: state_accessor::Service, 58 | pub state_cache: state_cache::Service, 59 | pub state_compressor: state_compressor::Service, 60 | pub timeline: timeline::Service, 61 | pub threads: threads::Service, 62 | pub spaces: spaces::Service, 63 | pub user: 
user::Service, 64 | } 65 | -------------------------------------------------------------------------------- /engage.toml: -------------------------------------------------------------------------------- 1 | interpreter = ["bash", "-euo", "pipefail", "-c"] 2 | 3 | [[task]] 4 | group = "versions" 5 | name = "engage" 6 | script = "engage --version" 7 | 8 | [[task]] 9 | group = "versions" 10 | name = "rustc" 11 | script = "rustc --version" 12 | 13 | [[task]] 14 | group = "versions" 15 | name = "cargo" 16 | script = "cargo --version" 17 | 18 | [[task]] 19 | group = "versions" 20 | name = "cargo-fmt" 21 | script = "cargo fmt --version" 22 | 23 | [[task]] 24 | group = "versions" 25 | name = "rustdoc" 26 | script = "rustdoc --version" 27 | 28 | [[task]] 29 | group = "versions" 30 | name = "cargo-clippy" 31 | script = "cargo clippy -- --version" 32 | 33 | [[task]] 34 | group = "versions" 35 | name = "lychee" 36 | script = "lychee --version" 37 | 38 | [[task]] 39 | group = "versions" 40 | name = "typos" 41 | script = "typos --version" 42 | 43 | [[task]] 44 | group = "lints" 45 | name = "cargo-fmt" 46 | script = "cargo fmt --check -- --color=always" 47 | 48 | [[task]] 49 | group = "lints" 50 | name = "cargo-doc" 51 | script = """ 52 | RUSTDOCFLAGS="-D warnings" cargo doc \ 53 | --workspace \ 54 | --no-deps \ 55 | --document-private-items \ 56 | --color always 57 | """ 58 | 59 | [[task]] 60 | group = "lints" 61 | name = "cargo-clippy" 62 | script = "cargo clippy --workspace --all-targets --color=always -- -D warnings" 63 | 64 | [[task]] 65 | group = "lints" 66 | name = "taplo-fmt" 67 | script = "taplo fmt --check --colors always" 68 | 69 | [[task]] 70 | group = "lints" 71 | name = "lychee" 72 | script = "lychee --offline docs" 73 | 74 | [[task]] 75 | group = "lints" 76 | name = "typos" 77 | script = "typos" 78 | 79 | [[task]] 80 | group = "tests" 81 | name = "cargo" 82 | script = """ 83 | cargo test \ 84 | --workspace \ 85 | --all-targets \ 86 | --color=always \ 87 | -- \ 88 | 
--color=always 89 | """ 90 | -------------------------------------------------------------------------------- /src/service/rooms/edus/read_receipt/mod.rs: -------------------------------------------------------------------------------- 1 | mod data; 2 | 3 | pub use data::Data; 4 | 5 | use crate::Result; 6 | use ruma::{events::receipt::ReceiptEvent, serde::Raw, OwnedUserId, RoomId, UserId}; 7 | 8 | pub struct Service { 9 | pub db: &'static dyn Data, 10 | } 11 | 12 | impl Service { 13 | /// Replaces the previous read receipt. 14 | pub fn readreceipt_update( 15 | &self, 16 | user_id: &UserId, 17 | room_id: &RoomId, 18 | event: ReceiptEvent, 19 | ) -> Result<()> { 20 | self.db.readreceipt_update(user_id, room_id, event) 21 | } 22 | 23 | /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. 24 | #[tracing::instrument(skip(self))] 25 | pub fn readreceipts_since<'a>( 26 | &'a self, 27 | room_id: &RoomId, 28 | since: u64, 29 | ) -> impl Iterator< 30 | Item = Result<( 31 | OwnedUserId, 32 | u64, 33 | Raw, 34 | )>, 35 | > + 'a { 36 | self.db.readreceipts_since(room_id, since) 37 | } 38 | 39 | /// Sets a private read marker at `count`. 40 | #[tracing::instrument(skip(self))] 41 | pub fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> { 42 | self.db.private_read_set(room_id, user_id, count) 43 | } 44 | 45 | /// Returns the private read marker. 46 | #[tracing::instrument(skip(self))] 47 | pub fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { 48 | self.db.private_read_get(room_id, user_id) 49 | } 50 | 51 | /// Returns the count of the last typing update in this room. 
52 | pub fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { 53 | self.db.last_privateread_update(user_id, room_id) 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /src/api/client_server/unversioned.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::BTreeMap, iter::FromIterator}; 2 | 3 | use ruma::api::client::discovery::get_supported_versions; 4 | 5 | use crate::{Result, Ruma}; 6 | 7 | /// # `GET /_matrix/client/versions` 8 | /// 9 | /// Get the versions of the specification and unstable features supported by this server. 10 | /// 11 | /// - Versions take the form MAJOR.MINOR.PATCH 12 | /// - Only the latest PATCH release will be reported for each MAJOR.MINOR value 13 | /// - Unstable features are namespaced and may include version information in their name 14 | /// 15 | /// Note: Unstable features are used while developing new features. Clients should avoid using 16 | /// unstable features in their stable releases 17 | pub async fn get_supported_versions_route( 18 | _body: Ruma, 19 | ) -> Result { 20 | let resp = get_supported_versions::Response { 21 | versions: vec![ 22 | "r0.5.0".to_owned(), 23 | "r0.6.0".to_owned(), 24 | "v1.1".to_owned(), 25 | "v1.2".to_owned(), 26 | "v1.3".to_owned(), 27 | "v1.4".to_owned(), 28 | "v1.5".to_owned(), 29 | "v1.6".to_owned(), 30 | "v1.7".to_owned(), 31 | "v1.8".to_owned(), 32 | "v1.9".to_owned(), 33 | "v1.10".to_owned(), 34 | "v1.11".to_owned(), // Needed for Element-* to use authenticated media endpoints 35 | "v1.12".to_owned(), // Clarifies that guests can use auth media, which Element-* might depend on support being declared 36 | ], 37 | unstable_features: BTreeMap::from_iter([ 38 | ("org.matrix.e2e_cross_signing".to_owned(), true), 39 | ("org.matrix.msc3916.stable".to_owned(), true), 40 | ("org.matrix.simplified_msc3575".to_owned(), true), 41 | ]), 42 | }; 43 | 44 | Ok(resp) 45 | } 46 
| -------------------------------------------------------------------------------- /src/api/client_server/redact.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use crate::{service::pdu::PduBuilder, services, Result, Ruma}; 4 | use ruma::{ 5 | api::client::redact::redact_event, 6 | events::{room::redaction::RoomRedactionEventContent, TimelineEventType}, 7 | }; 8 | 9 | use serde_json::value::to_raw_value; 10 | 11 | /// # `PUT /_matrix/client/r0/rooms/{roomId}/redact/{eventId}/{txnId}` 12 | /// 13 | /// Tries to send a redaction event into the room. 14 | /// 15 | /// - TODO: Handle txn id 16 | pub async fn redact_event_route( 17 | body: Ruma, 18 | ) -> Result { 19 | let sender_user = body.sender_user.as_ref().expect("user is authenticated"); 20 | let body = body.body; 21 | 22 | let mutex_state = Arc::clone( 23 | services() 24 | .globals 25 | .roomid_mutex_state 26 | .write() 27 | .await 28 | .entry(body.room_id.clone()) 29 | .or_default(), 30 | ); 31 | let state_lock = mutex_state.lock().await; 32 | 33 | let event_id = services() 34 | .rooms 35 | .timeline 36 | .build_and_append_pdu( 37 | PduBuilder { 38 | event_type: TimelineEventType::RoomRedaction, 39 | content: to_raw_value(&RoomRedactionEventContent { 40 | redacts: Some(body.event_id.clone()), 41 | reason: body.reason.clone(), 42 | }) 43 | .expect("event is valid, we just created it"), 44 | unsigned: None, 45 | state_key: None, 46 | redacts: Some(body.event_id.into()), 47 | timestamp: None, 48 | }, 49 | sender_user, 50 | &body.room_id, 51 | &state_lock, 52 | ) 53 | .await?; 54 | 55 | drop(state_lock); 56 | 57 | let event_id = (*event_id).to_owned(); 58 | Ok(redact_event::v3::Response { event_id }) 59 | } 60 | -------------------------------------------------------------------------------- /src/database/abstraction.rs: -------------------------------------------------------------------------------- 1 | use super::Config; 2 | use crate::Result; 3 
| 4 | use std::{future::Future, pin::Pin, sync::Arc}; 5 | 6 | #[cfg(feature = "sqlite")] 7 | pub mod sqlite; 8 | 9 | #[cfg(feature = "rocksdb")] 10 | pub mod rocksdb; 11 | 12 | #[cfg(any(feature = "sqlite", feature = "rocksdb"))] 13 | pub mod watchers; 14 | 15 | pub trait KeyValueDatabaseEngine: Send + Sync { 16 | fn open(config: &Config) -> Result 17 | where 18 | Self: Sized; 19 | fn open_tree(&self, name: &'static str) -> Result>; 20 | fn flush(&self) -> Result<()>; 21 | fn cleanup(&self) -> Result<()> { 22 | Ok(()) 23 | } 24 | fn memory_usage(&self) -> Result { 25 | Ok("Current database engine does not support memory usage reporting.".to_owned()) 26 | } 27 | } 28 | 29 | pub trait KvTree: Send + Sync { 30 | fn get(&self, key: &[u8]) -> Result>>; 31 | 32 | fn insert(&self, key: &[u8], value: &[u8]) -> Result<()>; 33 | fn insert_batch(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()>; 34 | 35 | fn remove(&self, key: &[u8]) -> Result<()>; 36 | 37 | fn iter<'a>(&'a self) -> Box, Vec)> + 'a>; 38 | 39 | fn iter_from<'a>( 40 | &'a self, 41 | from: &[u8], 42 | backwards: bool, 43 | ) -> Box, Vec)> + 'a>; 44 | 45 | fn increment(&self, key: &[u8]) -> Result>; 46 | fn increment_batch(&self, iter: &mut dyn Iterator>) -> Result<()>; 47 | 48 | fn scan_prefix<'a>( 49 | &'a self, 50 | prefix: Vec, 51 | ) -> Box, Vec)> + 'a>; 52 | 53 | fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>>; 54 | 55 | fn clear(&self) -> Result<()> { 56 | for (key, _) in self.iter() { 57 | self.remove(&key)?; 58 | } 59 | 60 | Ok(()) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /src/api/client_server/voip.rs: -------------------------------------------------------------------------------- 1 | use crate::{config::TurnAuth, services, Error, Result, Ruma}; 2 | use base64::{engine::general_purpose, Engine as _}; 3 | use hmac::{Hmac, Mac}; 4 | use ruma::{ 5 | api::client::{error::ErrorKind, voip::get_turn_server_info}, 6 | 
SecondsSinceUnixEpoch, 7 | }; 8 | use sha1::Sha1; 9 | use std::time::{Duration, SystemTime}; 10 | 11 | type HmacSha1 = Hmac; 12 | 13 | /// # `GET /_matrix/client/r0/voip/turnServer` 14 | /// 15 | /// Returns information about the recommended turn server. 16 | pub async fn turn_server_route( 17 | body: Ruma, 18 | ) -> Result { 19 | let sender_user = body.sender_user.as_ref().expect("user is authenticated"); 20 | 21 | if let Some(turn) = services().globals.turn() { 22 | let (username, password) = match turn.auth { 23 | TurnAuth::Secret { secret } => { 24 | let expiry = SecondsSinceUnixEpoch::from_system_time( 25 | SystemTime::now() + Duration::from_secs(turn.ttl), 26 | ) 27 | .expect("time is valid"); 28 | 29 | let username: String = format!("{}:{}", expiry.get(), sender_user); 30 | 31 | let mut mac = HmacSha1::new_from_slice(secret.as_bytes()) 32 | .expect("HMAC can take key of any size"); 33 | mac.update(username.as_bytes()); 34 | 35 | let password: String = 36 | general_purpose::STANDARD.encode(mac.finalize().into_bytes()); 37 | 38 | (username, password) 39 | } 40 | TurnAuth::UserPass { username, password } => (username, password), 41 | }; 42 | 43 | Ok(get_turn_server_info::v3::Response { 44 | username, 45 | password, 46 | uris: turn.uris, 47 | ttl: Duration::from_secs(turn.ttl), 48 | }) 49 | } else { 50 | Err(Error::BadRequest(ErrorKind::NotFound, "No TURN config set")) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /src/api/client_server/space.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | 3 | use crate::{service::rooms::spaces::PagnationToken, services, Error, Result, Ruma}; 4 | use ruma::{ 5 | api::client::{error::ErrorKind, space::get_hierarchy}, 6 | UInt, 7 | }; 8 | 9 | /// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy`` 10 | /// 11 | /// Paginates over the space tree in a depth-first manner to locate child rooms of a given space. 
12 | pub async fn get_hierarchy_route( 13 | body: Ruma, 14 | ) -> Result { 15 | let sender_user = body.sender_user.as_ref().expect("user is authenticated"); 16 | 17 | let limit = body 18 | .limit 19 | .unwrap_or(UInt::from(10_u32)) 20 | .min(UInt::from(100_u32)); 21 | let max_depth = body 22 | .max_depth 23 | .unwrap_or(UInt::from(3_u32)) 24 | .min(UInt::from(10_u32)); 25 | 26 | let key = body 27 | .from 28 | .as_ref() 29 | .and_then(|s| PagnationToken::from_str(s).ok()); 30 | 31 | // Should prevent unexpected behaviour in (bad) clients 32 | if let Some(token) = &key { 33 | if token.suggested_only != body.suggested_only || token.max_depth != max_depth { 34 | return Err(Error::BadRequest( 35 | ErrorKind::InvalidParam, 36 | "suggested_only and max_depth cannot change on paginated requests", 37 | )); 38 | } 39 | } 40 | 41 | services() 42 | .rooms 43 | .spaces 44 | .get_client_hierarchy( 45 | sender_user, 46 | &body.room_id, 47 | usize::try_from(limit) 48 | .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Limit is too great"))?, 49 | key.map_or(vec![], |token| token.short_room_ids), 50 | usize::try_from(max_depth).map_err(|_| { 51 | Error::BadRequest(ErrorKind::InvalidParam, "Max depth is too great") 52 | })?, 53 | body.suggested_only, 54 | ) 55 | .await 56 | } 57 | -------------------------------------------------------------------------------- /docs/administration/media.md: -------------------------------------------------------------------------------- 1 | # Media 2 | 3 | While running Conduit, you may encounter undesirable media, either from other servers, or from local users. 4 | 5 | ## From other servers 6 | If the media originated from a different server, which itself is not malicious, it should be enough 7 | to use the `purge-media-from-server` command to delete the media from the media backend, and then 8 | contact the remote server so that they can deal with the offending user(s). 
9 | 10 | If you do not need the media deleted as soon as possible, you can use retention policies to only 11 | store remote media for a short period of time, meaning that the media will be automatically deleted 12 | after some time. As new media can only be accessed over authenticated endpoints, only local users 13 | will be able to access the media via your server, so if you're running a single-user server, you 14 | don't need to worry about the media being distributed via your server. 15 | 16 | If you know the media IDs (which you can find with the `list-media` command), you can use the 17 | `block-media` command to prevent any of those media IDs (or other media with the same SHA256 hash) from 18 | being stored in the media backend in the future. 19 | 20 | If the server itself is malicious, then it should probably be [ACLed](https://spec.matrix.org/v1.14/client-server-api/#server-access-control-lists-acls-for-rooms) 21 | in rooms it participates in. In the future, you'll be able to block the remote server from 22 | interacting with your server completely. 23 | 24 | ## From local users 25 | If the undesirable media originates from your own server, you can purge media uploaded by them 26 | using the `purge-media-from-users` command. If you also plan to deactivate the user, you can do so 27 | with the `--purge-media` flag on either the `deactivate-user` or `deactivate-all` commands. If 28 | they keep making new accounts, you can use the `block-media-from-users` command to prevent media 29 | with the same SHA256 hash from being uploaded again, as well as using the `allow-registration` 30 | command to temporarily prevent users from creating new accounts.
31 | -------------------------------------------------------------------------------- /src/service/account_data/mod.rs: -------------------------------------------------------------------------------- 1 | mod data; 2 | 3 | pub use data::Data; 4 | 5 | use ruma::{ 6 | events::{AnyGlobalAccountDataEvent, AnyRoomAccountDataEvent, RoomAccountDataEventType}, 7 | serde::Raw, 8 | RoomId, UserId, 9 | }; 10 | 11 | use std::collections::HashMap; 12 | 13 | use crate::Result; 14 | 15 | pub struct Service { 16 | pub db: &'static dyn Data, 17 | } 18 | 19 | impl Service { 20 | /// Places one event in the account data of the user and removes the previous entry. 21 | #[tracing::instrument(skip(self, room_id, user_id, event_type, data))] 22 | pub fn update( 23 | &self, 24 | room_id: Option<&RoomId>, 25 | user_id: &UserId, 26 | event_type: RoomAccountDataEventType, 27 | data: &serde_json::Value, 28 | ) -> Result<()> { 29 | self.db.update(room_id, user_id, event_type, data) 30 | } 31 | 32 | /// Searches the account data for a specific kind. 33 | #[tracing::instrument(skip(self, room_id, user_id, event_type))] 34 | pub fn get( 35 | &self, 36 | room_id: Option<&RoomId>, 37 | user_id: &UserId, 38 | event_type: RoomAccountDataEventType, 39 | ) -> Result>> { 40 | self.db.get(room_id, user_id, event_type) 41 | } 42 | 43 | /// Returns all changes to the global account data that happened after `since`. 44 | #[tracing::instrument(skip_all)] 45 | pub fn global_changes_since( 46 | &self, 47 | user_id: &UserId, 48 | since: u64, 49 | ) -> Result>> { 50 | self.db.global_changes_since(user_id, since) 51 | } 52 | 53 | /// Returns all changes to the room account data that happened after `since`. 
54 | #[tracing::instrument(skip_all)] 55 | pub fn room_changes_since( 56 | &self, 57 | room_id: &RoomId, 58 | user_id: &UserId, 59 | since: u64, 60 | ) -> Result>> { 61 | self.db.room_changes_since(room_id, user_id, since) 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /src/service/rooms/state_accessor/data.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashMap, sync::Arc}; 2 | 3 | use async_trait::async_trait; 4 | use ruma::{events::StateEventType, EventId, RoomId}; 5 | 6 | use crate::{PduEvent, Result}; 7 | 8 | #[async_trait] 9 | pub trait Data: Send + Sync { 10 | /// Builds a StateMap by iterating over all keys that start 11 | /// with state_hash, this gives the full state for the given state_hash. 12 | async fn state_full_ids(&self, shortstatehash: u64) -> Result>>; 13 | 14 | async fn state_full( 15 | &self, 16 | shortstatehash: u64, 17 | ) -> Result>>; 18 | 19 | /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). 20 | fn state_get_id( 21 | &self, 22 | shortstatehash: u64, 23 | event_type: &StateEventType, 24 | state_key: &str, 25 | ) -> Result>>; 26 | 27 | /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). 28 | fn state_get( 29 | &self, 30 | shortstatehash: u64, 31 | event_type: &StateEventType, 32 | state_key: &str, 33 | ) -> Result>>; 34 | 35 | /// Returns the state hash for this pdu. 36 | fn pdu_shortstatehash(&self, event_id: &EventId) -> Result>; 37 | 38 | /// Returns the full room state. 39 | async fn room_state_full( 40 | &self, 41 | room_id: &RoomId, 42 | ) -> Result>>; 43 | 44 | /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). 45 | fn room_state_get_id( 46 | &self, 47 | room_id: &RoomId, 48 | event_type: &StateEventType, 49 | state_key: &str, 50 | ) -> Result>>; 51 | 52 | /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). 
53 | fn room_state_get( 54 | &self, 55 | room_id: &RoomId, 56 | event_type: &StateEventType, 57 | state_key: &str, 58 | ) -> Result>>; 59 | } 60 | -------------------------------------------------------------------------------- /src/database/key_value/rooms/auth_chain.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashSet, mem::size_of, sync::Arc}; 2 | 3 | use crate::{database::KeyValueDatabase, service, utils, Result}; 4 | 5 | impl service::rooms::auth_chain::Data for KeyValueDatabase { 6 | fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result>>> { 7 | // Check RAM cache 8 | if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { 9 | return Ok(Some(Arc::clone(result))); 10 | } 11 | 12 | // We only save auth chains for single events in the db 13 | if key.len() == 1 { 14 | // Check DB cache 15 | let chain = self 16 | .shorteventid_authchain 17 | .get(&key[0].to_be_bytes())? 18 | .map(|chain| { 19 | chain 20 | .chunks_exact(size_of::()) 21 | .map(|chunk| utils::u64_from_bytes(chunk).expect("byte length is correct")) 22 | .collect() 23 | }); 24 | 25 | if let Some(chain) = chain { 26 | let chain = Arc::new(chain); 27 | 28 | // Cache in RAM 29 | self.auth_chain_cache 30 | .lock() 31 | .unwrap() 32 | .insert(vec![key[0]], Arc::clone(&chain)); 33 | 34 | return Ok(Some(chain)); 35 | } 36 | } 37 | 38 | Ok(None) 39 | } 40 | 41 | fn cache_auth_chain(&self, key: Vec, auth_chain: Arc>) -> Result<()> { 42 | // Only persist single events in db 43 | if key.len() == 1 { 44 | self.shorteventid_authchain.insert( 45 | &key[0].to_be_bytes(), 46 | &auth_chain 47 | .iter() 48 | .flat_map(|s| s.to_be_bytes().to_vec()) 49 | .collect::>(), 50 | )?; 51 | } 52 | 53 | // Cache in RAM 54 | self.auth_chain_cache 55 | .lock() 56 | .unwrap() 57 | .insert(key, auth_chain); 58 | 59 | Ok(()) 60 | } 61 | } 62 | -------------------------------------------------------------------------------- 
/src/database/key_value/rooms/lazy_load.rs: --------------------------------------------------------------------------------
use ruma::{DeviceId, RoomId, UserId};

use crate::{database::KeyValueDatabase, service, Result};

/// Key layout in the `lazyloadedids` tree:
/// `user_id 0xff device_id 0xff room_id 0xff ll_user` -> (empty value).
/// 0xff is a safe separator because it cannot appear inside Matrix IDs.
impl service::rooms::lazy_loading::Data for KeyValueDatabase {
    /// Returns whether `ll_user`'s membership was already lazy-load-sent to
    /// this (user, device, room) combination.
    fn lazy_load_was_sent_before(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
        room_id: &RoomId,
        ll_user: &UserId,
    ) -> Result<bool> {
        // Same byte layout as the insertion path in
        // `lazy_load_confirm_delivery` below.
        let key = [
            user_id.as_bytes(),
            device_id.as_bytes(),
            room_id.as_bytes(),
            ll_user.as_bytes(),
        ]
        .join(&0xff);

        Ok(self.lazyloadedids.get(&key)?.is_some())
    }

    /// Records every user id yielded by `confirmed_user_ids` as having been
    /// sent for this (user, device, room) combination.
    fn lazy_load_confirm_delivery(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
        room_id: &RoomId,
        confirmed_user_ids: &mut dyn Iterator<Item = &UserId>,
    ) -> Result<()> {
        let prefix = lazy_load_prefix(user_id, device_id, room_id);

        for ll_id in confirmed_user_ids {
            let mut key = prefix.clone();
            key.extend_from_slice(ll_id.as_bytes());
            // The key alone carries all information; the value stays empty.
            self.lazyloadedids.insert(&key, &[])?;
        }

        Ok(())
    }

    /// Forgets everything that was lazy-load-sent for this
    /// (user, device, room) combination.
    fn lazy_load_reset(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
        room_id: &RoomId,
    ) -> Result<()> {
        let prefix = lazy_load_prefix(user_id, device_id, room_id);

        for (key, _) in self.lazyloadedids.scan_prefix(prefix) {
            self.lazyloadedids.remove(&key)?;
        }

        Ok(())
    }
}

/// Builds the shared `user 0xff device 0xff room 0xff` key prefix used by the
/// write and reset paths above.
fn lazy_load_prefix(user_id: &UserId, device_id: &DeviceId, room_id: &RoomId) -> Vec<u8> {
    let mut prefix = [user_id.as_bytes(), device_id.as_bytes(), room_id.as_bytes()].join(&0xff);
    prefix.push(0xff);
    prefix
}
--------------------------------------------------------------------------------
/src/database/key_value/appservice.rs: -------------------------------------------------------------------------------- 1 | use ruma::api::appservice::Registration; 2 | 3 | use crate::{database::KeyValueDatabase, service, utils, Error, Result}; 4 | 5 | impl service::appservice::Data for KeyValueDatabase { 6 | /// Registers an appservice and returns the ID to the caller 7 | fn register_appservice(&self, yaml: Registration) -> Result { 8 | let id = yaml.id.as_str(); 9 | self.id_appserviceregistrations.insert( 10 | id.as_bytes(), 11 | serde_yaml::to_string(&yaml).unwrap().as_bytes(), 12 | )?; 13 | 14 | Ok(id.to_owned()) 15 | } 16 | 17 | /// Remove an appservice registration 18 | /// 19 | /// # Arguments 20 | /// 21 | /// * `service_name` - the name you send to register the service previously 22 | fn unregister_appservice(&self, service_name: &str) -> Result<()> { 23 | self.id_appserviceregistrations 24 | .remove(service_name.as_bytes())?; 25 | Ok(()) 26 | } 27 | 28 | fn get_registration(&self, id: &str) -> Result> { 29 | self.id_appserviceregistrations 30 | .get(id.as_bytes())? 31 | .map(|bytes| { 32 | serde_yaml::from_slice(&bytes).map_err(|_| { 33 | Error::bad_database("Invalid registration bytes in id_appserviceregistrations.") 34 | }) 35 | }) 36 | .transpose() 37 | } 38 | 39 | fn iter_ids<'a>(&'a self) -> Result> + 'a>> { 40 | Ok(Box::new(self.id_appserviceregistrations.iter().map( 41 | |(id, _)| { 42 | utils::string_from_bytes(&id).map_err(|_| { 43 | Error::bad_database("Invalid id bytes in id_appserviceregistrations.") 44 | }) 45 | }, 46 | ))) 47 | } 48 | 49 | fn all(&self) -> Result> { 50 | self.iter_ids()? 51 | .filter_map(|id| id.ok()) 52 | .map(move |id| { 53 | Ok(( 54 | id.clone(), 55 | self.get_registration(&id)? 
56 | .expect("iter_ids only returns appservices that exist"), 57 | )) 58 | }) 59 | .collect() 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /src/database/key_value/rooms/state_compressor.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashSet, mem::size_of, sync::Arc}; 2 | 3 | use crate::{ 4 | database::KeyValueDatabase, 5 | service::{self, rooms::state_compressor::data::StateDiff}, 6 | utils, Error, Result, 7 | }; 8 | 9 | impl service::rooms::state_compressor::Data for KeyValueDatabase { 10 | fn get_statediff(&self, shortstatehash: u64) -> Result { 11 | let value = self 12 | .shortstatehash_statediff 13 | .get(&shortstatehash.to_be_bytes())? 14 | .ok_or_else(|| Error::bad_database("State hash does not exist"))?; 15 | let parent = 16 | utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); 17 | let parent = if parent != 0 { Some(parent) } else { None }; 18 | 19 | let mut add_mode = true; 20 | let mut added = HashSet::new(); 21 | let mut removed = HashSet::new(); 22 | 23 | let mut i = size_of::(); 24 | while let Some(v) = value.get(i..i + 2 * size_of::()) { 25 | if add_mode && v.starts_with(&0_u64.to_be_bytes()) { 26 | add_mode = false; 27 | i += size_of::(); 28 | continue; 29 | } 30 | if add_mode { 31 | added.insert(v.try_into().expect("we checked the size above")); 32 | } else { 33 | removed.insert(v.try_into().expect("we checked the size above")); 34 | } 35 | i += 2 * size_of::(); 36 | } 37 | 38 | Ok(StateDiff { 39 | parent, 40 | added: Arc::new(added), 41 | removed: Arc::new(removed), 42 | }) 43 | } 44 | 45 | fn save_statediff(&self, shortstatehash: u64, diff: StateDiff) -> Result<()> { 46 | let mut value = diff.parent.unwrap_or(0).to_be_bytes().to_vec(); 47 | for new in diff.added.iter() { 48 | value.extend_from_slice(&new[..]); 49 | } 50 | 51 | if !diff.removed.is_empty() { 52 | 
value.extend_from_slice(&0_u64.to_be_bytes()); 53 | for removed in diff.removed.iter() { 54 | value.extend_from_slice(&removed[..]); 55 | } 56 | } 57 | 58 | self.shortstatehash_statediff 59 | .insert(&shortstatehash.to_be_bytes(), &value) 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /docs/deploying/docker-compose.yml: -------------------------------------------------------------------------------- 1 | # Conduit 2 | version: '3' 3 | 4 | services: 5 | homeserver: 6 | ### If you already built the Conduit image with 'docker build' or want to use a registry image, 7 | ### then you are ready to go. 8 | image: matrixconduit/matrix-conduit:latest 9 | ### If you want to build a fresh image from the sources, then comment the image line and uncomment the 10 | ### build lines. If you want meaningful labels in your built Conduit image, you should run docker compose like this: 11 | ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker compose up -d 12 | # build: 13 | # context: . 14 | # args: 15 | # CREATED: '2021-03-16T08:18:27Z' 16 | # VERSION: '0.1.0' 17 | # LOCAL: 'false' 18 | # GIT_REF: origin/master 19 | restart: unless-stopped 20 | ports: 21 | - 8448:6167 22 | volumes: 23 | - db:/var/lib/matrix-conduit/ 24 | environment: 25 | CONDUIT_SERVER_NAME: your.server.name # EDIT THIS 26 | CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/ 27 | CONDUIT_DATABASE_BACKEND: rocksdb 28 | CONDUIT_PORT: 6167 29 | CONDUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB 30 | CONDUIT_ALLOW_REGISTRATION: 'true' 31 | CONDUIT_ALLOW_FEDERATION: 'true' 32 | CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true' 33 | CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' 34 | #CONDUIT_MAX_CONCURRENT_REQUESTS: 100 35 | CONDUIT_ADDRESS: 0.0.0.0 36 | CONDUIT_CONFIG: '' # Ignore this 37 | # 38 | ### Uncomment if you want to use your own Element-Web App. 
39 | ### Note: You need to provide a config.json for Element and you also need a second 40 | ### Domain or Subdomain for the communication between Element and Conduit 41 | ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md 42 | # element-web: 43 | # image: vectorim/element-web:latest 44 | # restart: unless-stopped 45 | # ports: 46 | # - 8009:80 47 | # volumes: 48 | # - ./element_config.json:/app/config.json 49 | # depends_on: 50 | # - homeserver 51 | 52 | volumes: 53 | db: 54 | -------------------------------------------------------------------------------- /src/service/key_backups/data.rs: -------------------------------------------------------------------------------- 1 | use std::collections::BTreeMap; 2 | 3 | use crate::Result; 4 | use ruma::{ 5 | api::client::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, 6 | serde::Raw, 7 | OwnedRoomId, RoomId, UserId, 8 | }; 9 | 10 | pub trait Data: Send + Sync { 11 | fn create_backup( 12 | &self, 13 | user_id: &UserId, 14 | backup_metadata: &Raw, 15 | ) -> Result; 16 | 17 | fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()>; 18 | 19 | fn update_backup( 20 | &self, 21 | user_id: &UserId, 22 | version: &str, 23 | backup_metadata: &Raw, 24 | ) -> Result; 25 | 26 | fn get_latest_backup_version(&self, user_id: &UserId) -> Result>; 27 | 28 | fn get_latest_backup(&self, user_id: &UserId) 29 | -> Result)>>; 30 | 31 | fn get_backup(&self, user_id: &UserId, version: &str) -> Result>>; 32 | 33 | fn add_key( 34 | &self, 35 | user_id: &UserId, 36 | version: &str, 37 | room_id: &RoomId, 38 | session_id: &str, 39 | key_data: &Raw, 40 | ) -> Result<()>; 41 | 42 | fn count_keys(&self, user_id: &UserId, version: &str) -> Result; 43 | 44 | fn get_etag(&self, user_id: &UserId, version: &str) -> Result; 45 | 46 | fn get_all( 47 | &self, 48 | user_id: &UserId, 49 | version: &str, 50 | ) -> Result>; 51 | 52 | fn get_room( 53 | &self, 54 | user_id: &UserId, 55 | version: &str, 56 | 
room_id: &RoomId, 57 | ) -> Result>>; 58 | 59 | fn get_session( 60 | &self, 61 | user_id: &UserId, 62 | version: &str, 63 | room_id: &RoomId, 64 | session_id: &str, 65 | ) -> Result>>; 66 | 67 | fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()>; 68 | 69 | fn delete_room_keys(&self, user_id: &UserId, version: &str, room_id: &RoomId) -> Result<()>; 70 | 71 | fn delete_room_key( 72 | &self, 73 | user_id: &UserId, 74 | version: &str, 75 | room_id: &RoomId, 76 | session_id: &str, 77 | ) -> Result<()>; 78 | } 79 | -------------------------------------------------------------------------------- /docs/appservices.md: -------------------------------------------------------------------------------- 1 | # Setting up Appservices 2 | 3 | ## Getting help 4 | 5 | If you run into any problems while setting up an Appservice, write an email to `timo@koesters.xyz`, ask us in [#conduit:ahimsa.chat](https://matrix.to/#/#conduit:ahimsa.chat) or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). 6 | 7 | ## Set up the appservice - general instructions 8 | 9 | Follow whatever instructions are given by the appservice. This usually includes 10 | downloading, changing its config (setting domain, homeserver url, port etc.) 11 | and later starting it. 12 | 13 | At some point the appservice guide should ask you to add a registration yaml 14 | file to the homeserver. In Synapse you would do this by adding the path to the 15 | homeserver.yaml, but in Conduit you can do this from within Matrix: 16 | 17 | First, go into the #admins room of your homeserver. The first person that 18 | registered on the homeserver automatically joins it. 
Then send a message into 19 | the room like this: 20 | 21 | @conduit:your.server.name: register-appservice 22 | ``` 23 | paste 24 | the 25 | contents 26 | of 27 | the 28 | yaml 29 | registration 30 | here 31 | ``` 32 | 33 | You can confirm it worked by sending a message like this: 34 | `@conduit:your.server.name: list-appservices` 35 | 36 | The @conduit bot should answer with `Appservices (1): your-bridge` 37 | 38 | Then you are done. Conduit will send messages to the appservices and the 39 | appservice can send requests to the homeserver. You don't need to restart 40 | Conduit, but if it doesn't work, restarting while the appservice is running 41 | could help. 42 | 43 | ## Appservice-specific instructions 44 | 45 | ### Remove an appservice 46 | 47 | To remove an appservice go to your admin room and execute 48 | 49 | `@conduit:your.server.name: unregister-appservice <name>` 50 | 51 | where `<name>` is one of the names in the output of `list-appservices`. 52 | 53 | ### Tested appservices 54 | 55 | These appservices have been tested and work with Conduit without any extra steps: 56 | 57 | - [matrix-appservice-discord](https://github.com/Half-Shot/matrix-appservice-discord) 58 | - [mautrix-hangouts](https://github.com/mautrix/hangouts/) 59 | - [mautrix-telegram](https://github.com/mautrix/telegram/) 60 | - [mautrix-signal](https://github.com/mautrix/signal/) from version `0.2.2` forward. 61 | - [heisenbridge](https://github.com/hifi/heisenbridge/) 62 | -------------------------------------------------------------------------------- /nix/pkgs/default/default.nix: -------------------------------------------------------------------------------- 1 | # Dependencies (keep sorted) 2 | { craneLib 3 | , inputs 4 | , lib 5 | , pkgsBuildHost 6 | , rocksdb 7 | , rust 8 | , stdenv 9 | 10 | # Options (keep sorted) 11 | , default-features ? true 12 | , features ? [] 13 | , profile ?
"release" 14 | }: 15 | 16 | let 17 | buildDepsOnlyEnv = 18 | let 19 | rocksdb' = rocksdb.override { 20 | enableJemalloc = builtins.elem "jemalloc" features; 21 | enableLiburing = false; 22 | }; 23 | in 24 | { 25 | NIX_OUTPATH_USED_AS_RANDOM_SEED = "randomseed"; # https://crane.dev/faq/rebuilds-bindgen.html 26 | ROCKSDB_INCLUDE_DIR = "${rocksdb'}/include"; 27 | ROCKSDB_LIB_DIR = "${rocksdb'}/lib"; 28 | } 29 | // 30 | (import ./cross-compilation-env.nix { 31 | # Keep sorted 32 | inherit 33 | lib 34 | pkgsBuildHost 35 | rust 36 | stdenv; 37 | }); 38 | 39 | buildPackageEnv = { 40 | CONDUIT_VERSION_EXTRA = inputs.self.shortRev or inputs.self.dirtyShortRev; 41 | } // buildDepsOnlyEnv; 42 | 43 | commonAttrs = { 44 | inherit 45 | (craneLib.crateNameFromCargoToml { 46 | cargoToml = "${inputs.self}/Cargo.toml"; 47 | }) 48 | pname 49 | version; 50 | 51 | src = let filter = inputs.nix-filter.lib; in filter { 52 | root = inputs.self; 53 | 54 | # Keep sorted 55 | include = [ 56 | ".cargo" 57 | "Cargo.lock" 58 | "Cargo.toml" 59 | "src" 60 | ]; 61 | }; 62 | 63 | nativeBuildInputs = [ 64 | # bindgen needs the build platform's libclang. Apparently due to "splicing 65 | # weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't quite do the 66 | # right thing here. 
67 | pkgsBuildHost.rustPlatform.bindgenHook 68 | ]; 69 | 70 | CARGO_PROFILE = profile; 71 | }; 72 | in 73 | 74 | craneLib.buildPackage ( commonAttrs // { 75 | cargoArtifacts = craneLib.buildDepsOnly (commonAttrs // { 76 | env = buildDepsOnlyEnv; 77 | }); 78 | 79 | cargoExtraArgs = "--locked " 80 | + lib.optionalString 81 | (!default-features) 82 | "--no-default-features " 83 | + lib.optionalString 84 | (features != []) 85 | "--features " + (builtins.concatStringsSep "," features); 86 | 87 | # This is redundant with CI 88 | doCheck = false; 89 | 90 | env = buildPackageEnv; 91 | 92 | passthru = { 93 | env = buildPackageEnv; 94 | }; 95 | 96 | meta.mainProgram = commonAttrs.pname; 97 | }) 98 | -------------------------------------------------------------------------------- /docs/delegation.md: -------------------------------------------------------------------------------- 1 | # Delegation 2 | 3 | You can run Conduit on a separate domain than the actual server name (what shows up in user ids, aliases, etc.). 4 | For example you can have your users have IDs such as `@foo:example.org` and have aliases like `#bar:example.org`, 5 | while actually having Conduit hosted on the `matrix.example.org` domain. This is called delegation. 6 | 7 | ## Automatic (recommended) 8 | 9 | Conduit has support for hosting delegation files by itself, and by default uses it to serve federation traffic on port 443. 10 | 11 | With this method, you need to direct requests to `/.well-known/matrix/*` to Conduit in your reverse proxy. 12 | 13 | This is only recommended if Conduit is on the same physical server as the server which serves your server name (e.g. example.org) 14 | as servers don't always seem to cache the response, leading to slower response times otherwise, but it should also work if you 15 | are connected to the server running Conduit using something like a VPN. 
16 | 17 | To configure it, use the following options in the `global.well_known` table: 18 | | Field | Type | Description | Default | 19 | | --- | --- | --- | --- | 20 | | `client` | `String` | The URL that clients should use to connect to Conduit | `https://` | 21 | | `server` | `String` | The hostname and port servers should use to connect to Conduit | `:443` | 22 | 23 | ### Example 24 | 25 | ```toml 26 | [global.well_known] 27 | client = "https://matrix.example.org" 28 | server = "matrix.example.org:443" 29 | ``` 30 | 31 | ## Manual 32 | 33 | Alternatively you can serve static JSON files to inform clients and servers how to connect to Conduit. 34 | 35 | ### Servers 36 | 37 | For servers to discover how to access your domain, serve a response in the following format for `/.well-known/matrix/server`: 38 | 39 | ```json 40 | { 41 | "m.server": "matrix.example.org:443" 42 | } 43 | ``` 44 | Where `matrix.example.org` is the domain and `443` is the port Conduit is accessible at. 45 | 46 | ### Clients 47 | 48 | For clients to discover how to access your domain, serve a response in the following format for `/.well-known/matrix/client`: 49 | ```json 50 | { 51 | "m.homeserver": { 52 | "base_url": "https://matrix.example.org" 53 | } 54 | } 55 | ``` 56 | Where `matrix.example.org` is the URL Conduit is accessible at. 
57 | 58 | To ensure that all clients can access this endpoint, it is recommended you set the following headers for this endpoint: 59 | ``` 60 | Access-Control-Allow-Origin: * 61 | Access-Control-Allow-Methods: GET, POST, PUT, DELETE, OPTIONS 62 | Access-Control-Allow-Headers: X-Requested-With, Content-Type, Authorization 63 | ``` 64 | -------------------------------------------------------------------------------- /src/service/rooms/lazy_loading/mod.rs: -------------------------------------------------------------------------------- 1 | mod data; 2 | use std::collections::{HashMap, HashSet}; 3 | 4 | pub use data::Data; 5 | use ruma::{DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, RoomId, UserId}; 6 | use tokio::sync::Mutex; 7 | 8 | use crate::Result; 9 | 10 | use super::timeline::PduCount; 11 | 12 | pub struct Service { 13 | pub db: &'static dyn Data, 14 | 15 | #[allow(clippy::type_complexity)] 16 | pub lazy_load_waiting: 17 | Mutex>>, 18 | } 19 | 20 | impl Service { 21 | #[tracing::instrument(skip(self))] 22 | pub fn lazy_load_was_sent_before( 23 | &self, 24 | user_id: &UserId, 25 | device_id: &DeviceId, 26 | room_id: &RoomId, 27 | ll_user: &UserId, 28 | ) -> Result { 29 | self.db 30 | .lazy_load_was_sent_before(user_id, device_id, room_id, ll_user) 31 | } 32 | 33 | #[tracing::instrument(skip(self))] 34 | pub async fn lazy_load_mark_sent( 35 | &self, 36 | user_id: &UserId, 37 | device_id: &DeviceId, 38 | room_id: &RoomId, 39 | lazy_load: HashSet, 40 | count: PduCount, 41 | ) { 42 | self.lazy_load_waiting.lock().await.insert( 43 | ( 44 | user_id.to_owned(), 45 | device_id.to_owned(), 46 | room_id.to_owned(), 47 | count, 48 | ), 49 | lazy_load, 50 | ); 51 | } 52 | 53 | #[tracing::instrument(skip(self))] 54 | pub async fn lazy_load_confirm_delivery( 55 | &self, 56 | user_id: &UserId, 57 | device_id: &DeviceId, 58 | room_id: &RoomId, 59 | since: PduCount, 60 | ) -> Result<()> { 61 | if let Some(user_ids) = self.lazy_load_waiting.lock().await.remove(&( 62 | 
user_id.to_owned(), 63 | device_id.to_owned(), 64 | room_id.to_owned(), 65 | since, 66 | )) { 67 | self.db.lazy_load_confirm_delivery( 68 | user_id, 69 | device_id, 70 | room_id, 71 | &mut user_ids.iter().map(|u| &**u), 72 | )?; 73 | } else { 74 | // Ignore 75 | } 76 | 77 | Ok(()) 78 | } 79 | 80 | #[tracing::instrument(skip(self))] 81 | pub fn lazy_load_reset( 82 | &self, 83 | user_id: &UserId, 84 | device_id: &DeviceId, 85 | room_id: &RoomId, 86 | ) -> Result<()> { 87 | self.db.lazy_load_reset(user_id, device_id, room_id) 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /src/api/client_server/report.rs: -------------------------------------------------------------------------------- 1 | use crate::{services, utils::HtmlEscape, Error, Result, Ruma}; 2 | use ruma::{ 3 | api::client::{error::ErrorKind, room::report_content}, 4 | events::room::message, 5 | int, 6 | }; 7 | 8 | /// # `POST /_matrix/client/r0/rooms/{roomId}/report/{eventId}` 9 | /// 10 | /// Reports an inappropriate event to homeserver admins 11 | /// 12 | pub async fn report_event_route( 13 | body: Ruma, 14 | ) -> Result { 15 | let sender_user = body.sender_user.as_ref().expect("user is authenticated"); 16 | 17 | let pdu = match services().rooms.timeline.get_pdu(&body.event_id)? 
{ 18 | Some(pdu) => pdu, 19 | _ => { 20 | return Err(Error::BadRequest( 21 | ErrorKind::InvalidParam, 22 | "Invalid Event ID", 23 | )) 24 | } 25 | }; 26 | 27 | if let Some(true) = body.score.map(|s| s > int!(0) || s < int!(-100)) { 28 | return Err(Error::BadRequest( 29 | ErrorKind::InvalidParam, 30 | "Invalid score, must be within 0 to -100", 31 | )); 32 | }; 33 | 34 | if let Some(true) = body.reason.clone().map(|s| s.chars().count() > 250) { 35 | return Err(Error::BadRequest( 36 | ErrorKind::InvalidParam, 37 | "Reason too long, should be 250 characters or fewer", 38 | )); 39 | }; 40 | 41 | services().admin 42 | .send_message(message::RoomMessageEventContent::text_html( 43 | format!( 44 | "Report received from: {}\n\n\ 45 | Event ID: {:?}\n\ 46 | Room ID: {:?}\n\ 47 | Sent By: {:?}\n\n\ 48 | Report Score: {:?}\n\ 49 | Report Reason: {:?}", 50 | sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason 51 | ), 52 | format!( 53 | "
Report received from: {0:?}\ 54 |
  • Event Info
    • Event ID: {1:?}\ 55 | 🔗
    • Room ID: {2:?}\ 56 |
    • Sent By: {3:?}
  • \ 57 | Report Info
    • Report Score: {4:?}
    • Report Reason: {5}
  • \ 58 |
", 59 | sender_user, 60 | pdu.event_id, 61 | pdu.room_id, 62 | pdu.sender, 63 | body.score, 64 | HtmlEscape(body.reason.as_deref().unwrap_or("")) 65 | ), 66 | )); 67 | 68 | Ok(report_content::v3::Response {}) 69 | } 70 | -------------------------------------------------------------------------------- /nix/pkgs/default/cross-compilation-env.nix: -------------------------------------------------------------------------------- 1 | { lib 2 | , pkgsBuildHost 3 | , rust 4 | , stdenv 5 | }: 6 | 7 | lib.optionalAttrs stdenv.hostPlatform.isStatic { 8 | ROCKSDB_STATIC = ""; 9 | } 10 | // 11 | { 12 | CARGO_BUILD_RUSTFLAGS = 13 | lib.concatStringsSep 14 | " " 15 | ([] 16 | # This disables PIE for static builds, which isn't great in terms of 17 | # security. Unfortunately, my hand is forced because nixpkgs' 18 | # `libstdc++.a` is built without `-fPIE`, which precludes us from 19 | # leaving PIE enabled. 20 | ++ lib.optionals 21 | stdenv.hostPlatform.isStatic 22 | [ "-C" "relocation-model=static" ] 23 | ++ lib.optionals 24 | (stdenv.buildPlatform.config != stdenv.hostPlatform.config) 25 | [ 26 | "-l" 27 | "c" 28 | 29 | "-l" 30 | "stdc++" 31 | "-L" 32 | "${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib" 33 | ] 34 | ); 35 | } 36 | 37 | # What follows is stolen from [here][0]. Its purpose is to properly configure 38 | # compilers and linkers for various stages of the build, and even covers the 39 | # case of build scripts that need native code compiled and run on the build 40 | # platform (I think). 
41 | # 42 | # [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L57-L80 43 | // 44 | ( 45 | let 46 | inherit (rust.lib) envVars; 47 | in 48 | lib.optionalAttrs 49 | (stdenv.targetPlatform.rust.rustcTarget 50 | != stdenv.hostPlatform.rust.rustcTarget) 51 | ( 52 | let 53 | inherit (stdenv.targetPlatform.rust) cargoEnvVarTarget; 54 | in 55 | { 56 | "CC_${cargoEnvVarTarget}" = envVars.ccForTarget; 57 | "CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget; 58 | "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = 59 | envVars.linkerForTarget; 60 | } 61 | ) 62 | // 63 | ( 64 | let 65 | inherit (stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget; 66 | in 67 | { 68 | "CC_${cargoEnvVarTarget}" = envVars.ccForHost; 69 | "CXX_${cargoEnvVarTarget}" = envVars.cxxForHost; 70 | "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.ccForHost; 71 | CARGO_BUILD_TARGET = rustcTarget; 72 | } 73 | ) 74 | // 75 | ( 76 | let 77 | inherit (stdenv.buildPlatform.rust) cargoEnvVarTarget; 78 | in 79 | { 80 | "CC_${cargoEnvVarTarget}" = envVars.ccForBuild; 81 | "CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild; 82 | "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.ccForBuild; 83 | HOST_CC = "${pkgsBuildHost.stdenv.cc}/bin/cc"; 84 | HOST_CXX = "${pkgsBuildHost.stdenv.cc}/bin/c++"; 85 | } 86 | ) 87 | ) 88 | -------------------------------------------------------------------------------- /docs/deploying/docker-compose.override.yml: -------------------------------------------------------------------------------- 1 | # Conduit - Traefik Reverse Proxy Labels 2 | version: '3' 3 | 4 | services: 5 | homeserver: 6 | labels: 7 | - "traefik.enable=true" 8 | - "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network 9 | 10 | - "traefik.http.routers.to-conduit.rule=Host(`.`)" # Change to the address on which Conduit is hosted 11 | - "traefik.http.routers.to-conduit.tls=true" 12 | - 
"traefik.http.routers.to-conduit.tls.certresolver=letsencrypt" 13 | - "traefik.http.routers.to-conduit.middlewares=cors-headers@docker" 14 | 15 | - "traefik.http.middlewares.cors-headers.headers.accessControlAllowOriginList=*" 16 | - "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization" 17 | - "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS" 18 | 19 | # We need some way to server the client and server .well-known json. The simplest way is to use a nginx container 20 | # to serve those two as static files. If you want to use a different way, delete or comment the below service, here 21 | # and in the docker compose file. 22 | well-known: 23 | labels: 24 | - "traefik.enable=true" 25 | - "traefik.docker.network=proxy" 26 | 27 | - "traefik.http.routers.to-matrix-wellknown.rule=Host(`.`) && PathPrefix(`/.well-known/matrix`)" 28 | - "traefik.http.routers.to-matrix-wellknown.tls=true" 29 | - "traefik.http.routers.to-matrix-wellknown.tls.certresolver=letsencrypt" 30 | - "traefik.http.routers.to-matrix-wellknown.middlewares=cors-headers@docker" 31 | 32 | - "traefik.http.middlewares.cors-headers.headers.accessControlAllowOriginList=*" 33 | - "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization" 34 | - "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS" 35 | 36 | 37 | ### Uncomment this if you uncommented Element-Web App in the docker-compose.yml 38 | # element-web: 39 | # labels: 40 | # - "traefik.enable=true" 41 | # - "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network 42 | 43 | # - "traefik.http.routers.to-element-web.rule=Host(`.`)" # Change to the address on which Element-Web is hosted 44 | # - "traefik.http.routers.to-element-web.tls=true" 45 | # - 
"traefik.http.routers.to-element-web.tls.certresolver=letsencrypt" 46 | -------------------------------------------------------------------------------- /src/database/key_value/rooms/state.rs: -------------------------------------------------------------------------------- 1 | use ruma::{EventId, OwnedEventId, RoomId}; 2 | use std::collections::HashSet; 3 | 4 | use std::sync::Arc; 5 | use tokio::sync::MutexGuard; 6 | 7 | use crate::{database::KeyValueDatabase, service, utils, Error, Result}; 8 | 9 | impl service::rooms::state::Data for KeyValueDatabase { 10 | fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { 11 | self.roomid_shortstatehash 12 | .get(room_id.as_bytes())? 13 | .map_or(Ok(None), |bytes| { 14 | Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { 15 | Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") 16 | })?)) 17 | }) 18 | } 19 | 20 | fn set_room_state( 21 | &self, 22 | room_id: &RoomId, 23 | new_shortstatehash: u64, 24 | _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex 25 | ) -> Result<()> { 26 | self.roomid_shortstatehash 27 | .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; 28 | Ok(()) 29 | } 30 | 31 | fn set_event_state(&self, shorteventid: u64, shortstatehash: u64) -> Result<()> { 32 | self.shorteventid_shortstatehash 33 | .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; 34 | Ok(()) 35 | } 36 | 37 | fn get_forward_extremities(&self, room_id: &RoomId) -> Result>> { 38 | let mut prefix = room_id.as_bytes().to_vec(); 39 | prefix.push(0xff); 40 | 41 | self.roomid_pduleaves 42 | .scan_prefix(prefix) 43 | .map(|(_, bytes)| { 44 | EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { 45 | Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") 46 | })?) 
47 | .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) 48 | }) 49 | .collect() 50 | } 51 | 52 | fn set_forward_extremities<'a>( 53 | &self, 54 | room_id: &RoomId, 55 | event_ids: Vec, 56 | _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex 57 | ) -> Result<()> { 58 | let mut prefix = room_id.as_bytes().to_vec(); 59 | prefix.push(0xff); 60 | 61 | for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { 62 | self.roomid_pduleaves.remove(&key)?; 63 | } 64 | 65 | for event_id in event_ids { 66 | let mut key = prefix.to_owned(); 67 | key.extend_from_slice(event_id.as_bytes()); 68 | self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; 69 | } 70 | 71 | Ok(()) 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /src/database/key_value/pusher.rs: -------------------------------------------------------------------------------- 1 | use ruma::{ 2 | api::client::push::{set_pusher, Pusher}, 3 | UserId, 4 | }; 5 | 6 | use crate::{database::KeyValueDatabase, service, utils, Error, Result}; 7 | 8 | impl service::pusher::Data for KeyValueDatabase { 9 | fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::PusherAction) -> Result<()> { 10 | match &pusher { 11 | set_pusher::v3::PusherAction::Post(data) => { 12 | let mut key = sender.as_bytes().to_vec(); 13 | key.push(0xff); 14 | key.extend_from_slice(data.pusher.ids.pushkey.as_bytes()); 15 | self.senderkey_pusher.insert( 16 | &key, 17 | &serde_json::to_vec(&pusher).expect("Pusher is valid JSON value"), 18 | )?; 19 | Ok(()) 20 | } 21 | set_pusher::v3::PusherAction::Delete(ids) => { 22 | let mut key = sender.as_bytes().to_vec(); 23 | key.push(0xff); 24 | key.extend_from_slice(ids.pushkey.as_bytes()); 25 | self.senderkey_pusher.remove(&key).map(|_| ()) 26 | } 27 | } 28 | } 29 | 30 | fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result> { 31 | let mut senderkey = 
sender.as_bytes().to_vec(); 32 | senderkey.push(0xff); 33 | senderkey.extend_from_slice(pushkey.as_bytes()); 34 | 35 | self.senderkey_pusher 36 | .get(&senderkey)? 37 | .map(|push| { 38 | serde_json::from_slice(&push) 39 | .map_err(|_| Error::bad_database("Invalid Pusher in db.")) 40 | }) 41 | .transpose() 42 | } 43 | 44 | fn get_pushers(&self, sender: &UserId) -> Result> { 45 | let mut prefix = sender.as_bytes().to_vec(); 46 | prefix.push(0xff); 47 | 48 | self.senderkey_pusher 49 | .scan_prefix(prefix) 50 | .map(|(_, push)| { 51 | serde_json::from_slice(&push) 52 | .map_err(|_| Error::bad_database("Invalid Pusher in db.")) 53 | }) 54 | .collect() 55 | } 56 | 57 | fn get_pushkeys<'a>( 58 | &'a self, 59 | sender: &UserId, 60 | ) -> Box> + 'a> { 61 | let mut prefix = sender.as_bytes().to_vec(); 62 | prefix.push(0xff); 63 | 64 | Box::new(self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| { 65 | let mut parts = k.splitn(2, |&b| b == 0xff); 66 | let _senderkey = parts.next(); 67 | let push_key = parts 68 | .next() 69 | .ok_or_else(|| Error::bad_database("Invalid senderkey_pusher in db"))?; 70 | let push_key_string = utils::string_from_bytes(push_key) 71 | .map_err(|_| Error::bad_database("Invalid pusher bytes in senderkey_pusher"))?; 72 | 73 | Ok(push_key_string) 74 | })) 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /src/api/client_server/appservice.rs: -------------------------------------------------------------------------------- 1 | use std::time::Instant; 2 | 3 | use ruma::api::{ 4 | appservice::ping::send_ping, 5 | client::{appservice::request_ping, error::ErrorKind}, 6 | }; 7 | 8 | use crate::{api::appservice_server, Error, Result, Ruma}; 9 | 10 | /// # `POST /_matrix/client/v1/appservice/{appserviceId}/ping` 11 | /// 12 | /// Allows an appservice to check whether the server and 13 | /// appservice can connect, and how fast their connection is 14 | pub async fn ping_appservice_route( 15 | body: Ruma, 16 | 
) -> Result { 17 | let Ruma:: { 18 | appservice_info, 19 | body, 20 | .. 21 | } = body; 22 | 23 | let registration = appservice_info 24 | .expect("Only appservices can call this endpoint") 25 | .registration; 26 | 27 | if registration.id != body.appservice_id { 28 | return Err(Error::BadRequest( 29 | ErrorKind::forbidden(), 30 | "Appservice ID specified in path does not match the requesting access token", 31 | )); 32 | } 33 | 34 | if registration.url.is_some() { 35 | let start = Instant::now(); 36 | let response = appservice_server::send_request( 37 | registration, 38 | send_ping::v1::Request { 39 | transaction_id: body.transaction_id, 40 | }, 41 | ) 42 | .await; 43 | let elapsed = start.elapsed(); 44 | 45 | if let Err(error) = response { 46 | Err(match error { 47 | Error::ReqwestError { source } => { 48 | if source.is_timeout() { 49 | Error::BadRequest( 50 | ErrorKind::ConnectionTimeout, 51 | "Connection to appservice timed-out", 52 | ) 53 | } else if let Some(status_code) = source.status() { 54 | Error::BadRequest( 55 | ErrorKind::BadStatus { 56 | status: Some(status_code), 57 | body: Some(source.to_string()), 58 | }, 59 | "Ping returned error status", 60 | ) 61 | } else { 62 | Error::BadRequest(ErrorKind::ConnectionFailed, "Failed to ping appservice") 63 | } 64 | } 65 | Error::BadServerResponse(_) => Error::BadRequest( 66 | ErrorKind::ConnectionFailed, 67 | "Received invalid response from appservice", 68 | ), 69 | e => e, 70 | }) 71 | } else { 72 | Ok(request_ping::v1::Response::new(elapsed)) 73 | } 74 | } else { 75 | Err(Error::BadRequest( 76 | ErrorKind::UrlNotSet, 77 | "Appservice doesn't have a URL configured", 78 | )) 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /src/database/key_value/uiaa.rs: -------------------------------------------------------------------------------- 1 | use ruma::{ 2 | api::client::{error::ErrorKind, uiaa::UiaaInfo}, 3 | CanonicalJsonValue, DeviceId, UserId, 4 | }; 5 | 6 | use 
crate::{database::KeyValueDatabase, service, Error, Result}; 7 | 8 | impl service::uiaa::Data for KeyValueDatabase { 9 | fn set_uiaa_request( 10 | &self, 11 | user_id: &UserId, 12 | device_id: &DeviceId, 13 | session: &str, 14 | request: &CanonicalJsonValue, 15 | ) -> Result<()> { 16 | self.userdevicesessionid_uiaarequest 17 | .write() 18 | .unwrap() 19 | .insert( 20 | (user_id.to_owned(), device_id.to_owned(), session.to_owned()), 21 | request.to_owned(), 22 | ); 23 | 24 | Ok(()) 25 | } 26 | 27 | fn get_uiaa_request( 28 | &self, 29 | user_id: &UserId, 30 | device_id: &DeviceId, 31 | session: &str, 32 | ) -> Option { 33 | self.userdevicesessionid_uiaarequest 34 | .read() 35 | .unwrap() 36 | .get(&(user_id.to_owned(), device_id.to_owned(), session.to_owned())) 37 | .map(|j| j.to_owned()) 38 | } 39 | 40 | fn update_uiaa_session( 41 | &self, 42 | user_id: &UserId, 43 | device_id: &DeviceId, 44 | session: &str, 45 | uiaainfo: Option<&UiaaInfo>, 46 | ) -> Result<()> { 47 | let mut userdevicesessionid = user_id.as_bytes().to_vec(); 48 | userdevicesessionid.push(0xff); 49 | userdevicesessionid.extend_from_slice(device_id.as_bytes()); 50 | userdevicesessionid.push(0xff); 51 | userdevicesessionid.extend_from_slice(session.as_bytes()); 52 | 53 | if let Some(uiaainfo) = uiaainfo { 54 | self.userdevicesessionid_uiaainfo.insert( 55 | &userdevicesessionid, 56 | &serde_json::to_vec(&uiaainfo).expect("UiaaInfo::to_vec always works"), 57 | )?; 58 | } else { 59 | self.userdevicesessionid_uiaainfo 60 | .remove(&userdevicesessionid)?; 61 | } 62 | 63 | Ok(()) 64 | } 65 | 66 | fn get_uiaa_session( 67 | &self, 68 | user_id: &UserId, 69 | device_id: &DeviceId, 70 | session: &str, 71 | ) -> Result { 72 | let mut userdevicesessionid = user_id.as_bytes().to_vec(); 73 | userdevicesessionid.push(0xff); 74 | userdevicesessionid.extend_from_slice(device_id.as_bytes()); 75 | userdevicesessionid.push(0xff); 76 | userdevicesessionid.extend_from_slice(session.as_bytes()); 77 | 78 | 
serde_json::from_slice( 79 | &self 80 | .userdevicesessionid_uiaainfo 81 | .get(&userdevicesessionid)? 82 | .ok_or(Error::BadRequest( 83 | ErrorKind::forbidden(), 84 | "UIAA session does not exist.", 85 | ))?, 86 | ) 87 | .map_err(|_| Error::bad_database("UiaaInfo in userdeviceid_uiaainfo is invalid.")) 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /src/database/key_value/rooms/threads.rs: -------------------------------------------------------------------------------- 1 | use ruma::{api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, UserId}; 2 | 3 | use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result}; 4 | 5 | impl service::rooms::threads::Data for KeyValueDatabase { 6 | fn threads_until<'a>( 7 | &'a self, 8 | user_id: &'a UserId, 9 | room_id: &'a RoomId, 10 | until: u64, 11 | _include: &'a IncludeThreads, 12 | ) -> Result> + 'a>> { 13 | let prefix = services() 14 | .rooms 15 | .short 16 | .get_shortroomid(room_id)? 17 | .expect("room exists") 18 | .to_be_bytes() 19 | .to_vec(); 20 | 21 | let mut current = prefix.clone(); 22 | current.extend_from_slice(&(until - 1).to_be_bytes()); 23 | 24 | Ok(Box::new( 25 | self.threadid_userids 26 | .iter_from(¤t, true) 27 | .take_while(move |(k, _)| k.starts_with(&prefix)) 28 | .map(move |(pduid, _users)| { 29 | let count = utils::u64_from_bytes(&pduid[(size_of::())..]) 30 | .map_err(|_| Error::bad_database("Invalid pduid in threadid_userids."))?; 31 | let mut pdu = services() 32 | .rooms 33 | .timeline 34 | .get_pdu_from_id(&pduid)? 
35 | .ok_or_else(|| { 36 | Error::bad_database("Invalid pduid reference in threadid_userids") 37 | })?; 38 | if pdu.sender != user_id { 39 | pdu.remove_transaction_id()?; 40 | } 41 | Ok((count, pdu)) 42 | }), 43 | )) 44 | } 45 | 46 | fn update_participants(&self, root_id: &[u8], participants: &[OwnedUserId]) -> Result<()> { 47 | let users = participants 48 | .iter() 49 | .map(|user| user.as_bytes()) 50 | .collect::>() 51 | .join(&[0xff][..]); 52 | 53 | self.threadid_userids.insert(root_id, &users)?; 54 | 55 | Ok(()) 56 | } 57 | 58 | fn get_participants(&self, root_id: &[u8]) -> Result>> { 59 | if let Some(users) = self.threadid_userids.get(root_id)? { 60 | Ok(Some( 61 | users 62 | .split(|b| *b == 0xff) 63 | .map(|bytes| { 64 | UserId::parse(utils::string_from_bytes(bytes).map_err(|_| { 65 | Error::bad_database("Invalid UserId bytes in threadid_userids.") 66 | })?) 67 | .map_err(|_| Error::bad_database("Invalid UserId in threadid_userids.")) 68 | }) 69 | .filter_map(|r| r.ok()) 70 | .collect(), 71 | )) 72 | } else { 73 | Ok(None) 74 | } 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /src/api/client_server/relations.rs: -------------------------------------------------------------------------------- 1 | use ruma::api::client::relations::{ 2 | get_relating_events, get_relating_events_with_rel_type, 3 | get_relating_events_with_rel_type_and_event_type, 4 | }; 5 | 6 | use crate::{services, Result, Ruma}; 7 | 8 | /// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}` 9 | pub async fn get_relating_events_with_rel_type_and_event_type_route( 10 | body: Ruma, 11 | ) -> Result { 12 | let sender_user = body.sender_user.as_ref().expect("user is authenticated"); 13 | 14 | let res = services() 15 | .rooms 16 | .pdu_metadata 17 | .paginate_relations_with_filter( 18 | sender_user, 19 | &body.room_id, 20 | &body.event_id, 21 | Some(body.event_type.clone()), 22 | Some(body.rel_type.clone()), 23 | 
body.from.clone(), 24 | body.to.clone(), 25 | body.limit, 26 | body.recurse, 27 | &body.dir, 28 | )?; 29 | 30 | Ok( 31 | get_relating_events_with_rel_type_and_event_type::v1::Response { 32 | chunk: res.chunk, 33 | next_batch: res.next_batch, 34 | prev_batch: res.prev_batch, 35 | recursion_depth: res.recursion_depth, 36 | }, 37 | ) 38 | } 39 | 40 | /// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}` 41 | pub async fn get_relating_events_with_rel_type_route( 42 | body: Ruma, 43 | ) -> Result { 44 | let sender_user = body.sender_user.as_ref().expect("user is authenticated"); 45 | 46 | let res = services() 47 | .rooms 48 | .pdu_metadata 49 | .paginate_relations_with_filter( 50 | sender_user, 51 | &body.room_id, 52 | &body.event_id, 53 | None, 54 | Some(body.rel_type.clone()), 55 | body.from.clone(), 56 | body.to.clone(), 57 | body.limit, 58 | body.recurse, 59 | &body.dir, 60 | )?; 61 | 62 | Ok(get_relating_events_with_rel_type::v1::Response { 63 | chunk: res.chunk, 64 | next_batch: res.next_batch, 65 | prev_batch: res.prev_batch, 66 | recursion_depth: res.recursion_depth, 67 | }) 68 | } 69 | 70 | /// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}` 71 | pub async fn get_relating_events_route( 72 | body: Ruma, 73 | ) -> Result { 74 | let sender_user = body.sender_user.as_ref().expect("user is authenticated"); 75 | 76 | services() 77 | .rooms 78 | .pdu_metadata 79 | .paginate_relations_with_filter( 80 | sender_user, 81 | &body.room_id, 82 | &body.event_id, 83 | None, 84 | None, 85 | body.from.clone(), 86 | body.to.clone(), 87 | body.limit, 88 | body.recurse, 89 | &body.dir, 90 | ) 91 | } 92 | -------------------------------------------------------------------------------- /src/api/client_server/presence.rs: -------------------------------------------------------------------------------- 1 | use crate::{services, utils, Error, Result, Ruma}; 2 | use ruma::api::client::{ 3 | error::ErrorKind, 4 | presence::{get_presence, set_presence}, 
};
use std::time::Duration;

/// # `PUT /_matrix/client/r0/presence/{userId}/status`
///
/// Sets the presence state of the sender user.
pub async fn set_presence_route(
    body: Ruma<set_presence::v3::Request>,
) -> Result<set_presence::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");

    // Presence is stored per room here, so fan the update out to every room
    // the sender has joined.
    for room_id in services().rooms.state_cache.rooms_joined(sender_user) {
        let room_id = room_id?;

        services().rooms.edus.presence.update_presence(
            sender_user,
            &room_id,
            ruma::events::presence::PresenceEvent {
                content: ruma::events::presence::PresenceEventContent {
                    avatar_url: services().users.avatar_url(sender_user)?,
                    currently_active: None,
                    displayname: services().users.displayname(sender_user)?,
                    last_active_ago: Some(
                        utils::millis_since_unix_epoch()
                            .try_into()
                            .expect("time is valid"),
                    ),
                    presence: body.presence.clone(),
                    status_msg: body.status_msg.clone(),
                },
                sender: sender_user.clone(),
            },
        )?;
    }

    Ok(set_presence::v3::Response {})
}

/// # `GET /_matrix/client/r0/presence/{userId}/status`
///
/// Gets the presence state of the given user.
///
/// - Only works if you share a room with the user
pub async fn get_presence_route(
    body: Ruma<get_presence::v3::Request>,
) -> Result<get_presence::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");

    let mut presence_event = None;

    // Scan shared rooms and take the first room that has a stored presence
    // event for the target user.
    for room_id in services()
        .rooms
        .user
        .get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])?
    {
        let room_id = room_id?;

        if let Some(presence) = services()
            .rooms
            .edus
            .presence
            .get_last_presence_event(sender_user, &room_id)?
67 | { 68 | presence_event = Some(presence); 69 | break; 70 | } 71 | } 72 | 73 | if let Some(presence) = presence_event { 74 | Ok(get_presence::v3::Response { 75 | // TODO: Should ruma just use the presenceeventcontent type here? 76 | status_msg: presence.content.status_msg, 77 | currently_active: presence.content.currently_active, 78 | last_active_ago: presence 79 | .content 80 | .last_active_ago 81 | .map(|millis| Duration::from_millis(millis.into())), 82 | presence: presence.content.presence, 83 | }) 84 | } else { 85 | Err(Error::BadRequest( 86 | ErrorKind::NotFound, 87 | "Presence state for this user was not found", 88 | )) 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /src/database/key_value/rooms/search.rs: -------------------------------------------------------------------------------- 1 | use ruma::RoomId; 2 | 3 | use crate::{database::KeyValueDatabase, service, services, utils, Result}; 4 | 5 | /// Splits a string into tokens used as keys in the search inverted index 6 | /// 7 | /// This may be used to tokenize both message bodies (for indexing) or search 8 | /// queries (for querying). 
fn tokenize(body: &str) -> impl Iterator<Item = String> + '_ {
    body.split_terminator(|c: char| !c.is_alphanumeric())
        .filter(|s| !s.is_empty())
        .filter(|word| word.len() <= 50) // skip absurdly long "words"
        .map(str::to_lowercase)
}

impl service::rooms::search::Data for KeyValueDatabase {
    fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> {
        // Inverted-index key layout: shortroomid ++ token ++ 0xff ++ pdu_id
        let mut batch = tokenize(message_body).map(|word| {
            let mut key = shortroomid.to_be_bytes().to_vec();
            key.extend_from_slice(word.as_bytes());
            key.push(0xff);
            key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here
            (key, Vec::new())
        });

        self.tokenids.insert_batch(&mut batch)
    }

    fn deindex_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> {
        // Must build keys exactly as index_pdu does, or removal misses.
        let batch = tokenize(message_body).map(|word| {
            let mut key = shortroomid.to_be_bytes().to_vec();
            key.extend_from_slice(word.as_bytes());
            key.push(0xff); // normalized from 0xFF for consistency with index_pdu
            key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here
            key
        });

        for token in batch {
            self.tokenids.remove(&token)?;
        }

        Ok(())
    }

    fn search_pdus<'a>(
        &'a self,
        room_id: &RoomId,
        search_string: &str,
    ) -> Result<Option<(Box<dyn Iterator<Item = Vec<u8>> + 'a>, Vec<String>)>> {
        let prefix = services()
            .rooms
            .short
            .get_shortroomid(room_id)?
54 | .expect("room exists") 55 | .to_be_bytes() 56 | .to_vec(); 57 | 58 | let words: Vec<_> = tokenize(search_string).collect(); 59 | 60 | let iterators = words.clone().into_iter().map(move |word| { 61 | let mut prefix2 = prefix.clone(); 62 | prefix2.extend_from_slice(word.as_bytes()); 63 | prefix2.push(0xff); 64 | let prefix3 = prefix2.clone(); 65 | 66 | let mut last_possible_id = prefix2.clone(); 67 | last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); 68 | 69 | self.tokenids 70 | .iter_from(&last_possible_id, true) // Newest pdus first 71 | .take_while(move |(k, _)| k.starts_with(&prefix2)) 72 | .map(move |(key, _)| key[prefix3.len()..].to_vec()) 73 | }); 74 | 75 | let common_elements = match utils::common_elements(iterators, |a, b| { 76 | // We compare b with a because we reversed the iterator earlier 77 | b.cmp(a) 78 | }) { 79 | Some(it) => it, 80 | None => return Ok(None), 81 | }; 82 | 83 | Ok(Some((Box::new(common_elements), words))) 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /docs/deploying/docker-compose.for-traefik.yml: -------------------------------------------------------------------------------- 1 | # Conduit - Behind Traefik Reverse Proxy 2 | version: '3' 3 | 4 | services: 5 | homeserver: 6 | ### If you already built the Conduit image with 'docker build' or want to use the Docker Hub image, 7 | ### then you are ready to go. 8 | image: matrixconduit/matrix-conduit:latest 9 | ### If you want to build a fresh image from the sources, then comment the image line and uncomment the 10 | ### build lines. If you want meaningful labels in your built Conduit image, you should run docker compose like this: 11 | ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker compose up -d 12 | # build: 13 | # context: . 
14 | # args: 15 | # CREATED: '2021-03-16T08:18:27Z' 16 | # VERSION: '0.1.0' 17 | # LOCAL: 'false' 18 | # GIT_REF: origin/master 19 | restart: unless-stopped 20 | volumes: 21 | - db:/var/lib/matrix-conduit/ 22 | networks: 23 | - proxy 24 | environment: 25 | CONDUIT_SERVER_NAME: your.server.name # EDIT THIS 26 | CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/ 27 | CONDUIT_DATABASE_BACKEND: rocksdb 28 | CONDUIT_PORT: 6167 29 | CONDUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB 30 | CONDUIT_ALLOW_REGISTRATION: 'true' 31 | #CONDUIT_REGISTRATION_TOKEN: '' # require password for registration 32 | CONDUIT_ALLOW_FEDERATION: 'true' 33 | CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true' 34 | CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' 35 | #CONDUIT_MAX_CONCURRENT_REQUESTS: 100 36 | CONDUIT_ADDRESS: 0.0.0.0 37 | CONDUIT_CONFIG: '' # Ignore this 38 | 39 | # We need some way to server the client and server .well-known json. The simplest way is to use a nginx container 40 | # to serve those two as static files. If you want to use a different way, delete or comment the below service, here 41 | # and in the docker compose override file. 42 | well-known: 43 | image: nginx:latest 44 | restart: unless-stopped 45 | volumes: 46 | - ./nginx/matrix.conf:/etc/nginx/conf.d/matrix.conf # the config to serve the .well-known/matrix files 47 | - ./nginx/www:/var/www/ # location of the client and server .well-known-files 48 | ### Uncomment if you want to use your own Element-Web App. 
49 | ### Note: You need to provide a config.json for Element and you also need a second 50 | ### Domain or Subdomain for the communication between Element and Conduit 51 | ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md 52 | # element-web: 53 | # image: vectorim/element-web:latest 54 | # restart: unless-stopped 55 | # volumes: 56 | # - ./element_config.json:/app/config.json 57 | # networks: 58 | # - proxy 59 | # depends_on: 60 | # - homeserver 61 | 62 | volumes: 63 | db: 64 | 65 | networks: 66 | # This is the network Traefik listens to, if your network has a different 67 | # name, don't forget to change it here and in the docker-compose.override.yml 68 | proxy: 69 | external: true 70 | -------------------------------------------------------------------------------- /src/service/rooms/timeline/data.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use ruma::{CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId}; 4 | 5 | use crate::{PduEvent, Result}; 6 | 7 | use super::PduCount; 8 | 9 | pub trait Data: Send + Sync { 10 | fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result; 11 | 12 | /// Returns the `count` of this pdu's id. 13 | fn get_pdu_count(&self, event_id: &EventId) -> Result>; 14 | 15 | /// Returns the json of a pdu. 16 | fn get_pdu_json(&self, event_id: &EventId) -> Result>; 17 | 18 | /// Returns the json of a pdu. 19 | fn get_non_outlier_pdu_json(&self, event_id: &EventId) -> Result>; 20 | 21 | /// Returns the pdu's id. 22 | fn get_pdu_id(&self, event_id: &EventId) -> Result>>; 23 | 24 | /// Returns the pdu. 25 | /// 26 | /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. 27 | fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result>; 28 | 29 | /// Returns the pdu. 30 | /// 31 | /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. 
32 | fn get_pdu(&self, event_id: &EventId) -> Result>>; 33 | 34 | /// Returns the pdu. 35 | /// 36 | /// This does __NOT__ check the outliers `Tree`. 37 | fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result>; 38 | 39 | /// Returns the pdu as a `BTreeMap`. 40 | fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result>; 41 | 42 | /// Adds a new pdu to the timeline 43 | fn append_pdu( 44 | &self, 45 | pdu_id: &[u8], 46 | pdu: &PduEvent, 47 | json: &CanonicalJsonObject, 48 | count: u64, 49 | ) -> Result<()>; 50 | 51 | // Adds a new pdu to the backfilled timeline 52 | fn prepend_backfill_pdu( 53 | &self, 54 | pdu_id: &[u8], 55 | event_id: &EventId, 56 | json: &CanonicalJsonObject, 57 | ) -> Result<()>; 58 | 59 | /// Removes a pdu and creates a new one with the same id. 60 | fn replace_pdu( 61 | &self, 62 | pdu_id: &[u8], 63 | pdu_json: &CanonicalJsonObject, 64 | pdu: &PduEvent, 65 | ) -> Result<()>; 66 | 67 | /// Returns an iterator over all events and their tokens in a room that happened before the 68 | /// event with id `until` in reverse-chronological order. 69 | #[allow(clippy::type_complexity)] 70 | fn pdus_until<'a>( 71 | &'a self, 72 | user_id: &UserId, 73 | room_id: &RoomId, 74 | until: PduCount, 75 | ) -> Result> + 'a>>; 76 | 77 | /// Returns an iterator over all events in a room that happened after the event with id `from` 78 | /// in chronological order. 
79 | #[allow(clippy::type_complexity)] 80 | fn pdus_after<'a>( 81 | &'a self, 82 | user_id: &UserId, 83 | room_id: &RoomId, 84 | from: PduCount, 85 | ) -> Result> + 'a>>; 86 | 87 | fn increment_notification_counts( 88 | &self, 89 | room_id: &RoomId, 90 | notifies: Vec, 91 | highlights: Vec, 92 | ) -> Result<()>; 93 | } 94 | -------------------------------------------------------------------------------- /src/database/key_value/rooms/alias.rs: -------------------------------------------------------------------------------- 1 | use ruma::{ 2 | api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId, 3 | UserId, 4 | }; 5 | 6 | use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; 7 | 8 | impl service::rooms::alias::Data for KeyValueDatabase { 9 | fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()> { 10 | // Comes first as we don't want a stuck alias 11 | self.alias_userid 12 | .insert(alias.alias().as_bytes(), user_id.as_bytes())?; 13 | self.alias_roomid 14 | .insert(alias.alias().as_bytes(), room_id.as_bytes())?; 15 | let mut aliasid = room_id.as_bytes().to_vec(); 16 | aliasid.push(0xff); 17 | aliasid.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); 18 | self.aliasid_alias.insert(&aliasid, alias.as_bytes())?; 19 | Ok(()) 20 | } 21 | 22 | fn remove_alias(&self, alias: &RoomAliasId) -> Result<()> { 23 | if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? 
{ 24 | let mut prefix = room_id.to_vec(); 25 | prefix.push(0xff); 26 | 27 | for (key, _) in self.aliasid_alias.scan_prefix(prefix) { 28 | self.aliasid_alias.remove(&key)?; 29 | } 30 | self.alias_roomid.remove(alias.alias().as_bytes())?; 31 | self.alias_userid.remove(alias.alias().as_bytes()) 32 | } else { 33 | Err(Error::BadRequest( 34 | ErrorKind::NotFound, 35 | "Alias does not exist.", 36 | )) 37 | } 38 | } 39 | 40 | fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result> { 41 | self.alias_roomid 42 | .get(alias.alias().as_bytes())? 43 | .map(|bytes| { 44 | RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { 45 | Error::bad_database("Room ID in alias_roomid is invalid unicode.") 46 | })?) 47 | .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) 48 | }) 49 | .transpose() 50 | } 51 | 52 | fn local_aliases_for_room<'a>( 53 | &'a self, 54 | room_id: &RoomId, 55 | ) -> Box> + 'a> { 56 | let mut prefix = room_id.as_bytes().to_vec(); 57 | prefix.push(0xff); 58 | 59 | Box::new(self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { 60 | utils::string_from_bytes(&bytes) 61 | .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? 62 | .try_into() 63 | .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) 64 | })) 65 | } 66 | 67 | fn who_created_alias(&self, alias: &RoomAliasId) -> Result> { 68 | self.alias_userid 69 | .get(alias.alias().as_bytes())? 70 | .map(|bytes| { 71 | UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { 72 | Error::bad_database("User ID in alias_userid is invalid unicode.") 73 | })?) 
74 | .map_err(|_| Error::bad_database("User ID in alias_userid is invalid.")) 75 | }) 76 | .transpose() 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /src/database/key_value/rooms/pdu_metadata.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use ruma::{EventId, RoomId, UserId}; 4 | 5 | use crate::{ 6 | database::KeyValueDatabase, 7 | service::{self, rooms::timeline::PduCount}, 8 | services, utils, Error, PduEvent, Result, 9 | }; 10 | 11 | impl service::rooms::pdu_metadata::Data for KeyValueDatabase { 12 | fn add_relation(&self, from: u64, to: u64) -> Result<()> { 13 | let mut key = to.to_be_bytes().to_vec(); 14 | key.extend_from_slice(&from.to_be_bytes()); 15 | self.tofrom_relation.insert(&key, &[])?; 16 | Ok(()) 17 | } 18 | 19 | fn relations_until<'a>( 20 | &'a self, 21 | user_id: &'a UserId, 22 | shortroomid: u64, 23 | target: u64, 24 | until: PduCount, 25 | ) -> Result<Box<dyn Iterator<Item = Result<(PduCount, PduEvent)>> + 'a>> { 26 | let prefix = target.to_be_bytes().to_vec(); 27 | let mut current = prefix.clone(); 28 | 29 | let count_raw = match until { 30 | PduCount::Normal(x) => x - 1, 31 | PduCount::Backfilled(x) => { 32 | current.extend_from_slice(&0_u64.to_be_bytes()); 33 | u64::MAX - x - 1 34 | } 35 | }; 36 | current.extend_from_slice(&count_raw.to_be_bytes()); 37 | 38 | Ok(Box::new( 39 | self.tofrom_relation 40 | .iter_from(&current, true) 41 | .take_while(move |(k, _)| k.starts_with(&prefix)) 42 | .map(move |(tofrom, _data)| { 43 | let from = utils::u64_from_bytes(&tofrom[(size_of::<u64>())..]) 44 | .map_err(|_| Error::bad_database("Invalid count in tofrom_relation."))?; 45 | 46 | let mut pduid = shortroomid.to_be_bytes().to_vec(); 47 | pduid.extend_from_slice(&from.to_be_bytes()); 48 | 49 | let mut pdu = services() 50 | .rooms 51 | .timeline 52 | .get_pdu_from_id(&pduid)? 
53 | .ok_or_else(|| Error::bad_database("Pdu in tofrom_relation is invalid."))?; 54 | if pdu.sender != user_id { 55 | pdu.remove_transaction_id()?; 56 | } 57 | Ok((PduCount::Normal(from), pdu)) 58 | }), 59 | )) 60 | } 61 | 62 | fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { 63 | for prev in event_ids { 64 | let mut key = room_id.as_bytes().to_vec(); 65 | key.extend_from_slice(prev.as_bytes()); 66 | self.referencedevents.insert(&key, &[])?; 67 | } 68 | 69 | Ok(()) 70 | } 71 | 72 | fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { 73 | let mut key = room_id.as_bytes().to_vec(); 74 | key.extend_from_slice(event_id.as_bytes()); 75 | Ok(self.referencedevents.get(&key)?.is_some()) 76 | } 77 | 78 | fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { 79 | self.softfailedeventids.insert(event_id.as_bytes(), &[]) 80 | } 81 | 82 | fn is_event_soft_failed(&self, event_id: &EventId) -> Result { 83 | self.softfailedeventids 84 | .get(event_id.as_bytes()) 85 | .map(|o| o.is_some()) 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /conduit-example.toml: -------------------------------------------------------------------------------- 1 | # ============================================================================= 2 | # This is the official example config for Conduit. 3 | # If you use it for your server, you will need to adjust it to your own needs. 4 | # At the very least, change the server_name field! 5 | # ============================================================================= 6 | 7 | 8 | [global] 9 | # The server_name is the pretty name of this server. It is used as a suffix for user 10 | # and room ids. Examples: matrix.org, conduit.rs 11 | 12 | # The Conduit server needs all /_matrix/ requests to be reachable at 13 | # https://your.server.name/ on port 443 (client-server) and 8448 (federation). 
14 | 15 | # If that's not possible for you, you can create /.well-known files to redirect 16 | # requests. See 17 | # https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client 18 | # and 19 | # https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server 20 | # for more information, or continue below to see how conduit can do this for you. 21 | 22 | # YOU NEED TO EDIT THIS 23 | #server_name = "your.server.name" 24 | 25 | database_backend = "rocksdb" 26 | # This is the only directory where Conduit will save its data 27 | database_path = "/var/lib/matrix-conduit/" 28 | 29 | # The port Conduit will be running on. You need to set up a reverse proxy in 30 | # your web server (e.g. apache or nginx), so all requests to /_matrix on port 31 | # 443 and 8448 will be forwarded to the Conduit instance running on this port 32 | # Docker users: Don't change this, you'll need to map an external port to this. 33 | port = 6167 34 | 35 | # Max size for uploads 36 | max_request_size = 20_000_000 # in bytes 37 | 38 | # Enables registration. If set to false, no users can register on this server. 39 | allow_registration = true 40 | 41 | # A static registration token that new users will have to provide when creating 42 | # an account. YOU NEED TO EDIT THIS. 43 | # - Insert a password that users will have to enter on registration 44 | # - Start the line with '#' to remove the condition 45 | registration_token = "" 46 | 47 | allow_check_for_updates = true 48 | allow_federation = true 49 | 50 | # Enable the display name lightning bolt on registration. 51 | enable_lightning_bolt = true 52 | 53 | # Servers listed here will be used to gather public keys of other servers. 54 | # Generally, copying this exactly should be enough. (Currently, Conduit doesn't 55 | # support batched key requests, so this list should only contain Synapse 56 | # servers.) 
57 | trusted_servers = ["matrix.org"] 58 | 59 | #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time 60 | 61 | # Controls the log verbosity. See also [here][0]. 62 | # 63 | # [0]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives 64 | #log = "..." 65 | 66 | address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy 67 | #address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (i.e. Traefik) can reach it. 68 | 69 | [global.well_known] 70 | # Conduit handles the /.well-known/matrix/* endpoints, making both clients and servers try to access conduit with the host 71 | # server_name and port 443 by default. 72 | # If you want to override these defaults, uncomment and edit the following lines accordingly: 73 | #server = "your.server.name:443" 74 | #client = "https://your.server.name" 75 | -------------------------------------------------------------------------------- /src/api/client_server/user_directory.rs: -------------------------------------------------------------------------------- 1 | use crate::{services, Result, Ruma}; 2 | use ruma::{ 3 | api::client::user_directory::search_users, 4 | events::{ 5 | room::join_rules::{JoinRule, RoomJoinRulesEventContent}, 6 | StateEventType, 7 | }, 8 | }; 9 | 10 | /// # `POST /_matrix/client/r0/user_directory/search` 11 | /// 12 | /// Searches all known users for a match. 13 | /// 14 | /// - Hides any local users that aren't in any public rooms (i.e. 
those that have the join rule set to public) 15 | /// and don't share a room with the sender 16 | pub async fn search_users_route( 17 | body: Ruma, 18 | ) -> Result { 19 | let sender_user = body.sender_user.as_ref().expect("user is authenticated"); 20 | let limit = u64::from(body.limit) as usize; 21 | 22 | let mut users = services().users.iter().filter_map(|user_id| { 23 | // Filter out buggy users (they should not exist, but you never know...) 24 | let user_id = user_id.ok()?; 25 | 26 | let user = search_users::v3::User { 27 | user_id: user_id.clone(), 28 | display_name: services().users.displayname(&user_id).ok()?, 29 | avatar_url: services().users.avatar_url(&user_id).ok()?, 30 | }; 31 | 32 | let user_id_matches = user 33 | .user_id 34 | .to_string() 35 | .to_lowercase() 36 | .contains(&body.search_term.to_lowercase()); 37 | 38 | let user_displayname_matches = user 39 | .display_name 40 | .as_ref() 41 | .filter(|name| { 42 | name.to_lowercase() 43 | .contains(&body.search_term.to_lowercase()) 44 | }) 45 | .is_some(); 46 | 47 | if !user_id_matches && !user_displayname_matches { 48 | return None; 49 | } 50 | 51 | // It's a matching user, but is the sender allowed to see them? 52 | let mut user_visible = false; 53 | 54 | let user_is_in_public_rooms = services() 55 | .rooms 56 | .state_cache 57 | .rooms_joined(&user_id) 58 | .filter_map(|r| r.ok()) 59 | .any(|room| { 60 | services() 61 | .rooms 62 | .state_accessor 63 | .room_state_get(&room, &StateEventType::RoomJoinRules, "") 64 | .is_ok_and(|event| { 65 | event.is_some_and(|event| { 66 | serde_json::from_str(event.content.get()).is_ok_and( 67 | |r: RoomJoinRulesEventContent| r.join_rule == JoinRule::Public, 68 | ) 69 | }) 70 | }) 71 | }); 72 | 73 | if user_is_in_public_rooms { 74 | user_visible = true; 75 | } else { 76 | let user_is_in_shared_rooms = services() 77 | .rooms 78 | .user 79 | .get_shared_rooms(vec![sender_user.clone(), user_id]) 80 | .ok()? 
81 | .next() 82 | .is_some(); 83 | 84 | if user_is_in_shared_rooms { 85 | user_visible = true; 86 | } 87 | } 88 | 89 | if !user_visible { 90 | return None; 91 | } 92 | 93 | Some(user) 94 | }); 95 | 96 | let results = users.by_ref().take(limit).collect(); 97 | let limited = users.next().is_some(); 98 | 99 | Ok(search_users::v3::Response { results, limited }) 100 | } 101 | -------------------------------------------------------------------------------- /src/api/client_server/alias.rs: -------------------------------------------------------------------------------- 1 | use crate::{services, Error, Result, Ruma}; 2 | use ruma::api::client::{ 3 | alias::{create_alias, delete_alias, get_alias}, 4 | error::ErrorKind, 5 | }; 6 | 7 | /// # `PUT /_matrix/client/r0/directory/room/{roomAlias}` 8 | /// 9 | /// Creates a new room alias on this server. 10 | pub async fn create_alias_route( 11 | body: Ruma, 12 | ) -> Result { 13 | let sender_user = body.sender_user.as_ref().expect("user is authenticated"); 14 | 15 | if body.room_alias.server_name() != services().globals.server_name() { 16 | return Err(Error::BadRequest( 17 | ErrorKind::InvalidParam, 18 | "Alias is from another server.", 19 | )); 20 | } 21 | 22 | if let Some(ref info) = body.appservice_info { 23 | if !info.aliases.is_match(body.room_alias.as_str()) { 24 | return Err(Error::BadRequest( 25 | ErrorKind::Exclusive, 26 | "Room alias is not in namespace.", 27 | )); 28 | } 29 | } else if services() 30 | .appservice 31 | .is_exclusive_alias(&body.room_alias) 32 | .await 33 | { 34 | return Err(Error::BadRequest( 35 | ErrorKind::Exclusive, 36 | "Room alias reserved by appservice.", 37 | )); 38 | } 39 | 40 | if services() 41 | .rooms 42 | .alias 43 | .resolve_local_alias(&body.room_alias)? 
44 | .is_some() 45 | { 46 | return Err(Error::Conflict("Alias already exists.")); 47 | } 48 | 49 | services() 50 | .rooms 51 | .alias 52 | .set_alias(&body.room_alias, &body.room_id, sender_user)?; 53 | 54 | Ok(create_alias::v3::Response::new()) 55 | } 56 | 57 | /// # `DELETE /_matrix/client/r0/directory/room/{roomAlias}` 58 | /// 59 | /// Deletes a room alias from this server. 60 | /// 61 | /// - TODO: Update canonical alias event 62 | pub async fn delete_alias_route( 63 | body: Ruma, 64 | ) -> Result { 65 | let sender_user = body.sender_user.as_ref().expect("user is authenticated"); 66 | 67 | if body.room_alias.server_name() != services().globals.server_name() { 68 | return Err(Error::BadRequest( 69 | ErrorKind::InvalidParam, 70 | "Alias is from another server.", 71 | )); 72 | } 73 | 74 | if let Some(ref info) = body.appservice_info { 75 | if !info.aliases.is_match(body.room_alias.as_str()) { 76 | return Err(Error::BadRequest( 77 | ErrorKind::Exclusive, 78 | "Room alias is not in namespace.", 79 | )); 80 | } 81 | } else if services() 82 | .appservice 83 | .is_exclusive_alias(&body.room_alias) 84 | .await 85 | { 86 | return Err(Error::BadRequest( 87 | ErrorKind::Exclusive, 88 | "Room alias reserved by appservice.", 89 | )); 90 | } 91 | 92 | services() 93 | .rooms 94 | .alias 95 | .remove_alias(&body.room_alias, sender_user)?; 96 | 97 | // TODO: update alt_aliases? 98 | 99 | Ok(delete_alias::v3::Response::new()) 100 | } 101 | 102 | /// # `GET /_matrix/client/r0/directory/room/{roomAlias}` 103 | /// 104 | /// Resolve an alias locally or over federation. 
105 | /// 106 | /// - TODO: Suggest more servers to join via 107 | pub async fn get_alias_route( 108 | body: Ruma, 109 | ) -> Result { 110 | services() 111 | .rooms 112 | .alias 113 | .get_alias_helper(body.body.room_alias) 114 | .await 115 | } 116 | -------------------------------------------------------------------------------- /src/api/appservice_server.rs: -------------------------------------------------------------------------------- 1 | use crate::{services, utils, Error, Result, SUPPORTED_VERSIONS}; 2 | use bytes::BytesMut; 3 | use ruma::api::{appservice::Registration, IncomingResponse, OutgoingRequest, SendAccessToken}; 4 | use std::{fmt::Debug, mem, time::Duration}; 5 | use tracing::warn; 6 | 7 | /// Sends a request to an appservice 8 | /// 9 | /// Only returns None if there is no url specified in the appservice registration file 10 | #[tracing::instrument(skip(request))] 11 | pub(crate) async fn send_request( 12 | registration: Registration, 13 | request: T, 14 | ) -> Result> 15 | where 16 | T: OutgoingRequest + Debug, 17 | { 18 | let destination = match registration.url { 19 | Some(url) => url, 20 | None => { 21 | return Ok(None); 22 | } 23 | }; 24 | 25 | let hs_token = registration.hs_token.as_str(); 26 | 27 | let mut http_request = request 28 | .try_into_http_request::( 29 | &destination, 30 | SendAccessToken::IfRequired(hs_token), 31 | &SUPPORTED_VERSIONS, 32 | ) 33 | .unwrap() 34 | .map(|body| body.freeze()); 35 | 36 | let mut parts = http_request.uri().clone().into_parts(); 37 | let old_path_and_query = parts.path_and_query.unwrap().as_str().to_owned(); 38 | let symbol = if old_path_and_query.contains('?') { 39 | "&" 40 | } else { 41 | "?" 
42 | }; 43 | 44 | parts.path_and_query = Some( 45 | (old_path_and_query + symbol + "access_token=" + hs_token) 46 | .parse() 47 | .unwrap(), 48 | ); 49 | *http_request.uri_mut() = parts.try_into().expect("our manipulation is always valid"); 50 | 51 | let mut reqwest_request = reqwest::Request::try_from(http_request)?; 52 | 53 | *reqwest_request.timeout_mut() = Some(Duration::from_secs(30)); 54 | 55 | let url = reqwest_request.url().clone(); 56 | let mut response = match services() 57 | .globals 58 | .default_client() 59 | .execute(reqwest_request) 60 | .await 61 | { 62 | Ok(r) => r, 63 | Err(e) => { 64 | warn!( 65 | "Could not send request to appservice {:?} at {}: {}", 66 | registration.id, destination, e 67 | ); 68 | return Err(e.into()); 69 | } 70 | }; 71 | 72 | // reqwest::Response -> http::Response conversion 73 | let status = response.status(); 74 | let mut http_response_builder = http::Response::builder() 75 | .status(status) 76 | .version(response.version()); 77 | mem::swap( 78 | response.headers_mut(), 79 | http_response_builder 80 | .headers_mut() 81 | .expect("http::response::Builder is usable"), 82 | ); 83 | 84 | let body = response.bytes().await.unwrap_or_else(|e| { 85 | warn!("server error: {}", e); 86 | Vec::new().into() 87 | }); // TODO: handle timeout 88 | 89 | if status != 200 { 90 | warn!( 91 | "Appservice returned bad response {} {}\n{}\n{:?}", 92 | destination, 93 | status, 94 | url, 95 | utils::string_from_bytes(&body) 96 | ); 97 | } 98 | 99 | let response = T::IncomingResponse::try_from_http_response( 100 | http_response_builder 101 | .body(body) 102 | .expect("reqwest body is valid http body"), 103 | ); 104 | 105 | response.map(Some).map_err(|_| { 106 | warn!( 107 | "Appservice returned invalid response bytes {}\n{}", 108 | destination, url 109 | ); 110 | Error::BadServerResponse("Server returned bad response.") 111 | }) 112 | } 113 | -------------------------------------------------------------------------------- 
/tests/sytest/show-expected-fail-tests.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # 3 | # Parses a results.tap file from SyTest output and a file containing test names (a test whitelist) 4 | # and checks whether a test name that exists in the whitelist (that should pass), failed or not. 5 | # 6 | # An optional blacklist file can be added, also containing test names, where if a test name is 7 | # present, the script will not error even if the test is in the whitelist file and failed 8 | # 9 | # For each of these files, lines starting with '#' are ignored. 10 | # 11 | # Usage ./show-expected-fail-tests.sh results.tap whitelist [blacklist] 12 | 13 | results_file=$1 14 | whitelist_file=$2 15 | blacklist_file=$3 16 | 17 | fail_build=0 18 | 19 | if [ $# -lt 2 ]; then 20 | echo "Usage: $0 results.tap whitelist [blacklist]" 21 | exit 1 22 | fi 23 | 24 | if [ ! -f "$results_file" ]; then 25 | echo "ERROR: Specified results file '${results_file}' doesn't exist." 26 | fail_build=1 27 | fi 28 | 29 | if [ ! -f "$whitelist_file" ]; then 30 | echo "ERROR: Specified test whitelist '${whitelist_file}' doesn't exist." 31 | fail_build=1 32 | fi 33 | 34 | blacklisted_tests=() 35 | 36 | # Check if a blacklist file was provided 37 | if [ $# -eq 3 ]; then 38 | # Read test blacklist file 39 | if [ ! -f "$blacklist_file" ]; then 40 | echo "ERROR: Specified test blacklist file '${blacklist_file}' doesn't exist." 
41 | fail_build=1 42 | fi 43 | 44 | # Read each line, ignoring those that start with '#' 45 | blacklisted_tests="" 46 | search_non_comments=$(grep -v '^#' ${blacklist_file}) 47 | while read -r line ; do 48 | # Record the blacklisted test name 49 | blacklisted_tests+=("${line}") 50 | done <<< "${search_non_comments}" # This allows us to edit blacklisted_tests in the while loop 51 | fi 52 | 53 | [ "$fail_build" = 0 ] || exit 1 54 | 55 | passed_but_expected_fail=$(grep ' # TODO passed but expected fail' ${results_file} | sed -E 's/^ok [0-9]+ (\(expected fail\) )?//' | sed -E 's/( \([0-9]+ subtests\))? # TODO passed but expected fail$//') 56 | tests_to_add="" 57 | already_in_whitelist="" 58 | 59 | while read -r test_name; do 60 | # Ignore empty lines 61 | [ "${test_name}" = "" ] && continue 62 | 63 | grep "^${test_name}" "${whitelist_file}" > /dev/null 2>&1 64 | if [ "$?" != "0" ]; then 65 | # Check if this test name is blacklisted 66 | if printf '%s\n' "${blacklisted_tests[@]}" | grep -q -P "^${test_name}$"; then 67 | # Don't notify about this test 68 | continue 69 | fi 70 | 71 | # Append this test_name to the existing list 72 | tests_to_add="${tests_to_add}${test_name}\n" 73 | fail_build=1 74 | else 75 | already_in_whitelist="${already_in_whitelist}${test_name}\n" 76 | fi 77 | done <<< "${passed_but_expected_fail}" 78 | 79 | # TODO: Check that the same test doesn't exist in both the whitelist and blacklist 80 | # TODO: Check that the same test doesn't appear twice in the whitelist|blacklist 81 | 82 | # Trim test output strings 83 | tests_to_add=$(IFS=$'\n' echo "${tests_to_add[*]%%'\n'}") 84 | already_in_whitelist=$(IFS=$'\n' echo "${already_in_whitelist[*]%%'\n'}") 85 | 86 | # Format output with markdown for buildkite annotation rendering purposes 87 | if [ -n "${tests_to_add}" ] && [ -n "${already_in_whitelist}" ]; then 88 | echo "### 📜 SyTest Whitelist Maintenance" 89 | fi 90 | 91 | if [ -n "${tests_to_add}" ]; then 92 | echo "**ERROR**: The following tests passed 
but are not present in \`$2\`. Please append them to the file:" 93 | echo "\`\`\`" 94 | echo -e "${tests_to_add}" 95 | echo "\`\`\`" 96 | fi 97 | 98 | if [ -n "${already_in_whitelist}" ]; then 99 | echo "**WARN**: Tests in the whitelist still marked as **expected fail**:" 100 | echo "\`\`\`" 101 | echo -e "${already_in_whitelist}" 102 | echo "\`\`\`" 103 | fi 104 | 105 | exit ${fail_build} 106 | -------------------------------------------------------------------------------- /src/service/key_backups/mod.rs: -------------------------------------------------------------------------------- 1 | mod data; 2 | pub use data::Data; 3 | 4 | use crate::Result; 5 | use ruma::{ 6 | api::client::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, 7 | serde::Raw, 8 | OwnedRoomId, RoomId, UserId, 9 | }; 10 | use std::collections::BTreeMap; 11 | 12 | pub struct Service { 13 | pub db: &'static dyn Data, 14 | } 15 | 16 | impl Service { 17 | pub fn create_backup( 18 | &self, 19 | user_id: &UserId, 20 | backup_metadata: &Raw, 21 | ) -> Result { 22 | self.db.create_backup(user_id, backup_metadata) 23 | } 24 | 25 | pub fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> { 26 | self.db.delete_backup(user_id, version) 27 | } 28 | 29 | pub fn update_backup( 30 | &self, 31 | user_id: &UserId, 32 | version: &str, 33 | backup_metadata: &Raw, 34 | ) -> Result { 35 | self.db.update_backup(user_id, version, backup_metadata) 36 | } 37 | 38 | pub fn get_latest_backup_version(&self, user_id: &UserId) -> Result> { 39 | self.db.get_latest_backup_version(user_id) 40 | } 41 | 42 | pub fn get_latest_backup( 43 | &self, 44 | user_id: &UserId, 45 | ) -> Result)>> { 46 | self.db.get_latest_backup(user_id) 47 | } 48 | 49 | pub fn get_backup( 50 | &self, 51 | user_id: &UserId, 52 | version: &str, 53 | ) -> Result>> { 54 | self.db.get_backup(user_id, version) 55 | } 56 | 57 | pub fn add_key( 58 | &self, 59 | user_id: &UserId, 60 | version: &str, 61 | room_id: &RoomId, 62 | session_id: 
&str, 63 | key_data: &Raw, 64 | ) -> Result<()> { 65 | self.db 66 | .add_key(user_id, version, room_id, session_id, key_data) 67 | } 68 | 69 | pub fn count_keys(&self, user_id: &UserId, version: &str) -> Result { 70 | self.db.count_keys(user_id, version) 71 | } 72 | 73 | pub fn get_etag(&self, user_id: &UserId, version: &str) -> Result { 74 | self.db.get_etag(user_id, version) 75 | } 76 | 77 | pub fn get_all( 78 | &self, 79 | user_id: &UserId, 80 | version: &str, 81 | ) -> Result> { 82 | self.db.get_all(user_id, version) 83 | } 84 | 85 | pub fn get_room( 86 | &self, 87 | user_id: &UserId, 88 | version: &str, 89 | room_id: &RoomId, 90 | ) -> Result>> { 91 | self.db.get_room(user_id, version, room_id) 92 | } 93 | 94 | pub fn get_session( 95 | &self, 96 | user_id: &UserId, 97 | version: &str, 98 | room_id: &RoomId, 99 | session_id: &str, 100 | ) -> Result>> { 101 | self.db.get_session(user_id, version, room_id, session_id) 102 | } 103 | 104 | pub fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> { 105 | self.db.delete_all_keys(user_id, version) 106 | } 107 | 108 | pub fn delete_room_keys( 109 | &self, 110 | user_id: &UserId, 111 | version: &str, 112 | room_id: &RoomId, 113 | ) -> Result<()> { 114 | self.db.delete_room_keys(user_id, version, room_id) 115 | } 116 | 117 | pub fn delete_room_key( 118 | &self, 119 | user_id: &UserId, 120 | version: &str, 121 | room_id: &RoomId, 122 | session_id: &str, 123 | ) -> Result<()> { 124 | self.db 125 | .delete_room_key(user_id, version, room_id, session_id) 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Conduit 2 | 3 | 4 | ### A Matrix homeserver written in Rust 5 | 6 | 7 | Please visit the [Conduit documentation](https://famedly.gitlab.io/conduit) for more information. 
8 | Alternatively you can open [docs/introduction.md](docs/introduction.md) in this repository. 9 | 10 | 11 | #### What is Matrix? 12 | 13 | [Matrix](https://matrix.org) is an open network for secure and decentralized 14 | communication. Users from every Matrix homeserver can chat with users from all 15 | other Matrix servers. You can even use bridges (also called Matrix appservices) 16 | to communicate with users outside of Matrix, like a community on Discord. 17 | 18 | #### What is the goal? 19 | 20 | An efficient Matrix homeserver that's easy to set up and just works. You can install 21 | it on a mini-computer like the Raspberry Pi to host Matrix for your family, 22 | friends or company. 23 | 24 | #### Can I try it out? 25 | 26 | Yes! You can test our Conduit instance by opening a client that supports registration tokens such as [Element web](https://app.element.io/), [Nheko](https://matrix.org/ecosystem/clients/nheko/) or [SchildiChat web](https://app.schildi.chat/) and registering on the `conduit.rs` homeserver. The registration token is "for_testing_only". Don't share personal information. Once you have registered, you can use any other [Matrix client](https://matrix.org/ecosystem/clients) to login. 27 | 28 | Server hosting for conduit.rs is donated by the Matrix.org Foundation. 29 | 30 | #### What is the current status? 31 | 32 | Conduit is Beta, meaning you can join and participate in most 33 | Matrix rooms, but not all features are supported and you might run into bugs 34 | from time to time. 35 | 36 | There are still a few important features missing: 37 | 38 | - E2EE emoji comparison over federation (E2EE chat works) 39 | - Outgoing read receipts, typing, presence over federation (incoming works) 40 | 41 | 42 | 43 | #### How can I contribute? 44 | 45 | 1. Look for an issue you would like to work on and make sure no one else is currently working on it. 46 | 2. 
Tell us that you are working on the issue (comment on the issue or chat in 47 | [#conduit:ahimsa.chat](https://matrix.to/#/#conduit:ahimsa.chat)). If it is more complicated, please explain your approach and ask questions. 48 | 3. Fork the repo, create a new branch and push commits. 49 | 4. Submit an MR 50 | 51 | #### Contact 52 | 53 | If you have any questions, feel free to 54 | - Ask in `#conduit:ahimsa.chat` on Matrix 55 | - Write an E-Mail to `conduit@koesters.xyz` 56 | - Send a direct message to `@timokoesters:fachschaften.org` on Matrix 57 | - [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new) 58 | 59 | #### Security 60 | 61 | If you believe you have found a security issue, please send a message to [Timo](https://matrix.to/#/@timo:conduit.rs) 62 | and/or [Matthias](https://matrix.to/#/@matthias:ahouansou.cz) on Matrix, or send an email to 63 | [conduit@koesters.xyz](mailto:conduit@koesters.xyz). Please do not disclose details about the issue to anyone else before 64 | a fix is released publicly. 65 | 66 | #### Thanks to 67 | 68 | Thanks to FUTO, Famedly, Prototype Fund (DLR and German BMBF) and all individuals for financially supporting this project. 
69 | 70 | Thanks to the contributors to Conduit and all libraries we use, for example: 71 | 72 | - Ruma: A clean library for the Matrix Spec in Rust 73 | - axum: A modular web framework 74 | 75 | #### Donate 76 | 77 | - Liberapay: 78 | - Bitcoin: `bc1qnnykf986tw49ur7wx9rpw2tevpsztvar5x8w4n` 79 | 80 | #### Logo 81 | 82 | - Lightning Bolt Logo: 83 | - Logo License: 84 | 85 | -------------------------------------------------------------------------------- /src/api/client_server/to_device.rs: -------------------------------------------------------------------------------- 1 | use std::collections::BTreeMap; 2 | 3 | use crate::{services, Error, Result, Ruma}; 4 | use ruma::{ 5 | api::{ 6 | client::{error::ErrorKind, to_device::send_event_to_device}, 7 | federation::{self, transactions::edu::DirectDeviceContent}, 8 | }, 9 | to_device::DeviceIdOrAllDevices, 10 | }; 11 | 12 | /// # `PUT /_matrix/client/r0/sendToDevice/{eventType}/{txnId}` 13 | /// 14 | /// Send a to-device event to a set of client devices. 15 | pub async fn send_event_to_device_route( 16 | body: Ruma, 17 | ) -> Result { 18 | let sender_user = body.sender_user.as_ref().expect("user is authenticated"); 19 | let sender_device = body.sender_device.as_deref(); 20 | 21 | // Check if this is a new transaction id 22 | if services() 23 | .transaction_ids 24 | .existing_txnid(sender_user, sender_device, &body.txn_id)? 
25 | .is_some() 26 | { 27 | return Ok(send_event_to_device::v3::Response {}); 28 | } 29 | 30 | for (target_user_id, map) in &body.messages { 31 | for (target_device_id_maybe, event) in map { 32 | if target_user_id.server_name() != services().globals.server_name() { 33 | let mut map = BTreeMap::new(); 34 | map.insert(target_device_id_maybe.clone(), event.clone()); 35 | let mut messages = BTreeMap::new(); 36 | messages.insert(target_user_id.clone(), map); 37 | let count = services().globals.next_count()?; 38 | 39 | services().sending.send_reliable_edu( 40 | target_user_id.server_name(), 41 | serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice( 42 | DirectDeviceContent { 43 | sender: sender_user.clone(), 44 | ev_type: body.event_type.clone(), 45 | message_id: count.to_string().into(), 46 | messages, 47 | }, 48 | )) 49 | .expect("DirectToDevice EDU can be serialized"), 50 | count, 51 | )?; 52 | 53 | continue; 54 | } 55 | 56 | match target_device_id_maybe { 57 | DeviceIdOrAllDevices::DeviceId(target_device_id) => { 58 | services().users.add_to_device_event( 59 | sender_user, 60 | target_user_id, 61 | target_device_id, 62 | &body.event_type.to_string(), 63 | event.deserialize_as().map_err(|_| { 64 | Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") 65 | })?, 66 | )? 
67 | } 68 | 69 | DeviceIdOrAllDevices::AllDevices => { 70 | for target_device_id in services().users.all_device_ids(target_user_id) { 71 | services().users.add_to_device_event( 72 | sender_user, 73 | target_user_id, 74 | &target_device_id?, 75 | &body.event_type.to_string(), 76 | event.deserialize_as().map_err(|_| { 77 | Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") 78 | })?, 79 | )?; 80 | } 81 | } 82 | } 83 | } 84 | } 85 | 86 | // Save transaction id with empty data 87 | services() 88 | .transaction_ids 89 | .add_txnid(sender_user, sender_device, &body.txn_id, &[])?; 90 | 91 | Ok(send_event_to_device::v3::Response {}) 92 | } 93 | -------------------------------------------------------------------------------- /src/service/globals/data.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::BTreeMap, 3 | time::{Duration, SystemTime}, 4 | }; 5 | 6 | use crate::{services, Result}; 7 | use async_trait::async_trait; 8 | use ruma::{ 9 | api::federation::discovery::{OldVerifyKey, ServerSigningKeys, VerifyKey}, 10 | serde::Base64, 11 | signatures::Ed25519KeyPair, 12 | DeviceId, MilliSecondsSinceUnixEpoch, ServerName, UserId, 13 | }; 14 | use serde::Deserialize; 15 | 16 | /// Similar to ServerSigningKeys, but drops a few unnecessary fields we don't require post-validation 17 | #[derive(Deserialize, Debug, Clone)] 18 | pub struct SigningKeys { 19 | pub verify_keys: BTreeMap, 20 | pub old_verify_keys: BTreeMap, 21 | pub valid_until_ts: MilliSecondsSinceUnixEpoch, 22 | } 23 | 24 | impl SigningKeys { 25 | /// Creates the SigningKeys struct, using the keys of the current server 26 | pub fn load_own_keys() -> Self { 27 | let mut keys = Self { 28 | verify_keys: BTreeMap::new(), 29 | old_verify_keys: BTreeMap::new(), 30 | valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time( 31 | SystemTime::now() + Duration::from_secs(7 * 86400), 32 | ) 33 | .expect("Should be valid until year 500,000,000"), 34 
| }; 35 | 36 | keys.verify_keys.insert( 37 | format!("ed25519:{}", services().globals.keypair().version()), 38 | VerifyKey { 39 | key: Base64::new(services().globals.keypair.public_key().to_vec()), 40 | }, 41 | ); 42 | 43 | keys 44 | } 45 | } 46 | 47 | impl From for SigningKeys { 48 | fn from(value: ServerSigningKeys) -> Self { 49 | let ServerSigningKeys { 50 | verify_keys, 51 | old_verify_keys, 52 | valid_until_ts, 53 | .. 54 | } = value; 55 | 56 | Self { 57 | verify_keys: verify_keys 58 | .into_iter() 59 | .map(|(id, key)| (id.to_string(), key)) 60 | .collect(), 61 | old_verify_keys: old_verify_keys 62 | .into_iter() 63 | .map(|(id, key)| (id.to_string(), key)) 64 | .collect(), 65 | valid_until_ts, 66 | } 67 | } 68 | } 69 | 70 | #[async_trait] 71 | pub trait Data: Send + Sync { 72 | fn next_count(&self) -> Result; 73 | fn current_count(&self) -> Result; 74 | fn last_check_for_updates_id(&self) -> Result; 75 | fn update_check_for_updates_id(&self, id: u64) -> Result<()>; 76 | async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()>; 77 | fn cleanup(&self) -> Result<()>; 78 | fn memory_usage(&self) -> String; 79 | fn clear_caches(&self, amount: u32); 80 | fn load_keypair(&self) -> Result; 81 | fn remove_keypair(&self) -> Result<()>; 82 | /// Only extends the cached keys, not moving any verify_keys to old_verify_keys, as if we suddenly 83 | /// receive requests from the origin server, we want to be able to accept requests from them 84 | fn add_signing_key_from_trusted_server( 85 | &self, 86 | origin: &ServerName, 87 | new_keys: ServerSigningKeys, 88 | ) -> Result; 89 | /// Extends cached keys, as well as moving verify_keys that are not present in these new keys to 90 | /// old_verify_keys, so that potentially compromised keys cannot be used to make requests 91 | fn add_signing_key_from_origin( 92 | &self, 93 | origin: &ServerName, 94 | new_keys: ServerSigningKeys, 95 | ) -> Result; 96 | 97 | /// This returns an empty `Ok(BTreeMap<..>)` when there
are no keys found for the server. 98 | fn signing_keys_for(&self, origin: &ServerName) -> Result>; 99 | fn database_version(&self) -> Result; 100 | fn bump_database_version(&self, new_version: u64) -> Result<()>; 101 | } 102 | -------------------------------------------------------------------------------- /debian/postinst: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | . /usr/share/debconf/confmodule 5 | 6 | CONDUIT_CONFIG_PATH=/etc/matrix-conduit 7 | CONDUIT_CONFIG_FILE="${CONDUIT_CONFIG_PATH}/conduit.toml" 8 | CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit/ 9 | 10 | case "$1" in 11 | configure) 12 | # Create the `_matrix-conduit` user if it does not exist yet. 13 | if ! getent passwd _matrix-conduit > /dev/null ; then 14 | echo 'Adding system user for the Conduit Matrix homeserver' 1>&2 15 | adduser --system --group --quiet \ 16 | --home "$CONDUIT_DATABASE_PATH" \ 17 | --disabled-login \ 18 | --force-badname \ 19 | _matrix-conduit 20 | fi 21 | 22 | # Create the database path if it does not exist yet and fix up ownership 23 | # and permissions. 24 | mkdir -p "$CONDUIT_DATABASE_PATH" 25 | chown _matrix-conduit "$CONDUIT_DATABASE_PATH" 26 | chmod 700 "$CONDUIT_DATABASE_PATH" 27 | 28 | if [ ! -e "$CONDUIT_CONFIG_FILE" ]; then 29 | # Write the debconf values in the config. 30 | db_get matrix-conduit/hostname 31 | CONDUIT_SERVER_NAME="$RET" 32 | db_get matrix-conduit/address 33 | CONDUIT_ADDRESS="$RET" 34 | db_get matrix-conduit/port 35 | CONDUIT_PORT="$RET" 36 | mkdir -p "$CONDUIT_CONFIG_PATH" 37 | cat > "$CONDUIT_CONFIG_FILE" << EOF 38 | [global] 39 | # The server_name is the pretty name of this server. It is used as a suffix for 40 | # user and room ids. Examples: matrix.org, conduit.rs 41 | 42 | # The Conduit server needs all /_matrix/ requests to be reachable at 43 | # https://your.server.name/ on port 443 (client-server) and 8448 (federation). 
44 | 45 | # If that's not possible for you, you can create /.well-known files to redirect 46 | # requests. See 47 | # https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client 48 | # and 49 | # https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server 50 | # for more information 51 | 52 | server_name = "${CONDUIT_SERVER_NAME}" 53 | 54 | # This is the only directory where Conduit will save its data. 55 | database_path = "${CONDUIT_DATABASE_PATH}" 56 | database_backend = "rocksdb" 57 | 58 | # The address Conduit will be listening on. 59 | # By default the server listens on address 0.0.0.0. Change this to 127.0.0.1 to 60 | # only listen on the localhost when using a reverse proxy. 61 | address = "${CONDUIT_ADDRESS}" 62 | 63 | # The port Conduit will be running on. You need to set up a reverse proxy in 64 | # your web server (e.g. apache or nginx), so all requests to /_matrix on port 65 | # 443 and 8448 will be forwarded to the Conduit instance running on this port 66 | # Docker users: Don't change this, you'll need to map an external port to this. 67 | port = ${CONDUIT_PORT} 68 | 69 | # Max size for uploads 70 | max_request_size = 20_000_000 # in bytes 71 | 72 | # Enables registration. If set to false, no users can register on this server. 73 | allow_registration = true 74 | 75 | # A static registration token that new users will have to provide when creating 76 | # an account. 77 | # - Insert a password that users will have to enter on registration 78 | # - Start the line with '#' to remove the condition 79 | #registration_token = "" 80 | 81 | allow_federation = true 82 | allow_check_for_updates = true 83 | 84 | # Enable the display name lightning bolt on registration. 85 | enable_lightning_bolt = true 86 | 87 | # Servers listed here will be used to gather public keys of other servers. 88 | # Generally, copying this exactly should be enough. 
(Currently, Conduit doesn't 89 | # support batched key requests, so this list should only contain Synapse 90 | # servers.) 91 | trusted_servers = ["matrix.org"] 92 | 93 | #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time 94 | 95 | # Controls the log verbosity. See also [here][0]. 96 | # 97 | # [0]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives 98 | #log = "..." 99 | EOF 100 | fi 101 | ;; 102 | esac 103 | 104 | #DEBHELPER# 105 | -------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | { 2 | inputs = { 3 | nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable"; 4 | flake-utils.url = "github:numtide/flake-utils"; 5 | nix-filter.url = "github:numtide/nix-filter"; 6 | flake-compat = { 7 | url = "github:edolstra/flake-compat"; 8 | flake = false; 9 | }; 10 | 11 | fenix = { 12 | url = "github:nix-community/fenix"; 13 | inputs.nixpkgs.follows = "nixpkgs"; 14 | }; 15 | # Pinned because crane's own automatic cross compilation configuration that they 16 | # introduce in the next commit attempts to link the musl targets against glibc 17 | # for some reason. Unpin once this is fixed. 
18 | crane.url = "github:ipetkov/crane?rev=bb1c9567c43e4434f54e9481eb4b8e8e0d50f0b5"; 19 | attic.url = "github:zhaofengli/attic?ref=main"; 20 | }; 21 | 22 | outputs = inputs: 23 | let 24 | # Keep sorted 25 | mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: { 26 | craneLib = 27 | (inputs.crane.mkLib pkgs).overrideToolchain (_: self.toolchain); 28 | 29 | default = self.callPackage ./nix/pkgs/default {}; 30 | 31 | inherit inputs; 32 | 33 | oci-image = self.callPackage ./nix/pkgs/oci-image {}; 34 | 35 | book = self.callPackage ./nix/pkgs/book {}; 36 | 37 | rocksdb = 38 | let 39 | version = "10.5.1"; 40 | in 41 | pkgs.rocksdb.overrideAttrs (old: { 42 | inherit version; 43 | src = pkgs.fetchFromGitHub { 44 | owner = "facebook"; 45 | repo = "rocksdb"; 46 | rev = "v${version}"; 47 | hash = "sha256-TDYXzYbOLhcIRi+qi0FW1OLVtfKOF+gUbj62Tgpp3/E="; 48 | }; 49 | }); 50 | 51 | shell = self.callPackage ./nix/shell.nix {}; 52 | 53 | # The Rust toolchain to use 54 | toolchain = inputs 55 | .fenix 56 | .packages 57 | .${pkgs.pkgsBuildHost.system} 58 | .fromToolchainFile { 59 | file = ./rust-toolchain.toml; 60 | 61 | # See also `rust-toolchain.toml` 62 | sha256 = "sha256-AJ6LX/Q/Er9kS15bn9iflkUwcgYqRQxiOIL2ToVAXaU="; 63 | }; 64 | }); 65 | in 66 | inputs.flake-utils.lib.eachDefaultSystem (system: 67 | let 68 | pkgs = (import inputs.nixpkgs { 69 | inherit system; 70 | 71 | # libolm is deprecated, but we only need it for complement 72 | config.permittedInsecurePackages = [ 73 | "olm-3.2.16" 74 | ]; 75 | }); 76 | in 77 | { 78 | packages = { 79 | default = (mkScope pkgs).default; 80 | oci-image = (mkScope pkgs).oci-image; 81 | book = (mkScope pkgs).book; 82 | } 83 | // 84 | builtins.listToAttrs 85 | (builtins.concatLists 86 | (builtins.map 87 | (crossSystem: 88 | let 89 | binaryName = "static-${crossSystem}"; 90 | pkgsCrossStatic = 91 | (import inputs.nixpkgs { 92 | inherit system; 93 | crossSystem = { 94 | config = crossSystem; 95 | }; 96 | }).pkgsStatic; 97 | in 98 | [ 99 | # An 
output for a statically-linked binary 100 | { 101 | name = binaryName; 102 | value = (mkScope pkgsCrossStatic).default; 103 | } 104 | 105 | # An output for an OCI image based on that binary 106 | { 107 | name = "oci-image-${crossSystem}"; 108 | value = (mkScope pkgsCrossStatic).oci-image; 109 | } 110 | ] 111 | ) 112 | [ 113 | "x86_64-unknown-linux-musl" 114 | "aarch64-unknown-linux-musl" 115 | ] 116 | ) 117 | ); 118 | 119 | devShells.default = (mkScope pkgs).shell; 120 | } 121 | ); 122 | } 123 | -------------------------------------------------------------------------------- /src/api/client_server/tag.rs: -------------------------------------------------------------------------------- 1 | use crate::{services, Error, Result, Ruma}; 2 | use ruma::{ 3 | api::client::tag::{create_tag, delete_tag, get_tags}, 4 | events::{ 5 | tag::{TagEvent, TagEventContent}, 6 | RoomAccountDataEventType, 7 | }, 8 | }; 9 | use std::collections::BTreeMap; 10 | 11 | /// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}` 12 | /// 13 | /// Adds a tag to the room. 14 | /// 15 | /// - Inserts the tag into the tag event of the room account data. 
16 | pub async fn update_tag_route( 17 | body: Ruma, 18 | ) -> Result { 19 | let sender_user = body.sender_user.as_ref().expect("user is authenticated"); 20 | 21 | let event = services().account_data.get( 22 | Some(&body.room_id), 23 | sender_user, 24 | RoomAccountDataEventType::Tag, 25 | )?; 26 | 27 | let mut tags_event = event 28 | .map(|e| { 29 | serde_json::from_str(e.get()) 30 | .map_err(|_| Error::bad_database("Invalid account data event in db.")) 31 | }) 32 | .unwrap_or_else(|| { 33 | Ok(TagEvent { 34 | content: TagEventContent { 35 | tags: BTreeMap::new(), 36 | }, 37 | }) 38 | })?; 39 | 40 | tags_event 41 | .content 42 | .tags 43 | .insert(body.tag.clone().into(), body.tag_info.clone()); 44 | 45 | services().account_data.update( 46 | Some(&body.room_id), 47 | sender_user, 48 | RoomAccountDataEventType::Tag, 49 | &serde_json::to_value(tags_event).expect("to json value always works"), 50 | )?; 51 | 52 | Ok(create_tag::v3::Response {}) 53 | } 54 | 55 | /// # `DELETE /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}` 56 | /// 57 | /// Deletes a tag from the room. 58 | /// 59 | /// - Removes the tag from the tag event of the room account data. 
60 | pub async fn delete_tag_route( 61 | body: Ruma, 62 | ) -> Result { 63 | let sender_user = body.sender_user.as_ref().expect("user is authenticated"); 64 | 65 | let event = services().account_data.get( 66 | Some(&body.room_id), 67 | sender_user, 68 | RoomAccountDataEventType::Tag, 69 | )?; 70 | 71 | let mut tags_event = event 72 | .map(|e| { 73 | serde_json::from_str(e.get()) 74 | .map_err(|_| Error::bad_database("Invalid account data event in db.")) 75 | }) 76 | .unwrap_or_else(|| { 77 | Ok(TagEvent { 78 | content: TagEventContent { 79 | tags: BTreeMap::new(), 80 | }, 81 | }) 82 | })?; 83 | 84 | tags_event.content.tags.remove(&body.tag.clone().into()); 85 | 86 | services().account_data.update( 87 | Some(&body.room_id), 88 | sender_user, 89 | RoomAccountDataEventType::Tag, 90 | &serde_json::to_value(tags_event).expect("to json value always works"), 91 | )?; 92 | 93 | Ok(delete_tag::v3::Response {}) 94 | } 95 | 96 | /// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags` 97 | /// 98 | /// Returns tags on the room. 99 | /// 100 | /// - Gets the tag event of the room account data. 101 | pub async fn get_tags_route(body: Ruma) -> Result { 102 | let sender_user = body.sender_user.as_ref().expect("user is authenticated"); 103 | 104 | let event = services().account_data.get( 105 | Some(&body.room_id), 106 | sender_user, 107 | RoomAccountDataEventType::Tag, 108 | )?; 109 | 110 | let tags_event = event 111 | .map(|e| { 112 | serde_json::from_str(e.get()) 113 | .map_err(|_| Error::bad_database("Invalid account data event in db.")) 114 | }) 115 | .unwrap_or_else(|| { 116 | Ok(TagEvent { 117 | content: TagEventContent { 118 | tags: BTreeMap::new(), 119 | }, 120 | }) 121 | })?; 122 | 123 | Ok(get_tags::v3::Response { 124 | tags: tags_event.content.tags, 125 | }) 126 | } 127 | --------------------------------------------------------------------------------