├── .gitignore ├── docs ├── .gitignore ├── book.toml ├── README.md └── src │ ├── SUMMARY.md │ ├── integration │ ├── protocol-impl.md │ ├── crates.md │ └── pre-built.md │ ├── integration.md │ ├── usage.md │ ├── introduction.md │ └── usage │ ├── docker-compose.md │ ├── docker-images.md │ └── binaries.md ├── .pre-commit-config.yaml ├── server ├── src │ ├── lib.rs │ ├── bin │ │ ├── taskchampion-sync-server.rs │ │ └── taskchampion-sync-server-postgres.rs │ ├── web.rs │ ├── api │ │ ├── get_snapshot.rs │ │ ├── mod.rs │ │ ├── add_snapshot.rs │ │ ├── get_child_version.rs │ │ └── add_version.rs │ └── args.rs └── Cargo.toml ├── .dockerignore ├── sqlite ├── README.md ├── Cargo.toml ├── tests │ └── concurrency.rs └── src │ └── lib.rs ├── postgres ├── README.md ├── schema.sql ├── Cargo.toml └── src │ ├── testing.rs │ └── lib.rs ├── core ├── README.md ├── src │ ├── error.rs │ ├── lib.rs │ ├── storage.rs │ ├── inmemory.rs │ └── server.rs └── Cargo.toml ├── .github ├── workflows │ ├── security.yml │ ├── add-to-project.yml │ ├── publish-docs.yml │ ├── rust-tests.yml │ ├── docker.yml │ └── checks.yml └── dependabot.yml ├── SECURITY.md ├── entrypoint-sqlite.sh ├── entrypoint-postgres.sh ├── Dockerfile-sqlite ├── CONTRIBUTING.md ├── LICENSE ├── Dockerfile-postgres ├── Cargo.toml ├── RELEASING.md ├── docker-compose.yml ├── README.md └── CODE_OF_CONDUCT.md /.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | -------------------------------------------------------------------------------- /docs/.gitignore: -------------------------------------------------------------------------------- 1 | book 2 | tmp 3 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | ci: {} 2 | repos: [] 3 | -------------------------------------------------------------------------------- /server/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![deny(clippy::all)] 2 | 3 | pub mod api; 4 | pub mod args; 5 | pub mod web; 6 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | * 2 | !Cargo.toml 3 | !Cargo.lock 4 | !core/ 5 | !server/ 6 | !sqlite/ 7 | !postgres/ 8 | !entrypoint-* 9 | !Dockerfile* 10 | -------------------------------------------------------------------------------- /sqlite/README.md: -------------------------------------------------------------------------------- 1 | # taskchampion-sync-server-storage-sqlite 2 | 3 | This crate implements a SQLite storage backend for the 4 | `taskchampion-sync-server-core`. 5 | -------------------------------------------------------------------------------- /postgres/README.md: -------------------------------------------------------------------------------- 1 | # taskchampion-sync-server-storage-postgres 2 | 3 | This crate implements a Postgres storage backend for the 4 | `taskchampion-sync-server-core`. 5 | -------------------------------------------------------------------------------- /docs/book.toml: -------------------------------------------------------------------------------- 1 | [book] 2 | authors = ["Dustin J. 
Mitchell"] 3 | language = "en" 4 | multilingual = false 5 | src = "src" 6 | title = "TaskChampion Sync Server" 7 | 8 | [output.html] 9 | default-theme = "ayu" 10 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | This is an [mdbook](https://rust-lang.github.io/mdBook/index.html) book. 2 | Minor modifications can be made without installing the mdbook tool, as the content is simple Markdown. 3 | Changes are verified on pull requests. 4 | -------------------------------------------------------------------------------- /core/README.md: -------------------------------------------------------------------------------- 1 | # taskchampion-sync-server-core 2 | 3 | This crate implements the core logic of the taskchampion sync protocol. 4 | 5 | This should be considered a reference implementation, with [the protocol 6 | documentation](https://gothenburgbitfactory.org/taskchampion/sync-protocol.html). 7 | representing the authoritative definition of the protocol. Other 8 | implementations are encouraged. 9 | -------------------------------------------------------------------------------- /docs/src/SUMMARY.md: -------------------------------------------------------------------------------- 1 | # Summary 2 | 3 | - [Introduction](./introduction.md) 4 | - [Usage](./usage.md) 5 | - [Docker Compose](./usage/docker-compose.md) 6 | - [Docker Images](./usage/docker-images.md) 7 | - [Binaries](./usage/binaries.md) 8 | - [Integration](./integration.md) 9 | - [Pre-built Images](./integration/pre-built.md) 10 | - [Rust Crates](./integration/crates.md) 11 | - [Sync Protocol Implementation](./integration/protocol-impl.md) 12 | -------------------------------------------------------------------------------- /.github/workflows/security.yml: -------------------------------------------------------------------------------- 1 | name: security 2 | 3 | on: 4 | schedule: 5 | - cron: '33 0 * * THU' 6 | push: 7 | paths: 8 | - '**/Cargo.toml' 9 | - '**/Cargo.lock' 10 | 11 | jobs: 12 | audit: 13 | runs-on: ubuntu-latest 14 | permissions: write-all 15 | name: "Audit Rust Dependencies" 16 | steps: 17 | - uses: actions/checkout@v6 18 | - uses: rustsec/audit-check@v2.0.0 19 | with: 20 | token: ${{ secrets.GITHUB_TOKEN }} 21 | -------------------------------------------------------------------------------- /core/src/error.rs: -------------------------------------------------------------------------------- 1 | /// An error from the [`crate::Server`] type. 2 | /// 3 | /// This type represents only circumstances outside the realm of the protocol, and not the specific 4 | /// results descriebd in the protocol documentation. 5 | #[derive(Debug, thiserror::Error)] 6 | pub enum ServerError { 7 | /// There is no client with the given ClientId. 8 | #[error("No such client")] 9 | NoSuchClient, 10 | 11 | #[error(transparent)] 12 | Other(#[from] anyhow::Error), 13 | } 14 | -------------------------------------------------------------------------------- /docs/src/integration/protocol-impl.md: -------------------------------------------------------------------------------- 1 | # Sync Protocol Implementation 2 | 3 | The [sync protocol](https://gothenburgbitfactory.org/taskchampion/sync.html) is 4 | an open specification, and can be re-implemented from that specification as 5 | desired. 
This specification is not battle-tested, so refer to 6 | taskchampion-sync-server's implementation to resolve any ambiguities, and 7 | please create pull requests to resolve the ambiguity in the specification. 8 | 9 | We suggest that new implementations be published as open-source packages where 10 | possible. 11 | -------------------------------------------------------------------------------- /.github/workflows/add-to-project.yml: -------------------------------------------------------------------------------- 1 | # This adds all new issues to the Taskwarrior project, for better tracking. 2 | # It uses a PAT that belongs to @taskwarrior. 3 | name: Add issues to Taskwarrior Project 4 | 5 | on: 6 | issues: 7 | types: 8 | - opened 9 | 10 | jobs: 11 | add-to-project: 12 | name: Add issue to project 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/add-to-project@v1.0.2 16 | with: 17 | project-url: https://github.com/orgs/GothenburgBitFactory/projects/4 18 | github-token: ${{ secrets.ADD_TO_PROJECT_PAT }} 19 | -------------------------------------------------------------------------------- /postgres/schema.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE clients ( 2 | client_id UUID PRIMARY KEY, 3 | latest_version_id UUID default '00000000-0000-0000-0000-000000000000', 4 | snapshot_version_id UUID, 5 | versions_since_snapshot INTEGER, 6 | snapshot_timestamp BIGINT, 7 | snapshot BYTEA); 8 | 9 | CREATE TABLE versions ( 10 | client_id UUID NOT NULL, 11 | FOREIGN KEY(client_id) REFERENCES clients (client_id) ON DELETE CASCADE, 12 | version_id UUID NOT NULL, 13 | parent_version_id UUID, 14 | history_segment BYTEA, 15 | CONSTRAINT versions_pkey PRIMARY KEY (client_id, version_id) 16 | ); 17 | CREATE INDEX versions_by_parent ON versions (parent_version_id); 18 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | # Enable version updates for GitHub actions 4 | - package-ecosystem: "github-actions" 5 | directory: "/" 6 | schedule: 7 | interval: "weekly" 8 | # Enable updates for Rust packages 9 | - package-ecosystem: "cargo" 10 | directory: "/" # Location of package manifests 11 | schedule: 12 | interval: "daily" 13 | ignore: 14 | # skip patch updates, as they can be quite noisy, but keep 15 | # minor and major updates so that we don't fall too far 16 | # behind 17 | - dependency-name: "*" 18 | update-types: ["version-update:semver-patch"] 19 | -------------------------------------------------------------------------------- /.github/workflows/publish-docs.yml: -------------------------------------------------------------------------------- 1 | name: docs 2 | 3 | on: 4 | push: 5 | tags: 6 | - '*' 7 | 8 | permissions: 9 | contents: write 10 | 11 | jobs: 12 | mdbook-deploy: 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - uses: actions/checkout@v6 17 | 18 | - name: Setup mdBook 19 | uses: peaceiris/actions-mdbook@v2 20 | with: 21 | # if this changes, change it in .github/workflows/checks.yml as well 22 | mdbook-version: '0.4.48' 23 | 24 | - run: mdbook build docs 25 | 26 | - name: Deploy 27 | uses: peaceiris/actions-gh-pages@v4 28 | with: 29 | github_token: ${{ secrets.GITHUB_TOKEN }} 30 | publish_dir: ./docs/book 31 | -------------------------------------------------------------------------------- /core/Cargo.toml: 
-------------------------------------------------------------------------------- 1 | [package] 2 | name = "taskchampion-sync-server-core" 3 | version = "0.7.2-pre" 4 | authors = ["Dustin J. Mitchell <dustin@cs.uchicago.edu>"] 5 | edition = "2021" 6 | description = "Core of sync protocol for TaskChampion" 7 | homepage = "https://github.com/GothenburgBitFactory/taskchampion" 8 | repository = "https://github.com/GothenburgBitFactory/taskchampion-sync-server" 9 | license = "MIT" 10 | 11 | [dependencies] 12 | uuid.workspace = true 13 | async-trait.workspace = true 14 | anyhow.workspace = true 15 | thiserror.workspace = true 16 | log.workspace = true 17 | env_logger.workspace = true 18 | chrono.workspace = true 19 | 20 | [dev-dependencies] 21 | pretty_assertions.workspace = true 22 | tokio.workspace = true 23 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security 2 | 3 | To report a vulnerability, please contact Dustin via Signal, [`djmitche.78`](https://signal.me/#eu/2T98jpkMAzvFL2wg3OkZnNrfhk1DFfu6eqkMEPqcAuCsLZPVk39A67rp4khmrMNF). 4 | Initial response is expected within ~48h. 5 | 6 | We kindly ask you to follow the responsible disclosure model and refrain from sharing information until: 7 | 8 | 1. Vulnerabilities are patched in `taskchampion-sync-server`, plus 60 days to coordinate with distributions. 9 | 2. 90 days have passed since the vulnerability was disclosed to us. 10 | 11 | We recognise the legitimacy of public interest and accept that security researchers can unilaterally publish information after the 90-day deadline. 12 | 13 | We will assist with obtaining a CVE and will acknowledge the reported vulnerabilities. 14 | -------------------------------------------------------------------------------- /docs/src/integration.md: -------------------------------------------------------------------------------- 1 | # Integration 2 | 3 | Taskchampion-sync-server can be integrated into larger applications, such as 4 | web-based hosting services. 5 | 6 | - Most deployments can simply use the pre-built Docker images to implement the 7 | sync protocol, handling other aspects of the application in separate 8 | containers. See [Pre-built Images](./integration/pre-built.md). 9 | 10 | - More complex deployments may wish to modify or extend the operation of the 11 | server. These can use the Rust crates to build precisely the desired 12 | functionality. See [Rust Crates](./integration/crates.md). 13 | 14 | - If desired, an integration may completely re-implement the [sync 15 | protocol](https://gothenburgbitfactory.org/taskchampion/sync.html). See [Sync 16 | Protocol Implementation](./integration/protocol-impl.md). 17 | -------------------------------------------------------------------------------- /sqlite/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "taskchampion-sync-server-storage-sqlite" 3 | version = "0.7.2-pre" 4 | authors = ["Dustin J. 
Mitchell "] 5 | edition = "2021" 6 | description = "SQLite backend for TaskChampion-sync-server" 7 | homepage = "https://github.com/GothenburgBitFactory/taskchampion" 8 | repository = "https://github.com/GothenburgBitFactory/taskchampion-sync-server" 9 | license = "MIT" 10 | 11 | [dependencies] 12 | taskchampion-sync-server-core = { path = "../core", version = "0.7.2-pre" } 13 | async-trait.workspace = true 14 | uuid.workspace = true 15 | anyhow.workspace = true 16 | thiserror.workspace = true 17 | rusqlite.workspace = true 18 | chrono.workspace = true 19 | 20 | [dev-dependencies] 21 | tempfile.workspace = true 22 | pretty_assertions.workspace = true 23 | tokio.workspace = true 24 | -------------------------------------------------------------------------------- /entrypoint-sqlite.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | echo "starting entrypoint script..." 4 | if [ "$1" = "/bin/taskchampion-sync-server" ]; then 5 | : ${DATA_DIR:=/var/lib/taskchampion-sync-server/data} 6 | export DATA_DIR 7 | echo "setting up data directory ${DATA_DIR}" 8 | mkdir -p "${DATA_DIR}" 9 | chown -R taskchampion:users "${DATA_DIR}" 10 | chmod -R 700 "${DATA_DIR}" 11 | 12 | : ${LISTEN:=0.0.0.0:8080} 13 | export LISTEN 14 | echo "Listen set to ${LISTEN}" 15 | 16 | if [ -n "${CLIENT_ID}" ]; then 17 | export CLIENT_ID 18 | echo "Limiting to client ID ${CLIENT_ID}" 19 | else 20 | unset CLIENT_ID 21 | fi 22 | 23 | if [ "$(id -u)" = "0" ]; then 24 | echo "Running server as user 'taskchampion'" 25 | exec su-exec taskchampion "$@" 26 | fi 27 | else 28 | eval "${@}" 29 | fi 30 | -------------------------------------------------------------------------------- /entrypoint-postgres.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | echo "starting entrypoint script..." 4 | if [ "$1" = "/bin/taskchampion-sync-server-postgres" ]; then 5 | : ${DATA_DIR:=/var/lib/taskchampion-sync-server/data} 6 | export DATA_DIR 7 | echo "setting up data directory ${DATA_DIR}" 8 | mkdir -p "${DATA_DIR}" 9 | chown -R taskchampion:users "${DATA_DIR}" 10 | chmod -R 700 "${DATA_DIR}" 11 | 12 | : ${LISTEN:=0.0.0.0:8080} 13 | export LISTEN 14 | echo "Listen set to ${LISTEN}" 15 | 16 | if [ -n "${CLIENT_ID}" ]; then 17 | export CLIENT_ID 18 | echo "Limiting to client ID ${CLIENT_ID}" 19 | else 20 | unset CLIENT_ID 21 | fi 22 | 23 | if [ "$(id -u)" = "0" ]; then 24 | echo "Running server as user 'taskchampion'" 25 | exec su-exec taskchampion "$@" 26 | fi 27 | else 28 | eval "${@}" 29 | fi 30 | -------------------------------------------------------------------------------- /docs/src/integration/crates.md: -------------------------------------------------------------------------------- 1 | # Rust Crates 2 | 3 | This project publishes several Rust crates on `crates.io`: 4 | 5 | - [`taskchampion-sync-server-core`](https://docs.rs/taskchampion-sync-server-core) 6 | implements the core of the protocol 7 | - [`taskchampion-sync-server-storage-sqlite`](https://docs.rs/taskchampion-sync-server-storage-sqlite) 8 | implements an SQLite backend for the core 9 | - [`taskchampion-sync-server-storage-postgres`](https://docs.rs/taskchampion-sync-server-storage-postgres) 10 | implements a Postgres backend for the core 11 | 12 | If you are building an integration with, for example, a custom storage system, 13 | it may be helpful to use the `core` crate and provide a custom implementation 14 | of its `Storage` trait. 
15 | 16 | We suggest that any generally useful extensions, such as additional storage 17 | backends, be published as open-source packages. 18 | -------------------------------------------------------------------------------- /core/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! This crate implements the core logic of the taskchampion sync protocol. 2 | //! 3 | //! This should be considered a reference implementation, with [the protocol 4 | //! documentation](https://gothenburgbitfactory.org/taskchampion/sync-protocol.html) representing 5 | //! the authoritative definition of the protocol. Other implementations are encouraged. 6 | //! 7 | //! This crate uses an abstract storage backend. Note that this does not implement the 8 | //! HTTP-specific portions of the protocol, nor provide any storage implementations. 9 | //! 10 | //! ## Usage 11 | //! 12 | //! To use, create a new [`Server`] instance and call the relevant protocol API methods. The 13 | //! arguments and return values correspond closely to the protocol documentation. 14 | 15 | mod error; 16 | mod inmemory; 17 | mod server; 18 | mod storage; 19 | 20 | pub use error::*; 21 | pub use inmemory::*; 22 | pub use server::*; 23 | pub use storage::*; 24 | -------------------------------------------------------------------------------- /docs/src/usage.md: -------------------------------------------------------------------------------- 1 | # Usage 2 | 3 | This repository is flexible and can be used in a number of ways, to suit your 4 | needs. 5 | 6 | - If you only need a place to sync your tasks, using cloud storage may be 7 | cheaper and easier than running taskchampion-sync-server. See 8 | [task-sync(5)](http://taskwarrior.org/docs/man/task-sync.5/) for details on 9 | cloud storage. 10 | 11 | - If you have a publicly accessible server, such as a VPS, you can use `docker 12 | compose` to run taskchampion-sync-server as pre-built docker images. See 13 | [Docker Compose](./usage/docker-compose.md). 14 | 15 | - If you would like more control, such as to deploy taskchampion-sync-server 16 | within an orchestration environment such as Kubernetes, you can deploy the 17 | docker images directly. See [Docker Images](./usage/docker-images.md). 18 | 19 | - For even more control, or to avoid the overhead of container images, you can 20 | build and run the taskchampion-sync-server binary directly. See 21 | [Binaries](./usage/binaries.md). 
22 | 23 | -------------------------------------------------------------------------------- /Dockerfile-sqlite: -------------------------------------------------------------------------------- 1 | # Versions must be major.minor 2 | # Default versions are as below 3 | ARG RUST_VERSION=1.85 4 | ARG ALPINE_VERSION=3.20 5 | 6 | FROM docker.io/rust:${RUST_VERSION}-alpine${ALPINE_VERSION} AS builder 7 | RUN apk -U add libc-dev 8 | COPY Cargo.lock Cargo.toml /data/ 9 | COPY core /data/core/ 10 | COPY server /data/server/ 11 | COPY postgres /data/postgres/ 12 | COPY sqlite /data/sqlite/ 13 | RUN cd /data && \ 14 | cargo build --release --bin taskchampion-sync-server 15 | 16 | FROM docker.io/alpine:${ALPINE_VERSION} 17 | COPY --from=builder /data/target/release/taskchampion-sync-server /bin 18 | RUN apk add --no-cache su-exec && \ 19 | adduser -u 1092 -S -D -H -h /var/lib/taskchampion-sync-server -s /sbin/nologin -G users \ 20 | -g taskchampion taskchampion && \ 21 | install -d -m1755 -o1092 -g1092 "/var/lib/taskchampion-sync-server" 22 | EXPOSE 8080 23 | VOLUME /var/lib/taskchampion-sync-server/data 24 | COPY entrypoint-sqlite.sh /bin/entrypoint.sh 25 | ENTRYPOINT [ "/bin/entrypoint.sh" ] 26 | CMD [ "/bin/taskchampion-sync-server" ] 27 | -------------------------------------------------------------------------------- /postgres/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "taskchampion-sync-server-storage-postgres" 3 | version = "0.7.2-pre" 4 | authors = ["Dustin J. Mitchell "] 5 | edition = "2021" 6 | description = "Postgres backend for TaskChampion-sync-server" 7 | homepage = "https://github.com/GothenburgBitFactory/taskchampion" 8 | repository = "https://github.com/GothenburgBitFactory/taskchampion-sync-server" 9 | license = "MIT" 10 | 11 | [dependencies] 12 | anyhow.workspace = true 13 | async-trait.workspace = true 14 | bb8-postgres.workspace = true 15 | bb8.workspace = true 16 | chrono.workspace = true 17 | env_logger.workspace = true 18 | log.workspace = true 19 | taskchampion-sync-server-core = { path = "../core", version = "0.7.2-pre" } 20 | thiserror.workspace = true 21 | tokio-postgres.workspace = true 22 | tokio.workspace = true 23 | uuid.workspace = true 24 | openssl.workspace = true 25 | native-tls.workspace = true 26 | postgres-native-tls.workspace = true 27 | 28 | [dev-dependencies] 29 | tempfile.workspace = true 30 | pretty_assertions.workspace = true 31 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Welcome 2 | 3 | TaskChampion sync-server is very open to contributions, and we'd love to have your help! 4 | 5 | A good starting point might be one of the issues tagged with ["good first issue"][first]. 6 | 7 | [first]: https://github.com/GothenburgBitFactory/taskchampion-sync-server/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 8 | 9 | # Development Guide 10 | 11 | This repository is a typical Rust application. 12 | To work on it, you'll need to [install a recent version of Rust](https://www.rust-lang.org/tools/install) (the latest stable is always a good choice). 13 | Once you've done that, run `cargo build` at the top level of this repository to build the binary. 14 | Alternately, run `cargo test` to run the test suite. 
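If you are only touching one of the workspace crates, the usual Cargo package-selection flags work as expected. For example (a hypothetical session; the package names come from the crate manifests in this repository):

```sh
# Build the whole workspace.
cargo build

# Run only the core protocol tests, then only the SQLite storage tests.
cargo test -p taskchampion-sync-server-core
cargo test -p taskchampion-sync-server-storage-sqlite
```

The Postgres crate's tests additionally expect a `TEST_DB_URL` environment variable pointing at a scratch database, and are skipped when it is unset.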
15 | 16 | ## Making a Pull Request 17 | 18 | We expect contributors to follow the [GitHub Flow](https://guides.github.com/introduction/flow/). 19 | Aside from that, we have no particular requirements on pull requests. 20 | Make your patch, double-check that it's complete (tests? docs? documentation comments?), and make a new pull request. 21 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Dustin J. Mitchell 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Dockerfile-postgres: -------------------------------------------------------------------------------- 1 | # Versions must be major.minor 2 | # Default versions are as below 3 | ARG RUST_VERSION=1.85 4 | ARG ALPINE_VERSION=3.20 5 | 6 | FROM docker.io/rust:${RUST_VERSION}-alpine${ALPINE_VERSION} AS builder 7 | # perl and make are required to build openssl. 
8 | RUN apk -U add libc-dev perl make 9 | COPY Cargo.lock Cargo.toml /data/ 10 | COPY core /data/core/ 11 | COPY server /data/server/ 12 | COPY postgres /data/postgres/ 13 | COPY sqlite /data/sqlite/ 14 | RUN cd /data && \ 15 | cargo build -p taskchampion-sync-server --release --no-default-features --features postgres --bin taskchampion-sync-server-postgres 16 | 17 | FROM docker.io/alpine:${ALPINE_VERSION} 18 | COPY --from=builder /data/target/release/taskchampion-sync-server-postgres /bin 19 | RUN apk add --no-cache su-exec && \ 20 | adduser -u 1092 -S -D -H -h /var/lib/taskchampion-sync-server -s /sbin/nologin -G users \ 21 | -g taskchampion taskchampion && \ 22 | install -d -m1755 -o1092 -g1092 "/var/lib/taskchampion-sync-server" 23 | EXPOSE 8080 24 | COPY entrypoint-postgres.sh /bin/entrypoint.sh 25 | ENTRYPOINT [ "/bin/entrypoint.sh" ] 26 | CMD [ "/bin/taskchampion-sync-server-postgres" ] 27 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | resolver = "2" 3 | members = [ 4 | "core", 5 | "server", 6 | "sqlite", 7 | "postgres", 8 | ] 9 | rust-version = "1.85.0" # MSRV 10 | 11 | [workspace.dependencies] 12 | async-trait = "0.1.88" 13 | uuid = { version = "^1.19.0", features = ["serde", "v4"] } 14 | actix-web = "^4.11.0" 15 | anyhow = "1.0" 16 | thiserror = "2.0" 17 | futures = "^0.3.25" 18 | serde_json = "^1.0" 19 | serde = { version = "^1.0.147", features = ["derive"] } 20 | clap = { version = "^4.5.6", features = ["string", "env"] } 21 | log = "^0.4.17" 22 | env_logger = "^0.11.7" 23 | rusqlite = { version = "0.37", features = ["bundled"] } 24 | chrono = { version = "^0.4.38", features = ["serde"] } 25 | actix-rt = "2" 26 | tempfile = "3" 27 | pretty_assertions = "1" 28 | temp-env = "0.3" 29 | tokio = { version = "1.48", features = ["rt", "macros"] } 30 | tokio-postgres = { version = "0.7.13", features = ["with-uuid-1"] } 31 | bb8 = "0.9.0" 32 | bb8-postgres = { version = "0.9.0", features = ["with-uuid-1"] } 33 | openssl = { version = "0.10.73", default-features = false, features = ["vendored"] } 34 | native-tls = { version = "0.2.14", default-features = false, features = ["vendored"] } 35 | postgres-native-tls = "0.5.1" 36 | -------------------------------------------------------------------------------- /RELEASING.md: -------------------------------------------------------------------------------- 1 | # Release process 2 | 3 | 1. Run `git pull upstream main` 4 | 1. Run `cargo test` 5 | 1. Run `cargo clean && cargo clippy` 6 | 1. Remove the `-pre` from `version` in all `*/Cargo.toml`, and from the `version = ..` in any references between packages. 7 | 1. Update the link to `docker-compose.yml` in `docs/src/usage/docker-compose.md` to refer to the new version. 8 | 1. Update the docker image in `docker-compose.yml` to refer to the new version. 9 | 1. Run `cargo semver-checks` (https://crates.io/crates/cargo-semver-checks) 10 | 1. Run `cargo build --release` 11 | 1. Commit the changes (Cargo.lock will change too) with comment `vX.Y.Z`. 12 | 1. Run `git tag vX.Y.Z` 13 | 1. Run `git push upstream` 14 | 1. Run `git push upstream --tag vX.Y.Z` 15 | 1. Run `cargo publish` to publish all packages in the workspace 16 | 1. Bump the patch version in `*/Cargo.toml` and add the `-pre` suffix. This allows `cargo-semver-checks` to check for changes not accounted for in the version delta. 17 | 1. 
Run `cargo build --release` again to update `Cargo.lock` 18 | 1. Commit that change with comment "Bump to -pre version". 19 | 1. Run `git push upstream` 20 | 1. Navigate to the tag in the GitHub releases UI and create a release with general comments about the changes in the release 21 | -------------------------------------------------------------------------------- /server/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "taskchampion-sync-server" 3 | version = "0.7.2-pre" 4 | authors = ["Dustin J. Mitchell "] 5 | edition = "2021" 6 | publish = false 7 | 8 | [features] 9 | # By default, only build the SQLite backend. 10 | default = ["sqlite"] 11 | sqlite = ["dep:taskchampion-sync-server-storage-sqlite"] 12 | postgres = ["dep:taskchampion-sync-server-storage-postgres"] 13 | 14 | [[bin]] 15 | # The simple binary name is the SQLite build. 16 | name = "taskchampion-sync-server" 17 | required-features = ["sqlite"] 18 | 19 | [[bin]] 20 | name = "taskchampion-sync-server-postgres" 21 | required-features = ["postgres"] 22 | 23 | [dependencies] 24 | taskchampion-sync-server-core = { path = "../core" } 25 | taskchampion-sync-server-storage-sqlite = { path = "../sqlite", optional = true } 26 | taskchampion-sync-server-storage-postgres = { path = "../postgres", optional = true } 27 | uuid.workspace = true 28 | actix-web.workspace = true 29 | anyhow.workspace = true 30 | thiserror.workspace = true 31 | futures.workspace = true 32 | serde_json.workspace = true 33 | serde.workspace = true 34 | clap.workspace = true 35 | log.workspace = true 36 | env_logger.workspace = true 37 | chrono.workspace = true 38 | 39 | [dev-dependencies] 40 | actix-rt.workspace = true 41 | tempfile.workspace = true 42 | pretty_assertions.workspace = true 43 | temp-env.workspace = true 44 | -------------------------------------------------------------------------------- /docs/src/introduction.md: -------------------------------------------------------------------------------- 1 | # Introduction 2 | 3 | Taskchampion-sync-server is an implementation of the TaskChampion [sync 4 | protocol][sync-protocol] server. It supports synchronizing Taskwarrior tasks 5 | between multiple systems. 6 | 7 | The project provides both pre-built images for common use-cases (see 8 | [usage](./usage.md)) and Rust libraries that can be used to build more 9 | sophisticated applications ([integration](./integration.md)). 10 | 11 | It also serves as a reference implementation: where the 12 | [specification][sync-protocol] is ambiguous, this implementation's 13 | interpretation is favored in resolving the ambiguity. Other implementations of 14 | the protocol should interoperate with this implementation. 15 | 16 | ## Sync Overview 17 | 18 | The server identifies each user with a client ID. For example, when 19 | syncing Taskwarrior tasks between a desktop computer and a laptop, both systems 20 | would use the same client ID to indicate that they share the same user's task data. 21 | 22 | Task data is encrypted, and the server does not have access to the encryption 23 | secret. The server sees only encrypted data and cannot read or modify tasks in 24 | any way. 25 | 26 | To perform a sync, a replica first downloads and decrypts any changes that have 27 | been sent to the server since its last sync. It then gathers any local changes, 28 | encrypts them, and uploads them to the server. 
29 | 30 | [sync-protocol]: https://gothenburgbitfactory.org/taskchampion/sync.html 31 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | volumes: 2 | data: 3 | 4 | 5 | services: 6 | mkdir: 7 | image: caddy:2-alpine 8 | command: | 9 | /bin/sh -c " 10 | mkdir -p /data/caddy/data /data/caddy/config /data/tss/taskchampion-sync-server" 11 | volumes: 12 | - type: volume 13 | source: data 14 | target: /data 15 | read_only: false 16 | volume: 17 | nocopy: true 18 | 19 | caddy: 20 | image: caddy:2-alpine 21 | restart: unless-stopped 22 | ports: 23 | - "80:80" 24 | - "443:443" 25 | volumes: 26 | - type: volume 27 | source: data 28 | target: /data 29 | read_only: false 30 | volume: 31 | nocopy: true 32 | subpath: caddy/data 33 | - type: volume 34 | source: data 35 | target: /config 36 | read_only: false 37 | volume: 38 | nocopy: true 39 | subpath: caddy/config 40 | command: caddy reverse-proxy --from https://${TASKCHAMPION_SYNC_SERVER_HOSTNAME} --to http://tss:8080 41 | depends_on: 42 | mkdir: 43 | condition: service_completed_successfully 44 | 45 | tss: 46 | image: ghcr.io/gothenburgbitfactory/taskchampion-sync-server:0.7.1 47 | restart: unless-stopped 48 | environment: 49 | - "RUST_LOG=info" 50 | - "DATA_DIR=/var/lib/taskchampion-sync-server/data" 51 | - "LISTEN=0.0.0.0:8080" 52 | - "CLIENT_ID=${TASKCHAMPION_SYNC_SERVER_CLIENT_ID}" 53 | volumes: 54 | - type: volume 55 | source: data 56 | target: /var/lib/taskchampion-sync-server/data 57 | read_only: false 58 | volume: 59 | nocopy: true 60 | subpath: tss/taskchampion-sync-server 61 | depends_on: 62 | mkdir: 63 | condition: service_completed_successfully 64 | -------------------------------------------------------------------------------- /.github/workflows/rust-tests.yml: -------------------------------------------------------------------------------- 1 | name: tests - rust 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | types: [opened, reopened, synchronize] 9 | 10 | jobs: 11 | test: 12 | strategy: 13 | matrix: 14 | postgres: 15 | - "17" 16 | rust: 17 | # MSRV 18 | - "1.85.0" 19 | - "stable" 20 | 21 | runs-on: ubuntu-latest 22 | name: "rust ${{ matrix.rust }} / postgres ${{ matrix.postgres }}" 23 | 24 | services: 25 | # Service container for PostgreSQL 26 | postgres: 27 | image: "postgres:${{ matrix.postgres }}" 28 | env: 29 | POSTGRES_DB: test_db 30 | POSTGRES_USER: test_user 31 | POSTGRES_PASSWORD: test_password 32 | ports: 33 | - 5432:5432 34 | # Set health checks to ensure Postgres is ready before the job starts 35 | options: >- 36 | --health-cmd pg_isready 37 | --health-interval 10s 38 | --health-timeout 5s 39 | --health-retries 5 40 | 41 | steps: 42 | - uses: actions/checkout@v6 43 | 44 | - name: Cache cargo registry 45 | uses: actions/cache@v5 46 | with: 47 | path: ~/.cargo/registry 48 | key: ${{ runner.os }}-${{ matrix.rust }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }} 49 | 50 | - name: Cache cargo build 51 | uses: actions/cache@v5 52 | with: 53 | path: target 54 | key: ${{ runner.os }}-${{ matrix.rust }}-cargo-build-target-${{ hashFiles('**/Cargo.lock') }} 55 | 56 | - uses: actions-rs/toolchain@v1 57 | with: 58 | toolchain: "${{ matrix.rust }}" 59 | override: true 60 | 61 | - name: test 62 | env: 63 | TEST_DB_URL: postgresql://test_user:test_password@localhost:5432/test_db 64 | run: cargo test 65 | -------------------------------------------------------------------------------- 
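For reference, the same environment can be reproduced locally to run the Postgres tests outside of CI. The commands below are a sketch rather than part of the repository's tooling: the container name and host port are arbitrary, while the database name, credentials, and `TEST_DB_URL` value mirror the workflow above (the Postgres tests are skipped when `TEST_DB_URL` is unset).

```sh
# Start a throwaway Postgres 17 instance matching the CI service container.
docker run --rm -d --name tss-test-db \
  -p 5432:5432 \
  -e POSTGRES_DB=test_db \
  -e POSTGRES_USER=test_user \
  -e POSTGRES_PASSWORD=test_password \
  postgres:17

# Point the tests at it and run them.
TEST_DB_URL=postgresql://test_user:test_password@localhost:5432/test_db \
  cargo test -p taskchampion-sync-server-storage-postgres
```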
/docs/src/integration/pre-built.md: -------------------------------------------------------------------------------- 1 | # Pre-built Images 2 | 3 | The pre-built Postgres Docker image described in [Docker 4 | Images](../usage/docker-images.md) may be adequate for a production deployment. 5 | The image is stateless and can be easily scaled horizontally to increase 6 | capacity. 7 | 8 | ## Database Schema 9 | 10 | The schema defined in 11 | [`postgres/schema.sql`](https://github.com/GothenburgBitFactory/taskchampion-sync-server/blob/main/postgres/schema.sql) 12 | must be applied to the database before the container will function. 13 | 14 | The schema is stable, and any changes to the schema will be made in a major 15 | version with migration instructions provided. 16 | 17 | An integration may: 18 | 19 | - Add additional tables to the database 20 | - Add additional columns to the `clients` table. If those columns do not have 21 | default values, ensure the server is configured with `CREATE_CLIENTS=false` as 22 | described below. 23 | - Insert rows into the `clients` table, using default values for all columns 24 | except `client_id` and any application-specific columns. 25 | - Delete rows from the `clients` table. Note that this table is configured to 26 | automatically delete all data associated with a client when the client's row is 27 | deleted. 28 | 29 | ## Managing Clients 30 | 31 | By default, taskchampion-sync-server creates a new, empty client when it 32 | receives a connection from an unrecognized client ID. Setting 33 | `CREATE_CLIENTS=false` disables this functionality, and is recommended in 34 | production deployments to avoid abuse. 35 | 36 | In this configuration, it is the responsibility of the integration to create 37 | new client rows when desired, using a statement like `INSERT into clients 38 | (client_id) values ($1)` with the new client ID as a parameter. Similarly, 39 | clients may be deleted, along with all stored task data, using a statement like 40 | `DELETE from clients where client_id = $1`. 41 | -------------------------------------------------------------------------------- /docs/src/usage/docker-compose.md: -------------------------------------------------------------------------------- 1 | # Docker Compose 2 | 3 | The 4 | [`docker-compose.yml`](https://raw.githubusercontent.com/GothenburgBitFactory/taskchampion-sync-server/refs/tags/v0.7.1/docker-compose.yml) 5 | file in this repository is sufficient to run taskchampion-sync-server, 6 | including setting up TLS certificates using Lets Encrypt, thanks to 7 | [Caddy](https://caddyserver.com/). This setup uses the SQLite backend, which is 8 | adequate for one or a few clients. 9 | 10 | You will need a server with ports 80 and 443 open to the Internet and with a 11 | fixed, publicly-resolvable hostname. These ports must be available both to your 12 | Taskwarrior clients and to the Lets Encrypt servers. 13 | 14 | On that server, download `docker-compose.yml` from the link above (it is pinned 15 | to the latest release) into the current directory. Then run 16 | 17 | ```sh 18 | TASKCHAMPION_SYNC_SERVER_HOSTNAME=taskwarrior.example.com \ 19 | TASKCHAMPION_SYNC_SERVER_CLIENT_ID=your-client-id \ 20 | docker compose up 21 | ``` 22 | 23 | The `TASKCHAMPION_SYNC_SERVER_CLIENT_ID` limits the server to the given client 24 | ID; omit it to allow all client IDs. You may specify multiple client IDs 25 | separated by commas. 
26 | 27 | It can take a few minutes to obtain the certificate; the caddy container will 28 | log a message "certificate obtained successfully" when this is complete, or 29 | error messages if the process fails. Once this process is complete, configure 30 | your `.taskrc` files to point to the server: 31 | 32 | ```none 33 | sync.server.url=https://taskwarrior.example.com 34 | sync.server.client_id=your-client-id 35 | sync.encryption_secret=your-encryption-secret 36 | ``` 37 | 38 | The docker-compose images store data in a docker volume named 39 | `taskchampion-sync-server_data`. This volume contains all of the task data, as 40 | well as the TLS certificate information. It will persist over restarts, in a 41 | typical Docker installation. The docker containers will start automatically 42 | when the Docker daemon starts. See the docker-compose documentation for more 43 | information. 44 | -------------------------------------------------------------------------------- /server/src/bin/taskchampion-sync-server.rs: -------------------------------------------------------------------------------- 1 | #![deny(clippy::all)] 2 | 3 | use clap::{arg, builder::ValueParser, ArgMatches, Command}; 4 | use std::ffi::OsString; 5 | use taskchampion_sync_server::{args, web}; 6 | use taskchampion_sync_server_storage_sqlite::SqliteStorage; 7 | 8 | fn command() -> Command { 9 | args::command().arg( 10 | arg!(-d --"data-dir" <DIR> "Directory in which to store data") 11 | .value_parser(ValueParser::os_string()) 12 | .env("DATA_DIR") 13 | .default_value("/var/lib/taskchampion-sync-server"), 14 | ) 15 | } 16 | 17 | fn data_dir_from_matches(matches: &ArgMatches) -> OsString { 18 | matches.get_one::<OsString>("data-dir").unwrap().clone() 19 | } 20 | 21 | #[actix_web::main] 22 | async fn main() -> anyhow::Result<()> { 23 | env_logger::init(); 24 | let matches = command().get_matches(); 25 | let server_config = args::server_config_from_matches(&matches); 26 | let web_config = args::web_config_from_matches(&matches); 27 | let data_dir = data_dir_from_matches(&matches); 28 | let storage = SqliteStorage::new(data_dir)?; 29 | 30 | let server = web::WebServer::new(server_config, web_config, storage); 31 | server.run().await 32 | } 33 | 34 | #[cfg(test)] 35 | mod test { 36 | use super::*; 37 | use temp_env::{with_var, with_var_unset}; 38 | 39 | #[test] 40 | fn command_data_dir() { 41 | with_var_unset("DATA_DIR", || { 42 | let matches = command().get_matches_from([ 43 | "tss", 44 | "--data-dir", 45 | "/foo/bar", 46 | "--listen", 47 | "localhost:8080", 48 | ]); 49 | assert_eq!(data_dir_from_matches(&matches), "/foo/bar"); 50 | }); 51 | } 52 | 53 | #[test] 54 | fn command_data_dir_env() { 55 | with_var("DATA_DIR", Some("/foo/bar"), || { 56 | let matches = command().get_matches_from(["tss", "--listen", "localhost:8080"]); 57 | assert_eq!(data_dir_from_matches(&matches), "/foo/bar"); 58 | }); 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /server/src/bin/taskchampion-sync-server-postgres.rs: -------------------------------------------------------------------------------- 1 | #![deny(clippy::all)] 2 | 3 | use clap::{arg, builder::ValueParser, ArgMatches, Command}; 4 | use std::ffi::OsString; 5 | use taskchampion_sync_server::{args, web}; 6 | use taskchampion_sync_server_storage_postgres::PostgresStorage; 7 | 8 | fn command() -> Command { 9 | args::command().arg( 10 | arg!(-c --"connection" "LibPQ-style connection URI") 11 | .value_parser(ValueParser::os_string()) 12 | .help("See 
https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING-URIS") 13 | .required(true) 14 | .env("CONNECTION") 15 | ) 16 | } 17 | 18 | fn connection_from_matches(matches: &ArgMatches) -> String { 19 | matches 20 | .get_one::("connection") 21 | .unwrap() 22 | .to_str() 23 | .expect("--connection must be valid UTF-8") 24 | .to_string() 25 | } 26 | 27 | #[actix_web::main] 28 | async fn main() -> anyhow::Result<()> { 29 | env_logger::init(); 30 | let matches = command().get_matches(); 31 | let server_config = args::server_config_from_matches(&matches); 32 | let web_config = args::web_config_from_matches(&matches); 33 | let connection = connection_from_matches(&matches); 34 | let storage = PostgresStorage::new(connection).await?; 35 | 36 | let server = web::WebServer::new(server_config, web_config, storage); 37 | server.run().await 38 | } 39 | 40 | #[cfg(test)] 41 | mod test { 42 | use super::*; 43 | use temp_env::{with_var, with_var_unset}; 44 | 45 | #[test] 46 | fn command_connection() { 47 | with_var_unset("CONNECTION", || { 48 | let matches = command().get_matches_from([ 49 | "tss", 50 | "--connection", 51 | "postgresql:/foo/bar", 52 | "--listen", 53 | "localhost:8080", 54 | ]); 55 | assert_eq!(connection_from_matches(&matches), "postgresql:/foo/bar"); 56 | }); 57 | } 58 | 59 | #[test] 60 | fn command_connection_env() { 61 | with_var("CONNECTION", Some("postgresql:/foo/bar"), || { 62 | let matches = command().get_matches_from(["tss", "--listen", "localhost:8080"]); 63 | assert_eq!(connection_from_matches(&matches), "postgresql:/foo/bar"); 64 | }); 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /.github/workflows/docker.yml: -------------------------------------------------------------------------------- 1 | name: Build Docker 2 | 3 | on: 4 | push: 5 | tags: 6 | - '*' 7 | 8 | jobs: 9 | sqlite: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Set up Docker Buildx 13 | uses: docker/setup-buildx-action@v3 14 | - name: Set up QEMU 15 | uses: docker/setup-qemu-action@v3 16 | - name: Login to ghcr.io 17 | uses: docker/login-action@v3 18 | with: 19 | registry: ghcr.io 20 | username: ${{ github.repository_owner }} 21 | password: ${{ secrets.GITHUB_TOKEN }} 22 | - name: Docker meta 23 | id: meta-sqlite 24 | uses: docker/metadata-action@v5 25 | with: 26 | images: | 27 | ghcr.io/gothenburgbitfactory/taskchampion-sync-server 28 | tags: | 29 | type=ref,event=branch 30 | type=semver,pattern={{version}} 31 | type=semver,pattern={{major}}.{{minor}} 32 | type=match,pattern=\d.\d.\d,value=latest 33 | - name: Build and push 34 | uses: docker/build-push-action@v6 35 | with: 36 | file: "./Dockerfile-sqlite" 37 | platforms: linux/amd64,linux/arm64 38 | push: true 39 | tags: ${{ steps.meta-sqlite.outputs.tags }} 40 | labels: ${{ steps.meta-sqlite.outputs.labels }} 41 | postgres: 42 | runs-on: ubuntu-latest 43 | steps: 44 | - name: Set up Docker Buildx 45 | uses: docker/setup-buildx-action@v3 46 | - name: Login to ghcr.io 47 | uses: docker/login-action@v3 48 | with: 49 | registry: ghcr.io 50 | username: ${{ github.repository_owner }} 51 | password: ${{ secrets.GITHUB_TOKEN }} 52 | - name: Docker meta 53 | id: meta-postgres 54 | uses: docker/metadata-action@v5 55 | with: 56 | images: | 57 | ghcr.io/gothenburgbitfactory/taskchampion-sync-server-postgres 58 | tags: | 59 | type=ref,event=branch 60 | type=semver,pattern={{version}} 61 | type=semver,pattern={{major}}.{{minor}} 62 | type=match,pattern=\d.\d.\d,value=latest 63 | - name: Build and 
push 64 | uses: docker/build-push-action@v6 65 | with: 66 | file: "./Dockerfile-postgres" 67 | platforms: linux/amd64,linux/arm64 68 | push: true 69 | tags: ${{ steps.meta-postgres.outputs.tags }} 70 | labels: ${{ steps.meta-postgres.outputs.labels }} 71 | -------------------------------------------------------------------------------- /docs/src/usage/docker-images.md: -------------------------------------------------------------------------------- 1 | # Docker Images 2 | 3 | Every release of the server generates Docker images. One image is produced for 4 | each storage backend: 5 | - `ghcr.io/gothenburgbitfactory/taskchampion-sync-server` (SQLite) 6 | - `ghcr.io/gothenburgbitfactory/taskchampion-sync-server-postgres` (Postgres) 7 | 8 | The image tags include `latest` for the latest release, and both minor and 9 | patch versions, e.g., `0.5` and `0.5.1`. 10 | 11 | ## Running the Image 12 | 13 | At startup, each image applies some default values and runs the relevant binary 14 | directly. Configuration is typically by environment variables, all of which are 15 | documented in the `--help` output of the binaries. These include 16 | 17 | - `RUST_LOG` - log level, one of `trace`, `debug`, `info`, `warn` and `error`. 18 | - `DATA_DIR` (SQLite only; default `/var/lib/taskchampion-sync-server/data`) - 19 | directory for the synced data. 20 | - `CONNECTION` (Postgres only) - Postgres connection information, in the form 21 | of a [LibPQ-style connection 22 | URI](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING-URIS). 23 | - `LISTEN` (default `0.0.0.0:8080`) - address and port on which to listen for 24 | HTTP requests. 25 | - `CLIENT_ID` - comma-separated list of client IDs that will be allowed, or 26 | empty to allow all clients. 27 | - `CREATE_CLIENTS` (default `true`) - if true, automatically create clients on 28 | first sync. If this is set to false, it is up to you to initialize clients in 29 | the DB. 30 | 31 | ### Example 32 | 33 | ```shell 34 | docker run -d \ 35 | --name=taskchampion-sync-server \ 36 | -p 8080:8080 \ 37 | -e RUST_LOG=debug \ 38 | -v /data/taskchampion-sync-server:/var/lib/taskchampion-sync-server/data \ 39 | taskchampion-sync-server 40 | ``` 41 | 42 | ### Image-Specific Setup 43 | 44 | The SQLite image is configured with `VOLUME 45 | /var/lib/taskchampion-sync-server/data`, persisting the task data in an 46 | anonymous Docker volume. It is recommended to put this on a named volume, or 47 | persistent storage in an environment like Kubernetes, so that it is not 48 | accidentally deleted. 49 | 50 | The Postgres image does not automatically create its database schema. See the 51 | [integration section](../integration/pre-built.md) for more detail. This 52 | implementation is tested with Postgres version 17 but should work with any 53 | recent version. 54 | 55 | Note that the Docker images do not implement TLS. The expectation is that 56 | another component, such as a Kubernetes ingress, will terminate the TLS 57 | connection and proxy HTTP traffic to the taskchampion-sync-server container. 
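A corresponding invocation for the Postgres image might look like the following sketch; the connection URI is a placeholder for your own database, and the schema from `postgres/schema.sql` must already be applied.

```shell
docker run -d \
  --name=taskchampion-sync-server-postgres \
  -p 8080:8080 \
  -e RUST_LOG=info \
  -e CONNECTION="postgresql://tss_user:tss_password@db.example.com:5432/taskchampion" \
  -e CREATE_CLIENTS=false \
  ghcr.io/gothenburgbitfactory/taskchampion-sync-server-postgres
```

Here `CREATE_CLIENTS=false` reflects the recommendation, described in the integration section, to manage client rows explicitly in production deployments.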
58 | -------------------------------------------------------------------------------- /sqlite/tests/concurrency.rs: -------------------------------------------------------------------------------- 1 | use std::thread; 2 | use taskchampion_sync_server_core::{Storage, NIL_VERSION_ID}; 3 | use taskchampion_sync_server_storage_sqlite::SqliteStorage; 4 | use tempfile::TempDir; 5 | use tokio::runtime; 6 | use uuid::Uuid; 7 | 8 | /// Test that calls to `add_version` from different threads maintain sequential consistency. 9 | /// 10 | /// This uses `std::thread` to ensure actual parallelism, with a different, single-threaded Tokio runtime 11 | /// in each thread. Asynchronous concurrency does not actually test consistency. 12 | #[tokio::test] 13 | async fn add_version_concurrency() -> anyhow::Result<()> { 14 | let tmp_dir = TempDir::new()?; 15 | let client_id = Uuid::new_v4(); 16 | 17 | { 18 | let con = SqliteStorage::new(tmp_dir.path())?; 19 | let mut txn = con.txn(client_id).await?; 20 | txn.new_client(NIL_VERSION_ID).await?; 21 | txn.commit().await?; 22 | } 23 | 24 | const N: i32 = 100; 25 | const T: i32 = 4; 26 | 27 | // Add N versions to the DB. 28 | let add_versions = |tmp_dir, client_id| { 29 | let rt = runtime::Builder::new_current_thread().build()?; 30 | rt.block_on(async { 31 | let con = SqliteStorage::new(tmp_dir)?; 32 | 33 | for _ in 0..N { 34 | let mut txn = con.txn(client_id).await?; 35 | let client = txn.get_client().await?.unwrap(); 36 | let version_id = Uuid::new_v4(); 37 | let parent_version_id = client.latest_version_id; 38 | std::thread::yield_now(); // Make failure more likely. 39 | txn.add_version(version_id, parent_version_id, b"data".to_vec()) 40 | .await?; 41 | txn.commit().await?; 42 | } 43 | 44 | Ok::<_, anyhow::Error>(()) 45 | }) 46 | }; 47 | 48 | thread::scope(|s| { 49 | // Spawn T threads. 50 | for _ in 0..T { 51 | let tmp_dir = tmp_dir.path(); 52 | s.spawn(move || add_versions(tmp_dir, client_id)); 53 | } 54 | }); 55 | 56 | // There should now be precisely N*T versions. This number will be smaller if there were 57 | // concurrent transactions, which would have allowed two `add_version` calls with the 58 | // same `parent_version_id`. 59 | { 60 | let con = SqliteStorage::new(tmp_dir.path())?; 61 | let mut txn = con.txn(client_id).await?; 62 | let client = txn.get_client().await?.unwrap(); 63 | 64 | let mut n = 0; 65 | let mut version_id = client.latest_version_id; 66 | while version_id != NIL_VERSION_ID { 67 | let version = txn 68 | .get_version(version_id) 69 | .await? 70 | .expect("version should exist"); 71 | n += 1; 72 | version_id = version.parent_version_id; 73 | } 74 | 75 | assert_eq!(n, N * T); 76 | } 77 | 78 | Ok(()) 79 | } 80 | -------------------------------------------------------------------------------- /postgres/src/testing.rs: -------------------------------------------------------------------------------- 1 | use std::{future::Future, sync::LazyLock}; 2 | use tokio::{sync::Mutex, task}; 3 | use tokio_postgres::NoTls; 4 | 5 | // An async mutex used to ensure exclusive access to the database. 6 | static DB_LOCK: LazyLock> = std::sync::LazyLock::new(|| Mutex::new(())); 7 | 8 | /// Call the given function with a DB client, pointing to an initialized DB. 9 | /// 10 | /// This serializes use of the database so that two tests are not simultaneously 11 | /// modifying it. 12 | /// 13 | /// The function's future need not be `Send`. 
14 | pub(crate) async fn with_db<F, FUT>(f: F) -> anyhow::Result<()> 15 | where 16 | F: FnOnce(String, tokio_postgres::Client) -> FUT, 17 | FUT: Future<Output = anyhow::Result<()>> + 'static, 18 | { 19 | let _ = env_logger::builder().is_test(true).try_init(); 20 | 21 | let Ok(connection_string) = std::env::var("TEST_DB_URL") else { 22 | // If this is run in a GitHub action, then we really don't want to skip the tests. 23 | if std::env::var("GITHUB_ACTIONS").is_ok() { 24 | panic!("TEST_DB_URL must be set in GitHub actions"); 25 | } 26 | // Skip the test. 27 | return Ok(()); 28 | }; 29 | 30 | // Serialize use of the DB. 31 | let _db_guard = DB_LOCK.lock().await; 32 | 33 | let local_set = task::LocalSet::new(); 34 | local_set 35 | .run_until(async move { 36 | let (client, connection) = tokio_postgres::connect(&connection_string, NoTls).await?; 37 | let conn_join_handle = tokio::spawn(async move { 38 | if let Err(e) = connection.await { 39 | log::warn!("connection error: {e}"); 40 | } 41 | }); 42 | 43 | // Set up the DB. 44 | client 45 | .execute("drop schema if exists public cascade", &[]) 46 | .await?; 47 | client.execute("create schema public", &[]).await?; 48 | client.simple_query(include_str!("../schema.sql")).await?; 49 | 50 | // Run the test in its own task, so that we can handle all failure cases. This task must be 51 | // local because the future typically uses `StorageTxn` which is not `Send`. 52 | let test_join_handle = tokio::task::spawn_local(f(connection_string.clone(), client)); 53 | 54 | // Wait for the test task to complete. 55 | let test_res = test_join_handle.await?; 56 | 57 | conn_join_handle.await?; 58 | 59 | // Clean up the DB. 60 | 61 | let (client, connection) = tokio_postgres::connect(&connection_string, NoTls).await?; 62 | let conn_join_handle = tokio::spawn(async move { 63 | if let Err(e) = connection.await { 64 | log::warn!("connection error: {e}"); 65 | } 66 | }); 67 | client 68 | .execute("drop schema if exists public cascade", &[]) 69 | .await?; 70 | drop(client); 71 | conn_join_handle.await?; 72 | 73 | test_res 74 | }) 75 | .await 76 | } 77 | -------------------------------------------------------------------------------- /docs/src/usage/binaries.md: -------------------------------------------------------------------------------- 1 | # Binaries 2 | 3 | Taskchampion-sync-server is a single binary that serves HTTP requests on a TCP 4 | port. The server does not implement TLS; for public deployments, the 5 | recommendation is to use a reverse proxy such as Nginx, haproxy, or Apache 6 | httpd. 7 | 8 | One binary is provided for each storage backend: 9 | 10 | - `taskchampion-sync-server` (SQLite) 11 | - `taskchampion-sync-server-postgres` (Postgres) 12 | 13 | ### Building the Binary 14 | 15 | This is a standard Rust project, and can be built with `cargo build --release`. 16 | 17 | By default, only the SQLite binary is built. To also build the Postgres binary, 18 | use 19 | ```none 20 | cargo build --release --features postgres 21 | ``` 22 | 23 | To disable building the SQLite binary and build only the Postgres binary, use 24 | 25 | ```none 26 | cargo build --release --no-default-features --features postgres 27 | ``` 28 | 29 | ### Running the Binary 30 | 31 | The server is configured with command-line options or environment variables. 32 | See the `--help` output for full details. 33 | 34 | For the SQLite binary, the `--data-dir` option or `DATA_DIR` environment 35 | variable specifies where the server should store its data. 
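For example, a minimal SQLite invocation might look like this (the paths and listen address are placeholders; `--listen` is described below):

```sh
./target/release/taskchampion-sync-server \
  --listen localhost:8080 \
  --data-dir /var/lib/taskchampion-sync-server
```

The same settings can instead be supplied via the `DATA_DIR` and `LISTEN` environment variables.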
36 | 37 | For the Postgres binary, the `--connection` option or `CONNECTION` environment 38 | variable specifies the connection information, in the form of a [LibPQ-style 39 | connection 40 | URI](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING-URIS). 41 | Note that unlike LibPQ, the Rust client only supports `sslmode` values 42 | `disable`, `prefer`, and `require`, and will always validate CA hostnames and 43 | certificates when using TLS. 44 | 45 | The remaining options are common to all binaries. 46 | 47 | The `--listen` option specifies the interface and port the server listens on. 48 | It must contain an IP-Address or a DNS name and a port number. This option is 49 | mandatory, but can be repeated to specify multiple interfaces or ports. This 50 | value can be specified in environment variable `LISTEN`, as a comma-separated 51 | list of values. 52 | 53 | By default, the server will allow all clients and create them in the database 54 | on first contact. There are two ways to limit the clients the server will 55 | interact with: 56 | 57 | - To limit the accepted client IDs, specify them in the environment variable 58 | `CLIENT_ID`, as a comma-separated list of UUIDs. Client IDs can be specified 59 | with `--allow-client-id`, but this should not be used on shared systems, as 60 | command line arguments are visible to all users on the system. This convenient 61 | option is suitable for personal and small-scale deployments. 62 | 63 | - To disable the automatic creation of clients, use the `--no-create-clients` 64 | flag or the `CREATE_CLIENTS=false` environment variable. You are now 65 | responsible for creating clients in the database manually, so this option is 66 | more suitable for large scale deployments. See [Integration](../integration.md) 67 | for more information on such deployments. 68 | 69 | The server only logs errors by default. To add additional logging output, set 70 | environment variable `RUST_LOG` to `info` to get a log message for every 71 | request, or to `debug` to get more verbose debugging output. 72 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | TaskChampion Sync-Server 2 | ------------------------ 3 | 4 | TaskChampion is the task database [Taskwarrior][tw] uses to store and sync 5 | tasks. This repository implements a sync server against which Taskwarrior 6 | and other applications embedding TaskChampion can sync. 7 | 8 | [tw]: https://github.com/GothenburgBitFactory/taskwarrior 9 | 10 | ## Status 11 | 12 | This project provides both pre-built images for common use-cases and Rust 13 | libraries that can be used to build more sophisticated applications. See [the documentation][documentation] 14 | for more on how to use this project. 15 | 16 | [documentation]: https://gothenburgbitfactory.org/taskchampion-sync-server 17 | 18 | ## Repository Guide 19 | 20 | The repository is comprised of four crates: 21 | 22 | - `taskchampion-sync-server-core` implements the core of the protocol 23 | - `taskchampion-sync-server-storage-sqlite` implements an SQLite backend for the core 24 | - `taskchampion-sync-server-storage-postgres` implements a Postgres backend for the core 25 | - `taskchampion-sync-server` implements a simple HTTP server for the protocol 26 | 27 | ### Building From Source 28 | 29 | #### Installing Rust 30 | 31 | TaskChampion Sync-Server build has been tested with current Rust stable 32 | release version. 
You can install Rust from your distribution package or use 33 | [`rustup`][rustup]. 34 | ```sh 35 | rustup default stable 36 | ``` 37 | 38 | The minimum supported Rust version (MSRV) is given in 39 | [`Cargo.toml`](./Cargo.toml). Note that package repositories typically do not 40 | have sufficiently new versions of Rust. 41 | 42 | If you prefer, you can use the stable toolchain only for building TaskChampion 43 | Sync-Server (you must clone the repository first). 44 | ```sh 45 | rustup override set stable 46 | ``` 47 | 48 | [rustup]: https://rustup.rs/ 49 | 50 | #### Building TaskChampion Sync-Server 51 | 52 | To build the TaskChampion Sync-Server binary, simply execute the following 53 | commands. 54 | ```sh 55 | git clone https://github.com/GothenburgBitFactory/taskchampion-sync-server.git 56 | cd taskchampion-sync-server 57 | cargo build --release 58 | ``` 59 | 60 | After the build, the binary is located at 61 | `target/release/taskchampion-sync-server`. 62 | 63 | #### Building the Postgres Backend 64 | 65 | The storage backend is controlled by the Cargo features `postgres` and `sqlite`. 66 | By default, only the `sqlite` feature is enabled. 67 | To enable building the Postgres backend, add `--features postgres`. 68 | The Postgres binary is located at 69 | `target/release/taskchampion-sync-server-postgres`. 70 | 71 | ### Building the Docker Images 72 | 73 | To build the images, execute the following commands. 74 | 75 | SQLite: 76 | ```sh 77 | docker build \ 78 | -t taskchampion-sync-server \ 79 | -f Dockerfile-sqlite . 80 | ``` 81 | 82 | Postgres: 83 | ```sh 84 | source .env 85 | docker build \ 86 | -t taskchampion-sync-server-postgres \ 87 | -f Dockerfile-postgres . 88 | ``` 89 | 90 | Now, to run the SQLite image, simply execute the following command. 91 | ```sh 92 | docker run -t -d \ 93 | --name=taskchampion \ 94 | -p 8080:8080 \ 95 | taskchampion-sync-server 96 | ``` 97 | 98 | This starts TaskChampion Sync-Server and publishes port 8080 to the host. Please 99 | note that this is a basic run; all data will be destroyed when the container is 100 | stopped and deleted. You may also set `DATA_DIR`, `CLIENT_ID`, or `LISTEN` with `-e`, e.g., 101 | 102 | ```sh 103 | docker run -t -d \ 104 | --name=taskchampion \ 105 | -e LISTEN=0.0.0.0:9000 \ 106 | -p 9000:9000 \ 107 | taskchampion-sync-server 108 | ``` 109 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation.
11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at dustin@cs.uchicago.edu. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 
67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | https://www.contributor-covenant.org/faq 77 | -------------------------------------------------------------------------------- /server/src/web.rs: -------------------------------------------------------------------------------- 1 | use crate::api::{api_scope, ServerState}; 2 | use actix_web::{ 3 | dev::ServiceResponse, 4 | get, 5 | http::StatusCode, 6 | middleware, 7 | middleware::{ErrorHandlerResponse, ErrorHandlers, Logger}, 8 | web, App, HttpServer, Responder, 9 | }; 10 | use std::{collections::HashSet, sync::Arc}; 11 | use taskchampion_sync_server_core::{Server, ServerConfig, Storage}; 12 | use uuid::Uuid; 13 | 14 | fn print_error(res: ServiceResponse) -> actix_web::Result> { 15 | if let Some(err) = res.response().error() { 16 | log::error!("Internal Server Error caused by:\n{err:?}"); 17 | } 18 | Ok(ErrorHandlerResponse::Response(res.map_into_left_body())) 19 | } 20 | 21 | /// Configuration for WebServer (as distinct from [`ServerConfig`]). 22 | pub struct WebConfig { 23 | pub client_id_allowlist: Option>, 24 | pub create_clients: bool, 25 | pub listen_addresses: Vec, 26 | } 27 | 28 | impl Default for WebConfig { 29 | fn default() -> Self { 30 | Self { 31 | client_id_allowlist: Default::default(), 32 | create_clients: true, 33 | listen_addresses: vec![], 34 | } 35 | } 36 | } 37 | 38 | #[get("/")] 39 | async fn index() -> impl Responder { 40 | format!("TaskChampion sync server v{}", env!("CARGO_PKG_VERSION")) 41 | } 42 | 43 | /// A Server represents a sync server. 44 | #[derive(Clone)] 45 | pub struct WebServer { 46 | pub(crate) server_state: Arc, 47 | } 48 | 49 | impl WebServer { 50 | /// Create a new sync server with the given storage implementation. 51 | pub fn new( 52 | config: ServerConfig, 53 | web_config: WebConfig, 54 | storage: ST, 55 | ) -> Self { 56 | Self { 57 | server_state: Arc::new(ServerState { 58 | server: Server::new(config, storage), 59 | web_config, 60 | }), 61 | } 62 | } 63 | 64 | pub fn config(&self, cfg: &mut web::ServiceConfig) { 65 | cfg.service( 66 | web::scope("") 67 | .app_data(web::Data::new(self.server_state.clone())) 68 | .wrap( 69 | middleware::DefaultHeaders::new().add(("Cache-Control", "no-store, max-age=0")), 70 | ) 71 | .service(index) 72 | .service(api_scope()), 73 | ); 74 | } 75 | 76 | pub async fn run(self) -> anyhow::Result<()> { 77 | let listen_addresses = self.server_state.web_config.listen_addresses.clone(); 78 | let mut http_server = HttpServer::new(move || { 79 | App::new() 80 | .wrap(ErrorHandlers::new().handler(StatusCode::INTERNAL_SERVER_ERROR, print_error)) 81 | .wrap(Logger::default()) 82 | .configure(|cfg| self.config(cfg)) 83 | }); 84 | for listen_address in listen_addresses { 85 | log::info!("Serving on {listen_address}"); 86 | http_server = http_server.bind(listen_address)? 
87 | } 88 | http_server.run().await?; 89 | Ok(()) 90 | } 91 | } 92 | 93 | #[cfg(test)] 94 | mod test { 95 | use super::*; 96 | use actix_web::{test, App}; 97 | use pretty_assertions::assert_eq; 98 | use taskchampion_sync_server_core::InMemoryStorage; 99 | 100 | #[actix_rt::test] 101 | async fn test_cache_control() { 102 | let server = WebServer::new( 103 | ServerConfig::default(), 104 | WebConfig::default(), 105 | InMemoryStorage::new(), 106 | ); 107 | let app = App::new().configure(|sc| server.config(sc)); 108 | let app = test::init_service(app).await; 109 | 110 | let req = test::TestRequest::get().uri("/").to_request(); 111 | let resp = test::call_service(&app, req).await; 112 | assert!(resp.status().is_success()); 113 | assert_eq!( 114 | resp.headers().get("Cache-Control").unwrap(), 115 | &"no-store, max-age=0".to_string() 116 | ) 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /core/src/storage.rs: -------------------------------------------------------------------------------- 1 | use chrono::{DateTime, Utc}; 2 | use uuid::Uuid; 3 | 4 | /// A representation of stored metadata about a client. 5 | #[derive(Clone, PartialEq, Eq, Debug)] 6 | pub struct Client { 7 | /// The latest version for this client (may be the nil version) 8 | pub latest_version_id: Uuid, 9 | /// Data about the latest snapshot for this client 10 | pub snapshot: Option, 11 | } 12 | 13 | /// Metadata about a snapshot, not including the snapshot data itself. 14 | #[derive(Clone, PartialEq, Eq, Debug)] 15 | pub struct Snapshot { 16 | /// ID of the version at which this snapshot was made 17 | pub version_id: Uuid, 18 | 19 | /// Timestamp at which this snapshot was set 20 | pub timestamp: DateTime, 21 | 22 | /// Number of versions since this snapshot was made 23 | pub versions_since: u32, 24 | } 25 | 26 | #[derive(Clone, PartialEq, Eq, Debug)] 27 | pub struct Version { 28 | /// The uuid identifying this version. 29 | pub version_id: Uuid, 30 | /// The uuid identifying this version's parent. 31 | pub parent_version_id: Uuid, 32 | /// The data carried in this version. 33 | pub history_segment: Vec, 34 | } 35 | 36 | /// A transaction in the storage backend. 37 | /// 38 | /// Transactions must be sequentially consistent. That is, the results of transactions performed 39 | /// in storage must be as if each were executed sequentially in some order. In particular, 40 | /// un-committed changes must not be read by another transaction, but committed changes must 41 | /// be visible to subequent transations. Together, this guarantees that `add_version` reliably 42 | /// constructs a linear sequence of versions. 43 | /// 44 | /// Transactions with different client IDs cannot share any data, so it is safe to handle them 45 | /// concurrently. 46 | /// 47 | /// Changes in a transaction that is dropped without calling `commit` must not appear in any other 48 | /// transaction. 49 | #[async_trait::async_trait(?Send)] 50 | pub trait StorageTxn { 51 | /// Get information about the client for this transaction 52 | async fn get_client(&mut self) -> anyhow::Result>; 53 | 54 | /// Create the client for this transaction, with the given latest_version_id. The client must 55 | /// not already exist. 56 | async fn new_client(&mut self, latest_version_id: Uuid) -> anyhow::Result<()>; 57 | 58 | /// Set the client's most recent snapshot. 59 | async fn set_snapshot(&mut self, snapshot: Snapshot, data: Vec) -> anyhow::Result<()>; 60 | 61 | /// Get the data for the most recent snapshot. 
The version_id 62 | /// is used to verify that the snapshot is for the correct version. 63 | async fn get_snapshot_data(&mut self, version_id: Uuid) -> anyhow::Result>>; 64 | 65 | /// Get a version, indexed by parent version id 66 | async fn get_version_by_parent( 67 | &mut self, 68 | parent_version_id: Uuid, 69 | ) -> anyhow::Result>; 70 | 71 | /// Get a version, indexed by its own version id 72 | async fn get_version(&mut self, version_id: Uuid) -> anyhow::Result>; 73 | 74 | /// Add a version (that must not already exist), and 75 | /// - update latest_version_id from parent_version_id to version_id 76 | /// - increment snapshot.versions_since 77 | /// Fails if the existing `latest_version_id` is not equal to `parent_version_id`. Check 78 | /// this by calling `get_client` earlier in the same transaction. 79 | async fn add_version( 80 | &mut self, 81 | version_id: Uuid, 82 | parent_version_id: Uuid, 83 | history_segment: Vec, 84 | ) -> anyhow::Result<()>; 85 | 86 | /// Commit any changes made in the transaction. It is an error to call this more than 87 | /// once. It is safe to skip this call for read-only operations. 88 | async fn commit(&mut self) -> anyhow::Result<()>; 89 | } 90 | 91 | /// A trait for objects able to act as storage. Most of the interesting behavior is in the 92 | /// [`crate::storage::StorageTxn`] trait. 93 | #[async_trait::async_trait] 94 | pub trait Storage: Send + Sync { 95 | /// Begin a transaction for the given client ID. 96 | async fn txn(&self, client_id: Uuid) -> anyhow::Result>; 97 | } 98 | -------------------------------------------------------------------------------- /server/src/api/get_snapshot.rs: -------------------------------------------------------------------------------- 1 | use crate::api::{server_error_to_actix, ServerState, SNAPSHOT_CONTENT_TYPE, VERSION_ID_HEADER}; 2 | use actix_web::{error, get, web, HttpRequest, HttpResponse, Result}; 3 | use std::sync::Arc; 4 | 5 | /// Get a snapshot. 6 | /// 7 | /// If a snapshot for this client exists, it is returned with content-type 8 | /// `application/vnd.taskchampion.snapshot`. The `X-Version-Id` header contains the version of the 9 | /// snapshot. 10 | /// 11 | /// If no snapshot exists, returns a 404 with no content. Returns other 4xx or 5xx responses on 12 | /// other errors. 13 | #[get("/v1/client/snapshot")] 14 | pub(crate) async fn service( 15 | req: HttpRequest, 16 | server_state: web::Data>, 17 | ) -> Result { 18 | let client_id = server_state.client_id_header(&req)?; 19 | 20 | if let Some((version_id, data)) = server_state 21 | .server 22 | .get_snapshot(client_id) 23 | .await 24 | .map_err(server_error_to_actix)? 25 | { 26 | Ok(HttpResponse::Ok() 27 | .content_type(SNAPSHOT_CONTENT_TYPE) 28 | .append_header((VERSION_ID_HEADER, version_id.to_string())) 29 | .body(data)) 30 | } else { 31 | Err(error::ErrorNotFound("no snapshot")) 32 | } 33 | } 34 | 35 | #[cfg(test)] 36 | mod test { 37 | use crate::{ 38 | api::CLIENT_ID_HEADER, 39 | web::{WebConfig, WebServer}, 40 | }; 41 | use actix_web::{http::StatusCode, test, App}; 42 | use chrono::{TimeZone, Utc}; 43 | use pretty_assertions::assert_eq; 44 | use taskchampion_sync_server_core::{InMemoryStorage, ServerConfig, Snapshot, Storage}; 45 | use uuid::Uuid; 46 | 47 | #[actix_rt::test] 48 | async fn test_not_found() { 49 | let client_id = Uuid::new_v4(); 50 | let storage = InMemoryStorage::new(); 51 | 52 | // set up the storage contents.. 
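// (a client is created with no snapshot, so the request below should return 404 NOT FOUND)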
53 | { 54 | let mut txn = storage.txn(client_id).await.unwrap(); 55 | txn.new_client(Uuid::new_v4()).await.unwrap(); 56 | txn.commit().await.unwrap(); 57 | } 58 | 59 | let server = WebServer::new(ServerConfig::default(), WebConfig::default(), storage); 60 | let app = App::new().configure(|sc| server.config(sc)); 61 | let app = test::init_service(app).await; 62 | 63 | let uri = "/v1/client/snapshot"; 64 | let req = test::TestRequest::get() 65 | .uri(uri) 66 | .append_header((CLIENT_ID_HEADER, client_id.to_string())) 67 | .to_request(); 68 | let resp = test::call_service(&app, req).await; 69 | assert_eq!(resp.status(), StatusCode::NOT_FOUND); 70 | } 71 | 72 | #[actix_rt::test] 73 | async fn test_success() { 74 | let client_id = Uuid::new_v4(); 75 | let version_id = Uuid::new_v4(); 76 | let snapshot_data = vec![1, 2, 3, 4]; 77 | let storage = InMemoryStorage::new(); 78 | 79 | // set up the storage contents.. 80 | { 81 | let mut txn = storage.txn(client_id).await.unwrap(); 82 | txn.new_client(Uuid::new_v4()).await.unwrap(); 83 | txn.set_snapshot( 84 | Snapshot { 85 | version_id, 86 | versions_since: 3, 87 | timestamp: Utc.with_ymd_and_hms(2001, 9, 9, 1, 46, 40).unwrap(), 88 | }, 89 | snapshot_data.clone(), 90 | ) 91 | .await 92 | .unwrap(); 93 | txn.commit().await.unwrap(); 94 | } 95 | 96 | let server = WebServer::new(ServerConfig::default(), WebConfig::default(), storage); 97 | let app = App::new().configure(|sc| server.config(sc)); 98 | let app = test::init_service(app).await; 99 | 100 | let uri = "/v1/client/snapshot"; 101 | let req = test::TestRequest::get() 102 | .uri(uri) 103 | .append_header((CLIENT_ID_HEADER, client_id.to_string())) 104 | .to_request(); 105 | let resp = test::call_service(&app, req).await; 106 | assert_eq!(resp.status(), StatusCode::OK); 107 | 108 | use actix_web::body::MessageBody; 109 | let bytes = resp.into_body().try_into_bytes().unwrap(); 110 | assert_eq!(bytes.as_ref(), snapshot_data); 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /.github/workflows/checks.yml: -------------------------------------------------------------------------------- 1 | name: checks 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | types: [opened, reopened, synchronize] 9 | 10 | jobs: 11 | clippy: 12 | runs-on: ubuntu-latest 13 | name: "Check & Clippy" 14 | 15 | steps: 16 | - uses: actions/checkout@v6 17 | 18 | - name: Cache cargo registry 19 | uses: actions/cache@v5 20 | with: 21 | path: ~/.cargo/registry 22 | key: ${{ runner.os }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }} 23 | 24 | - name: Cache cargo build 25 | uses: actions/cache@v5 26 | with: 27 | path: target 28 | key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('**/Cargo.lock') }} 29 | 30 | - uses: actions-rs/toolchain@v1 31 | with: 32 | toolchain: "stable" 33 | override: true 34 | components: clippy 35 | 36 | - uses: actions-rs/cargo@v1.0.3 37 | with: 38 | command: check 39 | 40 | - uses: actions-rs/clippy-check@v1 41 | with: 42 | token: ${{ secrets.GITHUB_TOKEN }} 43 | args: --all-features --no-deps -- -D warnings 44 | name: "Clippy Results" 45 | 46 | rustdoc: 47 | runs-on: ubuntu-latest 48 | name: "Rustdoc" 49 | 50 | steps: 51 | - uses: actions/checkout@v6 52 | 53 | - name: Cache cargo registry 54 | uses: actions/cache@v5 55 | with: 56 | path: ~/.cargo/registry 57 | key: ${{ runner.os }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }} 58 | 59 | - uses: actions-rs/toolchain@v1 60 | with: 61 | toolchain: nightly 62 | override: true 63 | 
minimal: true 64 | 65 | - name: taskchampion-sync-server 66 | uses: actions-rs/cargo@v1.0.3 67 | with: 68 | command: rustdoc 69 | args: -p taskchampion-sync-server --bin taskchampion-sync-server --all-features -- -Z unstable-options --check -Dwarnings 70 | 71 | - name: taskchampion-sync-server-postgres 72 | uses: actions-rs/cargo@v1.0.3 73 | with: 74 | command: rustdoc 75 | args: -p taskchampion-sync-server --bin taskchampion-sync-server-postgres --all-features -- -Z unstable-options --check -Dwarnings 76 | 77 | - name: taskchampion-sync-server-core 78 | uses: actions-rs/cargo@v1.0.3 79 | with: 80 | command: rustdoc 81 | args: -p taskchampion-sync-server-core --all-features -- -Z unstable-options --check -Dwarnings 82 | 83 | - name: taskchampion-sync-server-storage-sqlite 84 | uses: actions-rs/cargo@v1.0.3 85 | with: 86 | command: rustdoc 87 | args: -p taskchampion-sync-server-storage-sqlite --all-features -- -Z unstable-options --check -Dwarnings 88 | 89 | - name: taskchampion-sync-server-storage-postgres 90 | uses: actions-rs/cargo@v1.0.3 91 | with: 92 | command: rustdoc 93 | args: -p taskchampion-sync-server-storage-postgres --all-features -- -Z unstable-options --check -Dwarnings 94 | 95 | fmt: 96 | runs-on: ubuntu-latest 97 | name: "Formatting" 98 | steps: 99 | - uses: actions/checkout@v6 100 | 101 | - uses: actions-rs/toolchain@v1 102 | with: 103 | profile: minimal 104 | components: rustfmt 105 | toolchain: stable 106 | override: true 107 | 108 | - uses: actions-rs/cargo@v1.0.3 109 | with: 110 | command: fmt 111 | args: --all -- --check 112 | 113 | semver-checks: 114 | runs-on: ubuntu-latest 115 | name: "Cargo Semver Checks" 116 | steps: 117 | - uses: actions/checkout@v6 118 | - uses: obi1kenobi/cargo-semver-checks-action@v2 119 | with: 120 | # exclude the binary package from semver checks, since it is not published as a crate. 
121 | exclude: taskchampion-sync-server 122 | 123 | mdbook: 124 | runs-on: ubuntu-latest 125 | name: "mdBook Documentation" 126 | 127 | steps: 128 | - uses: actions/checkout@v6 129 | 130 | - name: Setup mdBook 131 | uses: peaceiris/actions-mdbook@v2 132 | with: 133 | # if this changes, change it in .github/workflows/publish-docs.yml as well 134 | mdbook-version: '0.4.48' 135 | 136 | - run: mdbook test docs 137 | - run: mdbook build docs 138 | -------------------------------------------------------------------------------- /server/src/api/mod.rs: -------------------------------------------------------------------------------- 1 | use actix_web::{error, web, HttpRequest, Result, Scope}; 2 | use taskchampion_sync_server_core::{ClientId, Server, ServerError}; 3 | 4 | use crate::web::WebConfig; 5 | 6 | mod add_snapshot; 7 | mod add_version; 8 | mod get_child_version; 9 | mod get_snapshot; 10 | 11 | /// The content-type for history segments (opaque blobs of bytes) 12 | pub(crate) const HISTORY_SEGMENT_CONTENT_TYPE: &str = 13 | "application/vnd.taskchampion.history-segment"; 14 | 15 | /// The content-type for snapshots (opaque blobs of bytes) 16 | pub(crate) const SNAPSHOT_CONTENT_TYPE: &str = "application/vnd.taskchampion.snapshot"; 17 | 18 | /// The header name for version ID 19 | pub(crate) const VERSION_ID_HEADER: &str = "X-Version-Id"; 20 | 21 | /// The header name for client id 22 | pub(crate) const CLIENT_ID_HEADER: &str = "X-Client-Id"; 23 | 24 | /// The header name for parent version ID 25 | pub(crate) const PARENT_VERSION_ID_HEADER: &str = "X-Parent-Version-Id"; 26 | 27 | /// The header name for parent version ID 28 | pub(crate) const SNAPSHOT_REQUEST_HEADER: &str = "X-Snapshot-Request"; 29 | 30 | /// The type containing a reference to the persistent state for the server 31 | pub(crate) struct ServerState { 32 | pub(crate) server: Server, 33 | pub(crate) web_config: WebConfig, 34 | } 35 | 36 | impl ServerState { 37 | /// Get the client id 38 | fn client_id_header(&self, req: &HttpRequest) -> Result { 39 | fn badrequest() -> error::Error { 40 | error::ErrorBadRequest("bad x-client-id") 41 | } 42 | if let Some(client_id_hdr) = req.headers().get(CLIENT_ID_HEADER) { 43 | let client_id = client_id_hdr.to_str().map_err(|_| badrequest())?; 44 | let client_id = ClientId::parse_str(client_id).map_err(|_| badrequest())?; 45 | if let Some(allow_list) = &self.web_config.client_id_allowlist { 46 | if !allow_list.contains(&client_id) { 47 | return Err(error::ErrorForbidden("unknown x-client-id")); 48 | } 49 | } 50 | Ok(client_id) 51 | } else { 52 | Err(badrequest()) 53 | } 54 | } 55 | } 56 | 57 | pub(crate) fn api_scope() -> Scope { 58 | web::scope("") 59 | .service(get_child_version::service) 60 | .service(add_version::service) 61 | .service(get_snapshot::service) 62 | .service(add_snapshot::service) 63 | } 64 | 65 | /// Convert a `anyhow::Error` to an Actix ISE 66 | fn failure_to_ise(err: anyhow::Error) -> actix_web::Error { 67 | error::ErrorInternalServerError(err) 68 | } 69 | 70 | /// Convert a ServerError to an Actix error 71 | fn server_error_to_actix(err: ServerError) -> actix_web::Error { 72 | match err { 73 | ServerError::NoSuchClient => error::ErrorNotFound(err), 74 | ServerError::Other(err) => error::ErrorInternalServerError(err), 75 | } 76 | } 77 | 78 | #[cfg(test)] 79 | mod test { 80 | use super::*; 81 | use taskchampion_sync_server_core::InMemoryStorage; 82 | use uuid::Uuid; 83 | 84 | #[test] 85 | fn client_id_header_allow_all() { 86 | let client_id = Uuid::new_v4(); 87 | let state = 
ServerState { 88 | server: Server::new(Default::default(), InMemoryStorage::new()), 89 | web_config: WebConfig { 90 | client_id_allowlist: None, 91 | create_clients: true, 92 | ..WebConfig::default() 93 | }, 94 | }; 95 | let req = actix_web::test::TestRequest::default() 96 | .insert_header((CLIENT_ID_HEADER, client_id.to_string())) 97 | .to_http_request(); 98 | assert_eq!(state.client_id_header(&req).unwrap(), client_id); 99 | } 100 | 101 | #[test] 102 | fn client_id_header_allow_list() { 103 | let client_id_ok = Uuid::new_v4(); 104 | let client_id_disallowed = Uuid::new_v4(); 105 | let state = ServerState { 106 | server: Server::new(Default::default(), InMemoryStorage::new()), 107 | web_config: WebConfig { 108 | client_id_allowlist: Some([client_id_ok].into()), 109 | create_clients: true, 110 | ..WebConfig::default() 111 | }, 112 | }; 113 | let req = actix_web::test::TestRequest::default() 114 | .insert_header((CLIENT_ID_HEADER, client_id_ok.to_string())) 115 | .to_http_request(); 116 | assert_eq!(state.client_id_header(&req).unwrap(), client_id_ok); 117 | let req = actix_web::test::TestRequest::default() 118 | .insert_header((CLIENT_ID_HEADER, client_id_disallowed.to_string())) 119 | .to_http_request(); 120 | assert_eq!( 121 | state 122 | .client_id_header(&req) 123 | .unwrap_err() 124 | .as_response_error() 125 | .status_code(), 126 | 403 127 | ); 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /server/src/api/add_snapshot.rs: -------------------------------------------------------------------------------- 1 | use crate::api::{server_error_to_actix, ServerState, SNAPSHOT_CONTENT_TYPE}; 2 | use actix_web::{error, post, web, HttpMessage, HttpRequest, HttpResponse, Result}; 3 | use futures::StreamExt; 4 | use std::sync::Arc; 5 | use taskchampion_sync_server_core::VersionId; 6 | 7 | /// Max snapshot size: 100MB 8 | const MAX_SIZE: usize = 100 * 1024 * 1024; 9 | 10 | /// Add a new snapshot, after checking prerequisites. The snapshot should be transmitted in the 11 | /// request entity body and must have content-type `application/vnd.taskchampion.snapshot`. The 12 | /// content can be encoded in any of the formats supported by actix-web. 13 | /// 14 | /// On success, the response is a 200 OK. Even in a 200 OK, the snapshot may not appear in a 15 | /// subsequent `GetSnapshot` call. 16 | /// 17 | /// Returns other 4xx or 5xx responses on other errors. 
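/// For example (assuming a server listening on `localhost:8080`, with placeholder IDs and a local
/// `snapshot.bin` file), a snapshot could be uploaded with:
///
/// ```text
/// curl -X POST http://localhost:8080/v1/client/add-snapshot/<version-id> \
///   -H "X-Client-Id: <client-id>" \
///   -H "Content-Type: application/vnd.taskchampion.snapshot" \
///   --data-binary @snapshot.bin
/// ```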
18 | #[post("/v1/client/add-snapshot/{version_id}")] 19 | pub(crate) async fn service( 20 | req: HttpRequest, 21 | server_state: web::Data>, 22 | path: web::Path, 23 | mut payload: web::Payload, 24 | ) -> Result { 25 | let version_id = path.into_inner(); 26 | 27 | // check content-type 28 | if req.content_type() != SNAPSHOT_CONTENT_TYPE { 29 | return Err(error::ErrorBadRequest("Bad content-type")); 30 | } 31 | 32 | let client_id = server_state.client_id_header(&req)?; 33 | 34 | // read the body in its entirety 35 | let mut body = web::BytesMut::new(); 36 | while let Some(chunk) = payload.next().await { 37 | let chunk = chunk?; 38 | // limit max size of in-memory payload 39 | if (body.len() + chunk.len()) > MAX_SIZE { 40 | return Err(error::ErrorBadRequest("Snapshot over maximum allowed size")); 41 | } 42 | body.extend_from_slice(&chunk); 43 | } 44 | 45 | if body.is_empty() { 46 | return Err(error::ErrorBadRequest("No snapshot supplied")); 47 | } 48 | 49 | server_state 50 | .server 51 | .add_snapshot(client_id, version_id, body.to_vec()) 52 | .await 53 | .map_err(server_error_to_actix)?; 54 | Ok(HttpResponse::Ok().body("")) 55 | } 56 | 57 | #[cfg(test)] 58 | mod test { 59 | use crate::{ 60 | api::CLIENT_ID_HEADER, 61 | web::{WebConfig, WebServer}, 62 | }; 63 | use actix_web::{http::StatusCode, test, App}; 64 | use pretty_assertions::assert_eq; 65 | use taskchampion_sync_server_core::{InMemoryStorage, ServerConfig, Storage, NIL_VERSION_ID}; 66 | use uuid::Uuid; 67 | 68 | #[actix_rt::test] 69 | async fn test_success() -> anyhow::Result<()> { 70 | let client_id = Uuid::new_v4(); 71 | let version_id = Uuid::new_v4(); 72 | let storage = InMemoryStorage::new(); 73 | 74 | // set up the storage contents.. 75 | { 76 | let mut txn = storage.txn(client_id).await.unwrap(); 77 | txn.new_client(version_id).await.unwrap(); 78 | txn.add_version(version_id, NIL_VERSION_ID, vec![]).await?; 79 | txn.commit().await?; 80 | } 81 | 82 | let server = WebServer::new(ServerConfig::default(), WebConfig::default(), storage); 83 | let app = App::new().configure(|sc| server.config(sc)); 84 | let app = test::init_service(app).await; 85 | 86 | let uri = format!("/v1/client/add-snapshot/{version_id}"); 87 | let req = test::TestRequest::post() 88 | .uri(&uri) 89 | .insert_header(("Content-Type", "application/vnd.taskchampion.snapshot")) 90 | .insert_header((CLIENT_ID_HEADER, client_id.to_string())) 91 | .set_payload(b"abcd".to_vec()) 92 | .to_request(); 93 | let resp = test::call_service(&app, req).await; 94 | assert_eq!(resp.status(), StatusCode::OK); 95 | 96 | // read back that snapshot 97 | let uri = "/v1/client/snapshot"; 98 | let req = test::TestRequest::get() 99 | .uri(uri) 100 | .append_header((CLIENT_ID_HEADER, client_id.to_string())) 101 | .to_request(); 102 | let resp = test::call_service(&app, req).await; 103 | assert_eq!(resp.status(), StatusCode::OK); 104 | 105 | use actix_web::body::MessageBody; 106 | let bytes = resp.into_body().try_into_bytes().unwrap(); 107 | assert_eq!(bytes.as_ref(), b"abcd"); 108 | 109 | Ok(()) 110 | } 111 | 112 | #[actix_rt::test] 113 | async fn test_not_added_200() { 114 | let client_id = Uuid::new_v4(); 115 | let version_id = Uuid::new_v4(); 116 | let storage = InMemoryStorage::new(); 117 | 118 | // set up the storage contents.. 
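// (the client exists, but the referenced `version_id` does not, so the snapshot below is accepted but never stored)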
119 | { 120 | let mut txn = storage.txn(client_id).await.unwrap(); 121 | txn.new_client(NIL_VERSION_ID).await.unwrap(); 122 | txn.commit().await.unwrap(); 123 | } 124 | 125 | let server = WebServer::new(ServerConfig::default(), WebConfig::default(), storage); 126 | let app = App::new().configure(|sc| server.config(sc)); 127 | let app = test::init_service(app).await; 128 | 129 | // add a snapshot for a nonexistent version 130 | let uri = format!("/v1/client/add-snapshot/{version_id}"); 131 | let req = test::TestRequest::post() 132 | .uri(&uri) 133 | .append_header(("Content-Type", "application/vnd.taskchampion.snapshot")) 134 | .append_header((CLIENT_ID_HEADER, client_id.to_string())) 135 | .set_payload(b"abcd".to_vec()) 136 | .to_request(); 137 | let resp = test::call_service(&app, req).await; 138 | assert_eq!(resp.status(), StatusCode::OK); 139 | 140 | // read back, seeing no snapshot 141 | let uri = "/v1/client/snapshot"; 142 | let req = test::TestRequest::get() 143 | .uri(uri) 144 | .append_header((CLIENT_ID_HEADER, client_id.to_string())) 145 | .to_request(); 146 | let resp = test::call_service(&app, req).await; 147 | assert_eq!(resp.status(), StatusCode::NOT_FOUND); 148 | } 149 | 150 | #[actix_rt::test] 151 | async fn test_bad_content_type() { 152 | let client_id = Uuid::new_v4(); 153 | let version_id = Uuid::new_v4(); 154 | let storage = InMemoryStorage::new(); 155 | let server = WebServer::new(ServerConfig::default(), WebConfig::default(), storage); 156 | let app = App::new().configure(|sc| server.config(sc)); 157 | let app = test::init_service(app).await; 158 | 159 | let uri = format!("/v1/client/add-snapshot/{version_id}"); 160 | let req = test::TestRequest::post() 161 | .uri(&uri) 162 | .append_header(("Content-Type", "not/correct")) 163 | .append_header((CLIENT_ID_HEADER, client_id.to_string())) 164 | .set_payload(b"abcd".to_vec()) 165 | .to_request(); 166 | let resp = test::call_service(&app, req).await; 167 | assert_eq!(resp.status(), StatusCode::BAD_REQUEST); 168 | } 169 | 170 | #[actix_rt::test] 171 | async fn test_empty_body() { 172 | let client_id = Uuid::new_v4(); 173 | let version_id = Uuid::new_v4(); 174 | let storage = InMemoryStorage::new(); 175 | let server = WebServer::new(ServerConfig::default(), WebConfig::default(), storage); 176 | let app = App::new().configure(|sc| server.config(sc)); 177 | let app = test::init_service(app).await; 178 | 179 | let uri = format!("/v1/client/add-snapshot/{version_id}"); 180 | let req = test::TestRequest::post() 181 | .uri(&uri) 182 | .append_header(( 183 | "Content-Type", 184 | "application/vnd.taskchampion.history-segment", 185 | )) 186 | .append_header((CLIENT_ID_HEADER, client_id.to_string())) 187 | .to_request(); 188 | let resp = test::call_service(&app, req).await; 189 | assert_eq!(resp.status(), StatusCode::BAD_REQUEST); 190 | } 191 | } 192 | -------------------------------------------------------------------------------- /server/src/api/get_child_version.rs: -------------------------------------------------------------------------------- 1 | use crate::api::{ 2 | server_error_to_actix, ServerState, HISTORY_SEGMENT_CONTENT_TYPE, PARENT_VERSION_ID_HEADER, 3 | VERSION_ID_HEADER, 4 | }; 5 | use actix_web::{error, get, web, HttpRequest, HttpResponse, Result}; 6 | use std::sync::Arc; 7 | use taskchampion_sync_server_core::{GetVersionResult, ServerError, VersionId}; 8 | 9 | /// Get a child version. 
10 | /// 11 | /// On succcess, the response is the same sequence of bytes originally sent to the server, 12 | /// with content-type `application/vnd.taskchampion.history-segment`. The `X-Version-Id` and 13 | /// `X-Parent-Version-Id` headers contain the corresponding values. 14 | /// 15 | /// If no such child exists, returns a 404 with no content. 16 | /// Returns other 4xx or 5xx responses on other errors. 17 | #[get("/v1/client/get-child-version/{parent_version_id}")] 18 | pub(crate) async fn service( 19 | req: HttpRequest, 20 | server_state: web::Data>, 21 | path: web::Path, 22 | ) -> Result { 23 | let parent_version_id = path.into_inner(); 24 | let client_id = server_state.client_id_header(&req)?; 25 | 26 | match server_state 27 | .server 28 | .get_child_version(client_id, parent_version_id) 29 | .await 30 | { 31 | Ok(GetVersionResult::Success { 32 | version_id, 33 | parent_version_id, 34 | history_segment, 35 | }) => Ok(HttpResponse::Ok() 36 | .content_type(HISTORY_SEGMENT_CONTENT_TYPE) 37 | .append_header((VERSION_ID_HEADER, version_id.to_string())) 38 | .append_header((PARENT_VERSION_ID_HEADER, parent_version_id.to_string())) 39 | .body(history_segment)), 40 | Ok(GetVersionResult::NotFound) => Err(error::ErrorNotFound("no such version")), 41 | Ok(GetVersionResult::Gone) => Err(error::ErrorGone("version has been deleted")), 42 | // Note that the HTTP client cannot differentiate `NotFound` and `NoSuchClient`, as both 43 | // are a 404 NOT FOUND response. In either case, the HTTP client will typically attempt 44 | // to add a new version, which may create the new client at the same time. 45 | Err(ServerError::NoSuchClient) => Err(error::ErrorNotFound("no such client")), 46 | Err(e) => Err(server_error_to_actix(e)), 47 | } 48 | } 49 | 50 | #[cfg(test)] 51 | mod test { 52 | use crate::{ 53 | api::CLIENT_ID_HEADER, 54 | web::{WebConfig, WebServer}, 55 | }; 56 | use actix_web::{http::StatusCode, test, App}; 57 | use pretty_assertions::assert_eq; 58 | use taskchampion_sync_server_core::{InMemoryStorage, ServerConfig, Storage, NIL_VERSION_ID}; 59 | use uuid::Uuid; 60 | 61 | #[actix_rt::test] 62 | async fn test_success() { 63 | let client_id = Uuid::new_v4(); 64 | let version_id = Uuid::new_v4(); 65 | let parent_version_id = Uuid::new_v4(); 66 | let storage = InMemoryStorage::new(); 67 | 68 | // set up the storage contents.. 
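// (a single version whose parent is `parent_version_id`, which the request below fetches back)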
69 | { 70 | let mut txn = storage.txn(client_id).await.unwrap(); 71 | txn.new_client(Uuid::new_v4()).await.unwrap(); 72 | txn.add_version(version_id, parent_version_id, b"abcd".to_vec()) 73 | .await 74 | .unwrap(); 75 | txn.commit().await.unwrap(); 76 | } 77 | 78 | let server = WebServer::new(ServerConfig::default(), WebConfig::default(), storage); 79 | let app = App::new().configure(|sc| server.config(sc)); 80 | let app = test::init_service(app).await; 81 | 82 | let uri = format!("/v1/client/get-child-version/{parent_version_id}"); 83 | let req = test::TestRequest::get() 84 | .uri(&uri) 85 | .append_header((CLIENT_ID_HEADER, client_id.to_string())) 86 | .to_request(); 87 | let resp = test::call_service(&app, req).await; 88 | assert_eq!(resp.status(), StatusCode::OK); 89 | assert_eq!( 90 | resp.headers().get("X-Version-Id").unwrap(), 91 | &version_id.to_string() 92 | ); 93 | assert_eq!( 94 | resp.headers().get("X-Parent-Version-Id").unwrap(), 95 | &parent_version_id.to_string() 96 | ); 97 | assert_eq!( 98 | resp.headers().get("Content-Type").unwrap(), 99 | &"application/vnd.taskchampion.history-segment".to_string() 100 | ); 101 | 102 | use actix_web::body::MessageBody; 103 | let bytes = resp.into_body().try_into_bytes().unwrap(); 104 | assert_eq!(bytes.as_ref(), b"abcd"); 105 | } 106 | 107 | #[actix_rt::test] 108 | async fn test_client_not_found() { 109 | let client_id = Uuid::new_v4(); 110 | let parent_version_id = Uuid::new_v4(); 111 | let storage = InMemoryStorage::new(); 112 | let server = WebServer::new(ServerConfig::default(), WebConfig::default(), storage); 113 | let app = App::new().configure(|sc| server.config(sc)); 114 | let app = test::init_service(app).await; 115 | 116 | let uri = format!("/v1/client/get-child-version/{parent_version_id}"); 117 | let req = test::TestRequest::get() 118 | .uri(&uri) 119 | .append_header((CLIENT_ID_HEADER, client_id.to_string())) 120 | .to_request(); 121 | let resp = test::call_service(&app, req).await; 122 | assert_eq!(resp.status(), StatusCode::NOT_FOUND); 123 | assert_eq!(resp.headers().get("X-Version-Id"), None); 124 | assert_eq!(resp.headers().get("X-Parent-Version-Id"), None); 125 | } 126 | 127 | #[actix_rt::test] 128 | async fn test_version_not_found_and_gone() { 129 | let client_id = Uuid::new_v4(); 130 | let test_version_id = Uuid::new_v4(); 131 | let storage = InMemoryStorage::new(); 132 | 133 | // create the client and a single version. 
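// (the version's parent is the nil version, making it the client's first and latest version)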
134 | { 135 | let mut txn = storage.txn(client_id).await.unwrap(); 136 | txn.new_client(Uuid::new_v4()).await.unwrap(); 137 | txn.add_version(test_version_id, NIL_VERSION_ID, b"vers".to_vec()) 138 | .await 139 | .unwrap(); 140 | txn.commit().await.unwrap(); 141 | } 142 | let server = WebServer::new(ServerConfig::default(), WebConfig::default(), storage); 143 | let app = App::new().configure(|sc| server.config(sc)); 144 | let app = test::init_service(app).await; 145 | 146 | // the child of the nil version is the added version 147 | let uri = format!("/v1/client/get-child-version/{NIL_VERSION_ID}"); 148 | let req = test::TestRequest::get() 149 | .uri(&uri) 150 | .append_header((CLIENT_ID_HEADER, client_id.to_string())) 151 | .to_request(); 152 | let resp = test::call_service(&app, req).await; 153 | assert_eq!(resp.status(), StatusCode::OK); 154 | assert_eq!( 155 | resp.headers().get("X-Version-Id").unwrap(), 156 | &test_version_id.to_string(), 157 | ); 158 | assert_eq!( 159 | resp.headers().get("X-Parent-Version-Id").unwrap(), 160 | &NIL_VERSION_ID.to_string(), 161 | ); 162 | 163 | // the child of an unknown parent_version_id is GONE. 164 | let uri = format!("/v1/client/get-child-version/{}", Uuid::new_v4()); 165 | let req = test::TestRequest::get() 166 | .uri(&uri) 167 | .append_header((CLIENT_ID_HEADER, client_id.to_string())) 168 | .to_request(); 169 | let resp = test::call_service(&app, req).await; 170 | assert_eq!(resp.status(), StatusCode::GONE); 171 | assert_eq!(resp.headers().get("X-Version-Id"), None); 172 | assert_eq!(resp.headers().get("X-Parent-Version-Id"), None); 173 | 174 | // The child of the latest version is NOT_FOUND. The tests in crate::server test more 175 | // corner cases. 176 | let uri = format!("/v1/client/get-child-version/{test_version_id}"); 177 | let req = test::TestRequest::get() 178 | .uri(&uri) 179 | .append_header((CLIENT_ID_HEADER, client_id.to_string())) 180 | .to_request(); 181 | let resp = test::call_service(&app, req).await; 182 | assert_eq!(resp.status(), StatusCode::NOT_FOUND); 183 | assert_eq!(resp.headers().get("X-Version-Id"), None); 184 | assert_eq!(resp.headers().get("X-Parent-Version-Id"), None); 185 | } 186 | } 187 | -------------------------------------------------------------------------------- /server/src/args.rs: -------------------------------------------------------------------------------- 1 | use crate::web::WebConfig; 2 | use clap::{arg, builder::ValueParser, value_parser, ArgAction, ArgMatches, Command}; 3 | use taskchampion_sync_server_core::ServerConfig; 4 | use uuid::Uuid; 5 | 6 | pub fn command() -> Command { 7 | let defaults = ServerConfig::default(); 8 | let default_snapshot_versions = defaults.snapshot_versions.to_string(); 9 | let default_snapshot_days = defaults.snapshot_days.to_string(); 10 | Command::new("taskchampion-sync-server") 11 | .version(env!("CARGO_PKG_VERSION")) 12 | .about("Server for TaskChampion") 13 | .arg( 14 | arg!(-l --listen
) 15 | .help("Address and Port on which to listen on. Can be an IP Address or a DNS name followed by a colon and a port e.g. localhost:8080") 16 | .value_delimiter(',') 17 | .value_parser(ValueParser::string()) 18 | .env("LISTEN") 19 | .action(ArgAction::Append) 20 | .required(true), 21 | ) 22 | .arg( 23 | arg!(-C --"allow-client-id" "Client IDs to allow (can be repeated; if not specified, all clients are allowed)") 24 | .value_delimiter(',') 25 | .value_parser(value_parser!(Uuid)) 26 | .env("CLIENT_ID") 27 | .action(ArgAction::Append) 28 | .required(false), 29 | ) 30 | .arg( 31 | arg!("create-clients": --"no-create-clients" "If a client does not exist in the database, do not create it") 32 | .env("CREATE_CLIENTS") 33 | .default_value("true") 34 | .action(ArgAction::SetFalse) 35 | .required(false), 36 | ) 37 | .arg( 38 | arg!(--"snapshot-versions" "Target number of versions between snapshots") 39 | .value_parser(value_parser!(u32)) 40 | .env("SNAPSHOT_VERSIONS") 41 | .default_value(default_snapshot_versions), 42 | ) 43 | .arg( 44 | arg!(--"snapshot-days" "Target number of days between snapshots") 45 | .value_parser(value_parser!(i64)) 46 | .env("SNAPSHOT_DAYS") 47 | .default_value(default_snapshot_days), 48 | ) 49 | } 50 | 51 | /// Create a ServerConfig from these args. 52 | pub fn server_config_from_matches(matches: &ArgMatches) -> ServerConfig { 53 | ServerConfig { 54 | snapshot_versions: *matches.get_one("snapshot-versions").unwrap(), 55 | snapshot_days: *matches.get_one("snapshot-days").unwrap(), 56 | } 57 | } 58 | 59 | /// Create a WebConfig from these args. 60 | pub fn web_config_from_matches(matches: &ArgMatches) -> WebConfig { 61 | WebConfig { 62 | client_id_allowlist: matches 63 | .get_many("allow-client-id") 64 | .map(|ids| ids.copied().collect()), 65 | create_clients: matches.get_one("create-clients").copied().unwrap_or(true), 66 | listen_addresses: matches 67 | .get_many::("listen") 68 | .unwrap() 69 | .cloned() 70 | .collect(), 71 | } 72 | } 73 | 74 | #[cfg(test)] 75 | mod test { 76 | #![allow(clippy::bool_assert_comparison)] 77 | 78 | use super::*; 79 | use crate::web::WebServer; 80 | use actix_web::{self, App}; 81 | use clap::ArgMatches; 82 | use taskchampion_sync_server_core::InMemoryStorage; 83 | use temp_env::{with_var, with_var_unset, with_vars, with_vars_unset}; 84 | 85 | /// Get the list of allowed client IDs, sorted. 
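/// Returns `None` if no allow-list was configured.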
86 | fn allowed(matches: ArgMatches) -> Option> { 87 | web_config_from_matches(&matches) 88 | .client_id_allowlist 89 | .map(|ids| ids.into_iter().collect::>()) 90 | .map(|mut ids| { 91 | ids.sort(); 92 | ids 93 | }) 94 | } 95 | 96 | #[test] 97 | fn command_listen_two() { 98 | with_var_unset("LISTEN", || { 99 | let matches = command().get_matches_from([ 100 | "tss", 101 | "--listen", 102 | "localhost:8080", 103 | "--listen", 104 | "otherhost:9090", 105 | ]); 106 | assert_eq!( 107 | web_config_from_matches(&matches).listen_addresses, 108 | vec!["localhost:8080".to_string(), "otherhost:9090".to_string()] 109 | ); 110 | }); 111 | } 112 | 113 | #[test] 114 | fn command_listen_two_env() { 115 | with_var("LISTEN", Some("localhost:8080,otherhost:9090"), || { 116 | let matches = command().get_matches_from(["tss"]); 117 | assert_eq!( 118 | web_config_from_matches(&matches).listen_addresses, 119 | vec!["localhost:8080".to_string(), "otherhost:9090".to_string()] 120 | ); 121 | }); 122 | } 123 | 124 | #[test] 125 | fn command_allowed_client_ids_none() { 126 | with_var_unset("CLIENT_ID", || { 127 | let matches = command().get_matches_from(["tss", "--listen", "localhost:8080"]); 128 | assert_eq!(allowed(matches), None); 129 | }); 130 | } 131 | 132 | #[test] 133 | fn command_allowed_client_ids_one() { 134 | with_var_unset("CLIENT_ID", || { 135 | let matches = command().get_matches_from([ 136 | "tss", 137 | "--listen", 138 | "localhost:8080", 139 | "-C", 140 | "711d5cf3-0cf0-4eb8-9eca-6f7f220638c0", 141 | ]); 142 | assert_eq!( 143 | allowed(matches), 144 | Some(vec![Uuid::parse_str( 145 | "711d5cf3-0cf0-4eb8-9eca-6f7f220638c0" 146 | ) 147 | .unwrap()]) 148 | ); 149 | }); 150 | } 151 | 152 | #[test] 153 | fn command_allowed_client_ids_one_env() { 154 | with_var( 155 | "CLIENT_ID", 156 | Some("711d5cf3-0cf0-4eb8-9eca-6f7f220638c0"), 157 | || { 158 | let matches = command().get_matches_from(["tss", "--listen", "localhost:8080"]); 159 | assert_eq!( 160 | allowed(matches), 161 | Some(vec![Uuid::parse_str( 162 | "711d5cf3-0cf0-4eb8-9eca-6f7f220638c0" 163 | ) 164 | .unwrap()]) 165 | ); 166 | }, 167 | ); 168 | } 169 | 170 | #[test] 171 | fn command_allowed_client_ids_two() { 172 | with_var_unset("CLIENT_ID", || { 173 | let matches = command().get_matches_from([ 174 | "tss", 175 | "--listen", 176 | "localhost:8080", 177 | "-C", 178 | "711d5cf3-0cf0-4eb8-9eca-6f7f220638c0", 179 | "-C", 180 | "bbaf4b61-344a-4a39-a19e-8caa0669b353", 181 | ]); 182 | assert_eq!( 183 | allowed(matches), 184 | Some(vec![ 185 | Uuid::parse_str("711d5cf3-0cf0-4eb8-9eca-6f7f220638c0").unwrap(), 186 | Uuid::parse_str("bbaf4b61-344a-4a39-a19e-8caa0669b353").unwrap() 187 | ]) 188 | ); 189 | }); 190 | } 191 | 192 | #[test] 193 | fn command_allowed_client_ids_two_env() { 194 | with_var( 195 | "CLIENT_ID", 196 | Some("711d5cf3-0cf0-4eb8-9eca-6f7f220638c0,bbaf4b61-344a-4a39-a19e-8caa0669b353"), 197 | || { 198 | let matches = command().get_matches_from(["tss", "--listen", "localhost:8080"]); 199 | assert_eq!( 200 | allowed(matches), 201 | Some(vec![ 202 | Uuid::parse_str("711d5cf3-0cf0-4eb8-9eca-6f7f220638c0").unwrap(), 203 | Uuid::parse_str("bbaf4b61-344a-4a39-a19e-8caa0669b353").unwrap() 204 | ]) 205 | ); 206 | }, 207 | ); 208 | } 209 | 210 | #[test] 211 | fn command_snapshot() { 212 | with_vars_unset(["SNAPSHOT_DAYS", "SNAPSHOT_VERSIONS"], || { 213 | let matches = command().get_matches_from([ 214 | "tss", 215 | "--listen", 216 | "localhost:8080", 217 | "--snapshot-days", 218 | "13", 219 | "--snapshot-versions", 220 | "20", 221 | ]); 222 | let 
server_config = server_config_from_matches(&matches); 223 | assert_eq!(server_config.snapshot_days, 13i64); 224 | assert_eq!(server_config.snapshot_versions, 20u32); 225 | }); 226 | } 227 | 228 | #[test] 229 | fn command_snapshot_env() { 230 | with_vars( 231 | [ 232 | ("SNAPSHOT_DAYS", Some("13")), 233 | ("SNAPSHOT_VERSIONS", Some("20")), 234 | ], 235 | || { 236 | let matches = command().get_matches_from(["tss", "--listen", "localhost:8080"]); 237 | let server_config = server_config_from_matches(&matches); 238 | assert_eq!(server_config.snapshot_days, 13i64); 239 | assert_eq!(server_config.snapshot_versions, 20u32); 240 | }, 241 | ); 242 | } 243 | 244 | #[test] 245 | fn command_create_clients_default() { 246 | with_var_unset("CREATE_CLIENTS", || { 247 | let matches = command().get_matches_from(["tss", "--listen", "localhost:8080"]); 248 | let server_config = web_config_from_matches(&matches); 249 | assert_eq!(server_config.create_clients, true); 250 | }); 251 | } 252 | 253 | #[test] 254 | fn command_create_clients_cmdline() { 255 | with_var_unset("CREATE_CLIENTS", || { 256 | let matches = command().get_matches_from([ 257 | "tss", 258 | "--listen", 259 | "localhost:8080", 260 | "--no-create-clients", 261 | ]); 262 | let server_config = web_config_from_matches(&matches); 263 | assert_eq!(server_config.create_clients, false); 264 | }); 265 | } 266 | 267 | #[test] 268 | fn command_create_clients_env_true() { 269 | with_vars([("CREATE_CLIENTS", Some("true"))], || { 270 | let matches = command().get_matches_from(["tss", "--listen", "localhost:8080"]); 271 | let server_config = web_config_from_matches(&matches); 272 | assert_eq!(server_config.create_clients, true); 273 | }); 274 | } 275 | 276 | #[test] 277 | fn command_create_clients_env_false() { 278 | with_vars([("CREATE_CLIENTS", Some("false"))], || { 279 | let matches = command().get_matches_from(["tss", "--listen", "localhost:8080"]); 280 | let server_config = web_config_from_matches(&matches); 281 | assert_eq!(server_config.create_clients, false); 282 | }); 283 | } 284 | 285 | #[actix_rt::test] 286 | async fn test_index_get() { 287 | let server = WebServer::new( 288 | ServerConfig::default(), 289 | WebConfig::default(), 290 | InMemoryStorage::new(), 291 | ); 292 | let app = App::new().configure(|sc| server.config(sc)); 293 | let app = actix_web::test::init_service(app).await; 294 | 295 | let req = actix_web::test::TestRequest::get().uri("/").to_request(); 296 | let resp = actix_web::test::call_service(&app, req).await; 297 | assert!(resp.status().is_success()); 298 | } 299 | } 300 | -------------------------------------------------------------------------------- /core/src/inmemory.rs: -------------------------------------------------------------------------------- 1 | use super::{Client, Snapshot, Storage, StorageTxn, Version}; 2 | use std::collections::HashMap; 3 | use std::sync::{Mutex, MutexGuard}; 4 | use uuid::Uuid; 5 | 6 | struct Inner { 7 | /// Clients, indexed by client_id 8 | clients: HashMap, 9 | 10 | /// Snapshot data, indexed by client id 11 | snapshots: HashMap>, 12 | 13 | /// Versions, indexed by (client_id, version_id) 14 | versions: HashMap<(Uuid, Uuid), Version>, 15 | 16 | /// Child versions, indexed by (client_id, parent_version_id) 17 | children: HashMap<(Uuid, Uuid), Uuid>, 18 | } 19 | 20 | /// In-memory storage for testing and experimentation. 21 | /// 22 | /// This is not for production use, but supports testing of sync server implementations. 
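/// For example, a test can construct one with `InMemoryStorage::new()`, open a transaction with
/// `storage.txn(client_id).await`, and exercise the `StorageTxn` methods directly, as the tests in
/// this module do.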
23 | /// 24 | /// NOTE: this panics if changes were made in a transaction that is later dropped without being 25 | /// committed, as this likely represents a bug that should be exposed in tests. 26 | pub struct InMemoryStorage(Mutex); 27 | 28 | impl InMemoryStorage { 29 | #[allow(clippy::new_without_default)] 30 | pub fn new() -> Self { 31 | Self(Mutex::new(Inner { 32 | clients: HashMap::new(), 33 | snapshots: HashMap::new(), 34 | versions: HashMap::new(), 35 | children: HashMap::new(), 36 | })) 37 | } 38 | } 39 | 40 | struct InnerTxn<'a> { 41 | client_id: Uuid, 42 | guard: MutexGuard<'a, Inner>, 43 | written: bool, 44 | committed: bool, 45 | } 46 | 47 | #[async_trait::async_trait] 48 | impl Storage for InMemoryStorage { 49 | async fn txn(&self, client_id: Uuid) -> anyhow::Result> { 50 | Ok(Box::new(InnerTxn { 51 | client_id, 52 | guard: self.0.lock().expect("poisoned lock"), 53 | written: false, 54 | committed: false, 55 | })) 56 | } 57 | } 58 | 59 | #[async_trait::async_trait(?Send)] 60 | impl StorageTxn for InnerTxn<'_> { 61 | async fn get_client(&mut self) -> anyhow::Result> { 62 | Ok(self.guard.clients.get(&self.client_id).cloned()) 63 | } 64 | 65 | async fn new_client(&mut self, latest_version_id: Uuid) -> anyhow::Result<()> { 66 | if self.guard.clients.contains_key(&self.client_id) { 67 | return Err(anyhow::anyhow!("Client {} already exists", self.client_id)); 68 | } 69 | self.guard.clients.insert( 70 | self.client_id, 71 | Client { 72 | latest_version_id, 73 | snapshot: None, 74 | }, 75 | ); 76 | self.written = true; 77 | Ok(()) 78 | } 79 | 80 | async fn set_snapshot(&mut self, snapshot: Snapshot, data: Vec) -> anyhow::Result<()> { 81 | let client = self 82 | .guard 83 | .clients 84 | .get_mut(&self.client_id) 85 | .ok_or_else(|| anyhow::anyhow!("no such client"))?; 86 | client.snapshot = Some(snapshot); 87 | self.guard.snapshots.insert(self.client_id, data); 88 | self.written = true; 89 | Ok(()) 90 | } 91 | 92 | async fn get_snapshot_data(&mut self, version_id: Uuid) -> anyhow::Result>> { 93 | // sanity check 94 | let client = self.guard.clients.get(&self.client_id); 95 | let client = client.ok_or_else(|| anyhow::anyhow!("no such client"))?; 96 | if Some(&version_id) != client.snapshot.as_ref().map(|snap| &snap.version_id) { 97 | return Err(anyhow::anyhow!("unexpected snapshot_version_id")); 98 | } 99 | Ok(self.guard.snapshots.get(&self.client_id).cloned()) 100 | } 101 | 102 | async fn get_version_by_parent( 103 | &mut self, 104 | parent_version_id: Uuid, 105 | ) -> anyhow::Result> { 106 | if let Some(parent_version_id) = self 107 | .guard 108 | .children 109 | .get(&(self.client_id, parent_version_id)) 110 | { 111 | Ok(self 112 | .guard 113 | .versions 114 | .get(&(self.client_id, *parent_version_id)) 115 | .cloned()) 116 | } else { 117 | Ok(None) 118 | } 119 | } 120 | 121 | async fn get_version(&mut self, version_id: Uuid) -> anyhow::Result> { 122 | Ok(self 123 | .guard 124 | .versions 125 | .get(&(self.client_id, version_id)) 126 | .cloned()) 127 | } 128 | 129 | async fn add_version( 130 | &mut self, 131 | version_id: Uuid, 132 | parent_version_id: Uuid, 133 | history_segment: Vec, 134 | ) -> anyhow::Result<()> { 135 | let version = Version { 136 | version_id, 137 | parent_version_id, 138 | history_segment, 139 | }; 140 | 141 | if let Some(client) = self.guard.clients.get_mut(&self.client_id) { 142 | client.latest_version_id = version_id; 143 | if let Some(ref mut snap) = client.snapshot { 144 | snap.versions_since += 1; 145 | } 146 | } else { 147 | anyhow::bail!("Client {} 
does not exist", self.client_id); 148 | } 149 | 150 | if self 151 | .guard 152 | .children 153 | .insert((self.client_id, parent_version_id), version_id) 154 | .is_some() 155 | { 156 | anyhow::bail!( 157 | "Client {} already has a child for {}", 158 | self.client_id, 159 | parent_version_id 160 | ); 161 | } 162 | if self 163 | .guard 164 | .versions 165 | .insert((self.client_id, version_id), version) 166 | .is_some() 167 | { 168 | anyhow::bail!( 169 | "Client {} already has a version {}", 170 | self.client_id, 171 | version_id 172 | ); 173 | } 174 | 175 | self.written = true; 176 | Ok(()) 177 | } 178 | 179 | async fn commit(&mut self) -> anyhow::Result<()> { 180 | self.committed = true; 181 | Ok(()) 182 | } 183 | } 184 | 185 | impl Drop for InnerTxn<'_> { 186 | fn drop(&mut self) { 187 | if self.written && !self.committed { 188 | panic!("Uncommitted InMemoryStorage transaction dropped without commiting"); 189 | } 190 | } 191 | } 192 | 193 | #[cfg(test)] 194 | mod test { 195 | use super::*; 196 | use chrono::Utc; 197 | 198 | #[tokio::test] 199 | async fn test_get_client_empty() -> anyhow::Result<()> { 200 | let storage = InMemoryStorage::new(); 201 | let mut txn = storage.txn(Uuid::new_v4()).await?; 202 | let maybe_client = txn.get_client().await?; 203 | assert!(maybe_client.is_none()); 204 | Ok(()) 205 | } 206 | 207 | #[tokio::test] 208 | async fn test_client_storage() -> anyhow::Result<()> { 209 | let storage = InMemoryStorage::new(); 210 | let client_id = Uuid::new_v4(); 211 | let mut txn = storage.txn(client_id).await?; 212 | 213 | let latest_version_id = Uuid::new_v4(); 214 | txn.new_client(latest_version_id).await?; 215 | 216 | let client = txn.get_client().await?.unwrap(); 217 | assert_eq!(client.latest_version_id, latest_version_id); 218 | assert!(client.snapshot.is_none()); 219 | 220 | let latest_version_id = Uuid::new_v4(); 221 | txn.add_version(latest_version_id, Uuid::new_v4(), vec![1, 1]) 222 | .await?; 223 | 224 | let client = txn.get_client().await?.unwrap(); 225 | assert_eq!(client.latest_version_id, latest_version_id); 226 | assert!(client.snapshot.is_none()); 227 | 228 | let snap = Snapshot { 229 | version_id: Uuid::new_v4(), 230 | timestamp: Utc::now(), 231 | versions_since: 4, 232 | }; 233 | txn.set_snapshot(snap.clone(), vec![1, 2, 3]).await?; 234 | 235 | let client = txn.get_client().await?.unwrap(); 236 | assert_eq!(client.latest_version_id, latest_version_id); 237 | assert_eq!(client.snapshot.unwrap(), snap); 238 | 239 | txn.commit().await?; 240 | Ok(()) 241 | } 242 | 243 | #[tokio::test] 244 | async fn test_gvbp_empty() -> anyhow::Result<()> { 245 | let storage = InMemoryStorage::new(); 246 | let client_id = Uuid::new_v4(); 247 | let mut txn = storage.txn(client_id).await?; 248 | let maybe_version = txn.get_version_by_parent(Uuid::new_v4()).await?; 249 | assert!(maybe_version.is_none()); 250 | Ok(()) 251 | } 252 | 253 | #[tokio::test] 254 | async fn test_add_version_and_get_version() -> anyhow::Result<()> { 255 | let storage = InMemoryStorage::new(); 256 | let client_id = Uuid::new_v4(); 257 | let mut txn = storage.txn(client_id).await?; 258 | 259 | let version_id = Uuid::new_v4(); 260 | let parent_version_id = Uuid::new_v4(); 261 | let history_segment = b"abc".to_vec(); 262 | 263 | txn.new_client(parent_version_id).await?; 264 | txn.add_version(version_id, parent_version_id, history_segment.clone()) 265 | .await?; 266 | 267 | let expected = Version { 268 | version_id, 269 | parent_version_id, 270 | history_segment, 271 | }; 272 | 273 | let version = 
txn.get_version_by_parent(parent_version_id).await?.unwrap(); 274 | assert_eq!(version, expected); 275 | 276 | let version = txn.get_version(version_id).await?.unwrap(); 277 | assert_eq!(version, expected); 278 | 279 | txn.commit().await?; 280 | Ok(()) 281 | } 282 | 283 | #[tokio::test] 284 | async fn test_add_version_exists() -> anyhow::Result<()> { 285 | let storage = InMemoryStorage::new(); 286 | let client_id = Uuid::new_v4(); 287 | let mut txn = storage.txn(client_id).await?; 288 | 289 | let version_id = Uuid::new_v4(); 290 | let parent_version_id = Uuid::new_v4(); 291 | let history_segment = b"abc".to_vec(); 292 | 293 | txn.new_client(parent_version_id).await?; 294 | txn.add_version(version_id, parent_version_id, history_segment.clone()) 295 | .await?; 296 | assert!(txn 297 | .add_version(version_id, parent_version_id, history_segment.clone()) 298 | .await 299 | .is_err()); 300 | txn.commit().await?; 301 | Ok(()) 302 | } 303 | 304 | #[tokio::test] 305 | async fn test_snapshots() -> anyhow::Result<()> { 306 | let storage = InMemoryStorage::new(); 307 | let client_id = Uuid::new_v4(); 308 | let mut txn = storage.txn(client_id).await?; 309 | 310 | txn.new_client(Uuid::new_v4()).await?; 311 | assert!(txn.get_client().await?.unwrap().snapshot.is_none()); 312 | 313 | let snap = Snapshot { 314 | version_id: Uuid::new_v4(), 315 | timestamp: Utc::now(), 316 | versions_since: 3, 317 | }; 318 | txn.set_snapshot(snap.clone(), vec![9, 8, 9]).await?; 319 | 320 | assert_eq!( 321 | txn.get_snapshot_data(snap.version_id).await?.unwrap(), 322 | vec![9, 8, 9] 323 | ); 324 | assert_eq!(txn.get_client().await?.unwrap().snapshot, Some(snap)); 325 | 326 | let snap2 = Snapshot { 327 | version_id: Uuid::new_v4(), 328 | timestamp: Utc::now(), 329 | versions_since: 10, 330 | }; 331 | txn.set_snapshot(snap2.clone(), vec![0, 2, 4, 6]).await?; 332 | 333 | assert_eq!( 334 | txn.get_snapshot_data(snap2.version_id).await?.unwrap(), 335 | vec![0, 2, 4, 6] 336 | ); 337 | assert_eq!(txn.get_client().await?.unwrap().snapshot, Some(snap2)); 338 | 339 | // check that mismatched version is detected 340 | assert!(txn.get_snapshot_data(Uuid::new_v4()).await.is_err()); 341 | 342 | txn.commit().await?; 343 | Ok(()) 344 | } 345 | } 346 | -------------------------------------------------------------------------------- /server/src/api/add_version.rs: -------------------------------------------------------------------------------- 1 | use crate::api::{ 2 | failure_to_ise, server_error_to_actix, ServerState, HISTORY_SEGMENT_CONTENT_TYPE, 3 | PARENT_VERSION_ID_HEADER, SNAPSHOT_REQUEST_HEADER, VERSION_ID_HEADER, 4 | }; 5 | use actix_web::{error, post, web, HttpMessage, HttpRequest, HttpResponse, Result}; 6 | use futures::StreamExt; 7 | use std::sync::Arc; 8 | use taskchampion_sync_server_core::{ 9 | AddVersionResult, ServerError, SnapshotUrgency, VersionId, NIL_VERSION_ID, 10 | }; 11 | 12 | /// Max history segment size: 100MB 13 | const MAX_SIZE: usize = 100 * 1024 * 1024; 14 | 15 | /// Add a new version, after checking prerequisites. The history segment should be transmitted in 16 | /// the request entity body and must have content-type 17 | /// `application/vnd.taskchampion.history-segment`. The content can be encoded in any of the 18 | /// formats supported by actix-web. 19 | /// 20 | /// On success, the response is a 200 OK with the new version ID in the `X-Version-Id` header. 
If 21 | /// the version cannot be added due to a conflict, the response is a 409 CONFLICT with the expected 22 | /// parent version ID in the `X-Parent-Version-Id` header. 23 | /// 24 | /// If included, a snapshot request appears in the `X-Snapshot-Request` header with value 25 | /// `urgency=low` or `urgency=high`. 26 | /// 27 | /// Returns other 4xx or 5xx responses on other errors. 28 | #[post("/v1/client/add-version/{parent_version_id}")] 29 | pub(crate) async fn service( 30 | req: HttpRequest, 31 | server_state: web::Data>, 32 | path: web::Path, 33 | mut payload: web::Payload, 34 | ) -> Result { 35 | let parent_version_id = path.into_inner(); 36 | 37 | // check content-type 38 | if req.content_type() != HISTORY_SEGMENT_CONTENT_TYPE { 39 | return Err(error::ErrorBadRequest("Bad content-type")); 40 | } 41 | 42 | let client_id = server_state.client_id_header(&req)?; 43 | 44 | // read the body in its entirety 45 | let mut body = web::BytesMut::new(); 46 | while let Some(chunk) = payload.next().await { 47 | let chunk = chunk?; 48 | // limit max size of in-memory payload 49 | if (body.len() + chunk.len()) > MAX_SIZE { 50 | return Err(error::ErrorBadRequest("overflow")); 51 | } 52 | body.extend_from_slice(&chunk); 53 | } 54 | 55 | if body.is_empty() { 56 | return Err(error::ErrorBadRequest("Empty body")); 57 | } 58 | 59 | loop { 60 | return match server_state 61 | .server 62 | .add_version(client_id, parent_version_id, body.to_vec()) 63 | .await 64 | { 65 | Ok((AddVersionResult::Ok(version_id), snap_urgency)) => { 66 | let mut rb = HttpResponse::Ok(); 67 | rb.append_header((VERSION_ID_HEADER, version_id.to_string())); 68 | match snap_urgency { 69 | SnapshotUrgency::None => {} 70 | SnapshotUrgency::Low => { 71 | rb.append_header((SNAPSHOT_REQUEST_HEADER, "urgency=low")); 72 | } 73 | SnapshotUrgency::High => { 74 | rb.append_header((SNAPSHOT_REQUEST_HEADER, "urgency=high")); 75 | } 76 | }; 77 | Ok(rb.finish()) 78 | } 79 | Ok((AddVersionResult::ExpectedParentVersion(parent_version_id), _)) => { 80 | let mut rb = HttpResponse::Conflict(); 81 | rb.append_header((PARENT_VERSION_ID_HEADER, parent_version_id.to_string())); 82 | Ok(rb.finish()) 83 | } 84 | Err(ServerError::NoSuchClient) if server_state.web_config.create_clients => { 85 | // Create a new client and repeat the `add_version` call. 86 | let mut txn = server_state 87 | .server 88 | .txn(client_id) 89 | .await 90 | .map_err(server_error_to_actix)?; 91 | txn.new_client(NIL_VERSION_ID) 92 | .await 93 | .map_err(failure_to_ise)?; 94 | txn.commit().await.map_err(failure_to_ise)?; 95 | continue; 96 | } 97 | Err(e) => Err(server_error_to_actix(e)), 98 | }; 99 | } 100 | } 101 | 102 | #[cfg(test)] 103 | mod test { 104 | use crate::{ 105 | api::CLIENT_ID_HEADER, 106 | web::{WebConfig, WebServer}, 107 | }; 108 | use actix_web::{http::StatusCode, test, App}; 109 | use pretty_assertions::assert_eq; 110 | use taskchampion_sync_server_core::{InMemoryStorage, ServerConfig, Storage}; 111 | use uuid::Uuid; 112 | 113 | #[actix_rt::test] 114 | async fn test_success() { 115 | let client_id = Uuid::new_v4(); 116 | let version_id = Uuid::new_v4(); 117 | let parent_version_id = Uuid::new_v4(); 118 | let storage = InMemoryStorage::new(); 119 | 120 | // set up the storage contents.. 
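        // The client row is created directly through the storage layer so that
        // the request below exercises only the add-version endpoint. For
        // illustration only, a hypothetical on-the-wire equivalent of that
        // request (the client-id header name, host, and port are assumptions,
        // not taken from this crate) might look like:
        //
        //   curl -X POST \
        //     -H "Content-Type: application/vnd.taskchampion.history-segment" \
        //     -H "X-Client-Id: <client uuid>" \
        //     --data-binary @history-segment.bin \
        //     http://localhost:8080/v1/client/add-version/<parent version uuid>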
121 | { 122 | let mut txn = storage.txn(client_id).await.unwrap(); 123 | txn.new_client(Uuid::nil()).await.unwrap(); 124 | txn.commit().await.unwrap(); 125 | } 126 | 127 | let server = WebServer::new(ServerConfig::default(), WebConfig::default(), storage); 128 | let app = App::new().configure(|sc| server.config(sc)); 129 | let app = test::init_service(app).await; 130 | 131 | let uri = format!("/v1/client/add-version/{parent_version_id}"); 132 | let req = test::TestRequest::post() 133 | .uri(&uri) 134 | .append_header(( 135 | "Content-Type", 136 | "application/vnd.taskchampion.history-segment", 137 | )) 138 | .append_header((CLIENT_ID_HEADER, client_id.to_string())) 139 | .set_payload(b"abcd".to_vec()) 140 | .to_request(); 141 | let resp = test::call_service(&app, req).await; 142 | assert_eq!(resp.status(), StatusCode::OK); 143 | 144 | // the returned version ID is random, but let's check that it's not 145 | // the passed parent version ID, at least 146 | let new_version_id = resp.headers().get("X-Version-Id").unwrap(); 147 | assert!(new_version_id != &version_id.to_string()); 148 | 149 | // Shapshot should be requested, since there is no existing snapshot 150 | let snapshot_request = resp.headers().get("X-Snapshot-Request").unwrap(); 151 | assert_eq!(snapshot_request, "urgency=high"); 152 | 153 | assert_eq!(resp.headers().get("X-Parent-Version-Id"), None); 154 | } 155 | 156 | #[actix_rt::test] 157 | async fn test_auto_add_client() { 158 | let client_id = Uuid::new_v4(); 159 | let version_id = Uuid::new_v4(); 160 | let parent_version_id = Uuid::new_v4(); 161 | let server = WebServer::new( 162 | ServerConfig::default(), 163 | WebConfig::default(), 164 | InMemoryStorage::new(), 165 | ); 166 | let app = App::new().configure(|sc| server.config(sc)); 167 | let app = test::init_service(app).await; 168 | 169 | let uri = format!("/v1/client/add-version/{parent_version_id}"); 170 | let req = test::TestRequest::post() 171 | .uri(&uri) 172 | .append_header(( 173 | "Content-Type", 174 | "application/vnd.taskchampion.history-segment", 175 | )) 176 | .append_header((CLIENT_ID_HEADER, client_id.to_string())) 177 | .set_payload(b"abcd".to_vec()) 178 | .to_request(); 179 | let resp = test::call_service(&app, req).await; 180 | assert_eq!(resp.status(), StatusCode::OK); 181 | 182 | // the returned version ID is random, but let's check that it's not 183 | // the passed parent version ID, at least 184 | let new_version_id = resp.headers().get("X-Version-Id").unwrap(); 185 | let new_version_id = Uuid::parse_str(new_version_id.to_str().unwrap()).unwrap(); 186 | assert!(new_version_id != version_id); 187 | 188 | // Shapshot should be requested, since there is no existing snapshot 189 | let snapshot_request = resp.headers().get("X-Snapshot-Request").unwrap(); 190 | assert_eq!(snapshot_request, "urgency=high"); 191 | 192 | assert_eq!(resp.headers().get("X-Parent-Version-Id"), None); 193 | 194 | // Check that the client really was created 195 | { 196 | let mut txn = server.server_state.server.txn(client_id).await.unwrap(); 197 | let client = txn.get_client().await.unwrap().unwrap(); 198 | assert_eq!(client.latest_version_id, new_version_id); 199 | assert_eq!(client.snapshot, None); 200 | } 201 | } 202 | 203 | #[actix_rt::test] 204 | async fn test_auto_add_client_disabled() { 205 | let client_id = Uuid::new_v4(); 206 | let parent_version_id = Uuid::new_v4(); 207 | let server = WebServer::new( 208 | ServerConfig::default(), 209 | WebConfig { 210 | create_clients: false, 211 | ..WebConfig::default() 212 | }, 213 | 
InMemoryStorage::new(), 214 | ); 215 | let app = App::new().configure(|sc| server.config(sc)); 216 | let app = test::init_service(app).await; 217 | 218 | let uri = format!("/v1/client/add-version/{parent_version_id}"); 219 | let req = test::TestRequest::post() 220 | .uri(&uri) 221 | .append_header(( 222 | "Content-Type", 223 | "application/vnd.taskchampion.history-segment", 224 | )) 225 | .append_header((CLIENT_ID_HEADER, client_id.to_string())) 226 | .set_payload(b"abcd".to_vec()) 227 | .to_request(); 228 | let resp = test::call_service(&app, req).await; 229 | // Client is not added, and returns 404. 230 | assert_eq!(resp.status(), StatusCode::NOT_FOUND); 231 | } 232 | 233 | #[actix_rt::test] 234 | async fn test_conflict() { 235 | let client_id = Uuid::new_v4(); 236 | let version_id = Uuid::new_v4(); 237 | let parent_version_id = Uuid::new_v4(); 238 | let storage = InMemoryStorage::new(); 239 | 240 | // set up the storage contents.. 241 | { 242 | let mut txn = storage.txn(client_id).await.unwrap(); 243 | txn.new_client(version_id).await.unwrap(); 244 | txn.commit().await.unwrap(); 245 | } 246 | 247 | let server = WebServer::new(ServerConfig::default(), WebConfig::default(), storage); 248 | let app = App::new().configure(|sc| server.config(sc)); 249 | let app = test::init_service(app).await; 250 | 251 | let uri = format!("/v1/client/add-version/{parent_version_id}"); 252 | let req = test::TestRequest::post() 253 | .uri(&uri) 254 | .append_header(( 255 | "Content-Type", 256 | "application/vnd.taskchampion.history-segment", 257 | )) 258 | .append_header((CLIENT_ID_HEADER, client_id.to_string())) 259 | .set_payload(b"abcd".to_vec()) 260 | .to_request(); 261 | let resp = test::call_service(&app, req).await; 262 | assert_eq!(resp.status(), StatusCode::CONFLICT); 263 | assert_eq!(resp.headers().get("X-Version-Id"), None); 264 | assert_eq!( 265 | resp.headers().get("X-Parent-Version-Id").unwrap(), 266 | &version_id.to_string() 267 | ); 268 | } 269 | 270 | #[actix_rt::test] 271 | async fn test_bad_content_type() { 272 | let client_id = Uuid::new_v4(); 273 | let parent_version_id = Uuid::new_v4(); 274 | let storage = InMemoryStorage::new(); 275 | let server = WebServer::new(ServerConfig::default(), WebConfig::default(), storage); 276 | let app = App::new().configure(|sc| server.config(sc)); 277 | let app = test::init_service(app).await; 278 | 279 | let uri = format!("/v1/client/add-version/{parent_version_id}"); 280 | let req = test::TestRequest::post() 281 | .uri(&uri) 282 | .append_header(("Content-Type", "not/correct")) 283 | .append_header((CLIENT_ID_HEADER, client_id.to_string())) 284 | .set_payload(b"abcd".to_vec()) 285 | .to_request(); 286 | let resp = test::call_service(&app, req).await; 287 | assert_eq!(resp.status(), StatusCode::BAD_REQUEST); 288 | } 289 | 290 | #[actix_rt::test] 291 | async fn test_empty_body() { 292 | let client_id = Uuid::new_v4(); 293 | let parent_version_id = Uuid::new_v4(); 294 | let storage = InMemoryStorage::new(); 295 | let server = WebServer::new(ServerConfig::default(), WebConfig::default(), storage); 296 | let app = App::new().configure(|sc| server.config(sc)); 297 | let app = test::init_service(app).await; 298 | 299 | let uri = format!("/v1/client/add-version/{parent_version_id}"); 300 | let req = test::TestRequest::post() 301 | .uri(&uri) 302 | .append_header(( 303 | "Content-Type", 304 | "application/vnd.taskchampion.history-segment", 305 | )) 306 | .append_header((CLIENT_ID_HEADER, client_id.to_string())) 307 | .to_request(); 308 | let resp = 
test::call_service(&app, req).await; 309 | assert_eq!(resp.status(), StatusCode::BAD_REQUEST); 310 | } 311 | } 312 | -------------------------------------------------------------------------------- /sqlite/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! This crate implements a SQLite storage backend for the TaskChampion sync server. 2 | //! 3 | //! Use the [`SqliteStorage`] type as an implementation of the [`Storage`] trait. 4 | //! 5 | //! This crate is intended for small deployments of a sync server, supporting one or a small number 6 | //! of users. The schema for the database is considered an implementation detail. For more robust 7 | //! database support, consider `taskchampion-sync-server-storage-postgres`. 8 | 9 | use anyhow::Context; 10 | use chrono::{TimeZone, Utc}; 11 | use rusqlite::types::{FromSql, ToSql}; 12 | use rusqlite::{params, Connection, OptionalExtension}; 13 | use std::path::Path; 14 | use taskchampion_sync_server_core::{Client, Snapshot, Storage, StorageTxn, Version}; 15 | use uuid::Uuid; 16 | 17 | /// Newtype to allow implementing `FromSql` for foreign `uuid::Uuid` 18 | struct StoredUuid(Uuid); 19 | 20 | /// Conversion from Uuid stored as a string (rusqlite's uuid feature stores as binary blob) 21 | impl FromSql for StoredUuid { 22 | fn column_result(value: rusqlite::types::ValueRef<'_>) -> rusqlite::types::FromSqlResult { 23 | let u = Uuid::parse_str(value.as_str()?) 24 | .map_err(|_| rusqlite::types::FromSqlError::InvalidType)?; 25 | Ok(StoredUuid(u)) 26 | } 27 | } 28 | 29 | /// Store Uuid as string in database 30 | impl ToSql for StoredUuid { 31 | fn to_sql(&self) -> rusqlite::Result> { 32 | let s = self.0.to_string(); 33 | Ok(s.into()) 34 | } 35 | } 36 | 37 | /// An on-disk storage backend which uses SQLite. 38 | /// 39 | /// A new connection is opened for each transaction, and only one transaction may be active at a 40 | /// time; a second call to `txn` will block until the first transaction is dropped. 41 | pub struct SqliteStorage { 42 | db_file: std::path::PathBuf, 43 | } 44 | 45 | impl SqliteStorage { 46 | fn new_connection(&self) -> anyhow::Result { 47 | Ok(Connection::open(&self.db_file)?) 48 | } 49 | 50 | /// Create a new instance using a database at the given directory. 51 | /// 52 | /// The database will be stored in a file named `taskchampion-sync-server.sqlite3` in the given 53 | /// directory. The database will be created if it does not exist. 54 | pub fn new>(directory: P) -> anyhow::Result { 55 | std::fs::create_dir_all(&directory) 56 | .with_context(|| format!("Failed to create `{}`.", directory.as_ref().display()))?; 57 | let db_file = directory.as_ref().join("taskchampion-sync-server.sqlite3"); 58 | 59 | let o = SqliteStorage { db_file }; 60 | 61 | let con = o.new_connection()?; 62 | 63 | // Use the modern WAL mode. 
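        // In WAL mode readers do not block the single writer (and vice versa),
        // which suits this backend's one-connection-per-transaction design
        // better than SQLite's default rollback journal.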
64 | con.query_row("PRAGMA journal_mode=WAL", [], |_row| Ok(())) 65 | .context("Setting journal_mode=WAL")?; 66 | 67 | let queries = vec![ 68 | "CREATE TABLE IF NOT EXISTS clients ( 69 | client_id STRING PRIMARY KEY, 70 | latest_version_id STRING, 71 | snapshot_version_id STRING, 72 | versions_since_snapshot INTEGER, 73 | snapshot_timestamp INTEGER, 74 | snapshot BLOB);", 75 | "CREATE TABLE IF NOT EXISTS versions (version_id STRING PRIMARY KEY, client_id STRING, parent_version_id STRING, history_segment BLOB);", 76 | "CREATE INDEX IF NOT EXISTS versions_by_parent ON versions (parent_version_id);", 77 | ]; 78 | for q in queries { 79 | con.execute(q, []) 80 | .context("Error while creating SQLite tables")?; 81 | } 82 | 83 | Ok(o) 84 | } 85 | } 86 | 87 | #[async_trait::async_trait] 88 | impl Storage for SqliteStorage { 89 | async fn txn(&self, client_id: Uuid) -> anyhow::Result> { 90 | let con = self.new_connection()?; 91 | // Begin the transaction on this new connection. An IMMEDIATE connection is in 92 | // write (exclusive) mode from the start. 93 | con.execute("BEGIN IMMEDIATE", [])?; 94 | let txn = Txn { con, client_id }; 95 | Ok(Box::new(txn)) 96 | } 97 | } 98 | 99 | struct Txn { 100 | // SQLite only allows one concurrent transaction per connection, and rusqlite emulates 101 | // transactions by running `BEGIN ...` and `COMMIT` at appropriate times. So we will do 102 | // the same. 103 | con: Connection, 104 | client_id: Uuid, 105 | } 106 | 107 | impl Txn { 108 | /// Implementation for queries from the versions table 109 | fn get_version_impl( 110 | &mut self, 111 | query: &'static str, 112 | client_id: Uuid, 113 | version_id_arg: Uuid, 114 | ) -> anyhow::Result> { 115 | let r = self 116 | .con 117 | .query_row( 118 | query, 119 | params![&StoredUuid(version_id_arg), &StoredUuid(client_id)], 120 | |r| { 121 | let version_id: StoredUuid = r.get("version_id")?; 122 | let parent_version_id: StoredUuid = r.get("parent_version_id")?; 123 | 124 | Ok(Version { 125 | version_id: version_id.0, 126 | parent_version_id: parent_version_id.0, 127 | history_segment: r.get("history_segment")?, 128 | }) 129 | }, 130 | ) 131 | .optional() 132 | .context("Error getting version")?; 133 | Ok(r) 134 | } 135 | } 136 | 137 | #[async_trait::async_trait(?Send)] 138 | impl StorageTxn for Txn { 139 | async fn get_client(&mut self) -> anyhow::Result> { 140 | let result: Option = self 141 | .con 142 | .query_row( 143 | "SELECT 144 | latest_version_id, 145 | snapshot_timestamp, 146 | versions_since_snapshot, 147 | snapshot_version_id 148 | FROM clients 149 | WHERE client_id = ? 
150 | LIMIT 1", 151 | [&StoredUuid(self.client_id)], 152 | |r| { 153 | let latest_version_id: StoredUuid = r.get(0)?; 154 | let snapshot_timestamp: Option = r.get(1)?; 155 | let versions_since_snapshot: Option = r.get(2)?; 156 | let snapshot_version_id: Option = r.get(3)?; 157 | 158 | // if all of the relevant fields are non-NULL, return a snapshot 159 | let snapshot = match ( 160 | snapshot_timestamp, 161 | versions_since_snapshot, 162 | snapshot_version_id, 163 | ) { 164 | (Some(ts), Some(vs), Some(v)) => Some(Snapshot { 165 | version_id: v.0, 166 | timestamp: Utc.timestamp_opt(ts, 0).unwrap(), 167 | versions_since: vs, 168 | }), 169 | _ => None, 170 | }; 171 | Ok(Client { 172 | latest_version_id: latest_version_id.0, 173 | snapshot, 174 | }) 175 | }, 176 | ) 177 | .optional() 178 | .context("Error getting client")?; 179 | 180 | Ok(result) 181 | } 182 | 183 | async fn new_client(&mut self, latest_version_id: Uuid) -> anyhow::Result<()> { 184 | self.con 185 | .execute( 186 | "INSERT INTO clients (client_id, latest_version_id) VALUES (?, ?)", 187 | params![&StoredUuid(self.client_id), &StoredUuid(latest_version_id)], 188 | ) 189 | .context("Error creating/updating client")?; 190 | Ok(()) 191 | } 192 | 193 | async fn set_snapshot(&mut self, snapshot: Snapshot, data: Vec) -> anyhow::Result<()> { 194 | self.con 195 | .execute( 196 | "UPDATE clients 197 | SET 198 | snapshot_version_id = ?, 199 | snapshot_timestamp = ?, 200 | versions_since_snapshot = ?, 201 | snapshot = ? 202 | WHERE client_id = ?", 203 | params![ 204 | &StoredUuid(snapshot.version_id), 205 | snapshot.timestamp.timestamp(), 206 | snapshot.versions_since, 207 | data, 208 | &StoredUuid(self.client_id), 209 | ], 210 | ) 211 | .context("Error creating/updating snapshot")?; 212 | Ok(()) 213 | } 214 | 215 | async fn get_snapshot_data(&mut self, version_id: Uuid) -> anyhow::Result>> { 216 | let r = self 217 | .con 218 | .query_row( 219 | "SELECT snapshot, snapshot_version_id FROM clients WHERE client_id = ?", 220 | params![&StoredUuid(self.client_id)], 221 | |r| { 222 | let v: StoredUuid = r.get("snapshot_version_id")?; 223 | let d: Vec = r.get("snapshot")?; 224 | Ok((v.0, d)) 225 | }, 226 | ) 227 | .optional() 228 | .context("Error getting snapshot")?; 229 | r.map(|(v, d)| { 230 | if v != version_id { 231 | return Err(anyhow::anyhow!("unexpected snapshot_version_id")); 232 | } 233 | 234 | Ok(d) 235 | }) 236 | .transpose() 237 | } 238 | 239 | async fn get_version_by_parent( 240 | &mut self, 241 | parent_version_id: Uuid, 242 | ) -> anyhow::Result> { 243 | self.get_version_impl( 244 | "SELECT version_id, parent_version_id, history_segment FROM versions WHERE parent_version_id = ? AND client_id = ?", 245 | self.client_id, 246 | parent_version_id) 247 | } 248 | 249 | async fn get_version(&mut self, version_id: Uuid) -> anyhow::Result> { 250 | self.get_version_impl( 251 | "SELECT version_id, parent_version_id, history_segment FROM versions WHERE version_id = ? 
AND client_id = ?", 252 | self.client_id, 253 | version_id) 254 | } 255 | 256 | async fn add_version( 257 | &mut self, 258 | version_id: Uuid, 259 | parent_version_id: Uuid, 260 | history_segment: Vec, 261 | ) -> anyhow::Result<()> { 262 | self.con.execute( 263 | "INSERT INTO versions (version_id, client_id, parent_version_id, history_segment) VALUES(?, ?, ?, ?)", 264 | params![ 265 | StoredUuid(version_id), 266 | StoredUuid(self.client_id), 267 | StoredUuid(parent_version_id), 268 | history_segment 269 | ] 270 | ) 271 | .context("Error adding version")?; 272 | let rows_changed = self 273 | .con 274 | .execute( 275 | "UPDATE clients 276 | SET 277 | latest_version_id = ?, 278 | versions_since_snapshot = versions_since_snapshot + 1 279 | WHERE client_id = ? and (latest_version_id = ? or latest_version_id = ?)", 280 | params![ 281 | StoredUuid(version_id), 282 | StoredUuid(self.client_id), 283 | StoredUuid(parent_version_id), 284 | StoredUuid(Uuid::nil()) 285 | ], 286 | ) 287 | .context("Error updating client for new version")?; 288 | 289 | if rows_changed == 0 { 290 | anyhow::bail!("clients.latest_version_id does not match parent_version_id"); 291 | } 292 | 293 | Ok(()) 294 | } 295 | 296 | async fn commit(&mut self) -> anyhow::Result<()> { 297 | self.con.execute("COMMIT", [])?; 298 | Ok(()) 299 | } 300 | } 301 | 302 | #[cfg(test)] 303 | mod test { 304 | use super::*; 305 | use chrono::DateTime; 306 | use pretty_assertions::assert_eq; 307 | use tempfile::TempDir; 308 | 309 | #[tokio::test] 310 | async fn test_emtpy_dir() -> anyhow::Result<()> { 311 | let tmp_dir = TempDir::new()?; 312 | let non_existant = tmp_dir.path().join("subdir"); 313 | let storage = SqliteStorage::new(non_existant)?; 314 | let client_id = Uuid::new_v4(); 315 | let mut txn = storage.txn(client_id).await?; 316 | let maybe_client = txn.get_client().await?; 317 | assert!(maybe_client.is_none()); 318 | Ok(()) 319 | } 320 | 321 | #[tokio::test] 322 | async fn test_get_client_empty() -> anyhow::Result<()> { 323 | let tmp_dir = TempDir::new()?; 324 | let storage = SqliteStorage::new(tmp_dir.path())?; 325 | let client_id = Uuid::new_v4(); 326 | let mut txn = storage.txn(client_id).await?; 327 | let maybe_client = txn.get_client().await?; 328 | assert!(maybe_client.is_none()); 329 | Ok(()) 330 | } 331 | 332 | #[tokio::test] 333 | async fn test_client_storage() -> anyhow::Result<()> { 334 | let tmp_dir = TempDir::new()?; 335 | let storage = SqliteStorage::new(tmp_dir.path())?; 336 | let client_id = Uuid::new_v4(); 337 | let mut txn = storage.txn(client_id).await?; 338 | 339 | let latest_version_id = Uuid::new_v4(); 340 | txn.new_client(latest_version_id).await?; 341 | 342 | let client = txn.get_client().await?.unwrap(); 343 | assert_eq!(client.latest_version_id, latest_version_id); 344 | assert!(client.snapshot.is_none()); 345 | 346 | let new_version_id = Uuid::new_v4(); 347 | txn.add_version(new_version_id, latest_version_id, vec![1, 1]) 348 | .await?; 349 | 350 | let client = txn.get_client().await?.unwrap(); 351 | assert_eq!(client.latest_version_id, new_version_id); 352 | assert!(client.snapshot.is_none()); 353 | 354 | let snap = Snapshot { 355 | version_id: Uuid::new_v4(), 356 | timestamp: "2014-11-28T12:00:09Z".parse::>().unwrap(), 357 | versions_since: 4, 358 | }; 359 | txn.set_snapshot(snap.clone(), vec![1, 2, 3]).await?; 360 | 361 | let client = txn.get_client().await?.unwrap(); 362 | assert_eq!(client.latest_version_id, new_version_id); 363 | assert_eq!(client.snapshot.unwrap(), snap); 364 | 365 | Ok(()) 366 | } 367 | 
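    // Illustrative sketch, not exercised by these tests: wiring `SqliteStorage`
    // into the core protocol `Server`. This assumes that
    // `taskchampion-sync-server-core` re-exports `Server` and `ServerConfig`
    // from its crate root, as the server crate's imports suggest.
    #[allow(dead_code)]
    fn example_build_server(
        data_dir: &Path,
    ) -> anyhow::Result<taskchampion_sync_server_core::Server> {
        // Open (or create) the database under `data_dir`, then hand the storage
        // to the core server, which implements the sync protocol on top of it.
        let storage = SqliteStorage::new(data_dir)?;
        Ok(taskchampion_sync_server_core::Server::new(
            taskchampion_sync_server_core::ServerConfig::default(),
            storage,
        ))
    }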
368 | #[tokio::test] 369 | async fn test_gvbp_empty() -> anyhow::Result<()> { 370 | let tmp_dir = TempDir::new()?; 371 | let storage = SqliteStorage::new(tmp_dir.path())?; 372 | let client_id = Uuid::new_v4(); 373 | let mut txn = storage.txn(client_id).await?; 374 | let maybe_version = txn.get_version_by_parent(Uuid::new_v4()).await?; 375 | assert!(maybe_version.is_none()); 376 | Ok(()) 377 | } 378 | 379 | #[tokio::test] 380 | async fn test_add_version_and_get_version() -> anyhow::Result<()> { 381 | let tmp_dir = TempDir::new()?; 382 | let storage = SqliteStorage::new(tmp_dir.path())?; 383 | let client_id = Uuid::new_v4(); 384 | let mut txn = storage.txn(client_id).await?; 385 | 386 | let parent_version_id = Uuid::new_v4(); 387 | txn.new_client(parent_version_id).await?; 388 | 389 | let version_id = Uuid::new_v4(); 390 | let history_segment = b"abc".to_vec(); 391 | txn.add_version(version_id, parent_version_id, history_segment.clone()) 392 | .await?; 393 | 394 | let expected = Version { 395 | version_id, 396 | parent_version_id, 397 | history_segment, 398 | }; 399 | 400 | let version = txn.get_version_by_parent(parent_version_id).await?.unwrap(); 401 | assert_eq!(version, expected); 402 | 403 | let version = txn.get_version(version_id).await?.unwrap(); 404 | assert_eq!(version, expected); 405 | 406 | Ok(()) 407 | } 408 | 409 | #[tokio::test] 410 | async fn test_add_version_exists() -> anyhow::Result<()> { 411 | let tmp_dir = TempDir::new()?; 412 | let storage = SqliteStorage::new(tmp_dir.path())?; 413 | let client_id = Uuid::new_v4(); 414 | let mut txn = storage.txn(client_id).await?; 415 | 416 | let parent_version_id = Uuid::new_v4(); 417 | txn.new_client(parent_version_id).await?; 418 | 419 | let version_id = Uuid::new_v4(); 420 | let history_segment = b"abc".to_vec(); 421 | txn.add_version(version_id, parent_version_id, history_segment.clone()) 422 | .await?; 423 | // Fails because the version already exists. 424 | assert!(txn 425 | .add_version(version_id, parent_version_id, history_segment.clone()) 426 | .await 427 | .is_err()); 428 | Ok(()) 429 | } 430 | 431 | #[tokio::test] 432 | async fn test_add_version_mismatch() -> anyhow::Result<()> { 433 | let tmp_dir = TempDir::new()?; 434 | let storage = SqliteStorage::new(tmp_dir.path())?; 435 | let client_id = Uuid::new_v4(); 436 | let mut txn = storage.txn(client_id).await?; 437 | 438 | let latest_version_id = Uuid::new_v4(); 439 | txn.new_client(latest_version_id).await?; 440 | 441 | let version_id = Uuid::new_v4(); 442 | let parent_version_id = Uuid::new_v4(); // != latest_version_id 443 | let history_segment = b"abc".to_vec(); 444 | // Fails because the latest_version_id is not parent_version_id. 
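        // (Concretely: the guarded UPDATE in `add_version` matches no rows,
        // because the stored latest_version_id is neither the parent version
        // nor nil, so the call bails out.)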
445 | assert!(txn 446 | .add_version(version_id, parent_version_id, history_segment.clone()) 447 | .await 448 | .is_err()); 449 | Ok(()) 450 | } 451 | 452 | #[tokio::test] 453 | async fn test_snapshots() -> anyhow::Result<()> { 454 | let tmp_dir = TempDir::new()?; 455 | let storage = SqliteStorage::new(tmp_dir.path())?; 456 | let client_id = Uuid::new_v4(); 457 | let mut txn = storage.txn(client_id).await?; 458 | 459 | txn.new_client(Uuid::new_v4()).await?; 460 | assert!(txn.get_client().await?.unwrap().snapshot.is_none()); 461 | 462 | let snap = Snapshot { 463 | version_id: Uuid::new_v4(), 464 | timestamp: "2013-10-08T12:00:09Z".parse::>().unwrap(), 465 | versions_since: 3, 466 | }; 467 | txn.set_snapshot(snap.clone(), vec![9, 8, 9]).await?; 468 | 469 | assert_eq!( 470 | txn.get_snapshot_data(snap.version_id).await?.unwrap(), 471 | vec![9, 8, 9] 472 | ); 473 | assert_eq!(txn.get_client().await?.unwrap().snapshot, Some(snap)); 474 | 475 | let snap2 = Snapshot { 476 | version_id: Uuid::new_v4(), 477 | timestamp: "2014-11-28T12:00:09Z".parse::>().unwrap(), 478 | versions_since: 10, 479 | }; 480 | txn.set_snapshot(snap2.clone(), vec![0, 2, 4, 6]).await?; 481 | 482 | assert_eq!( 483 | txn.get_snapshot_data(snap2.version_id).await?.unwrap(), 484 | vec![0, 2, 4, 6] 485 | ); 486 | assert_eq!(txn.get_client().await?.unwrap().snapshot, Some(snap2)); 487 | 488 | // check that mismatched version is detected 489 | assert!(txn.get_snapshot_data(Uuid::new_v4()).await.is_err()); 490 | 491 | Ok(()) 492 | } 493 | 494 | #[tokio::test] 495 | /// When an add_version call specifies a `parent_version_id` that does not exist in the 496 | /// DB, but no other versions exist, the call succeeds. 497 | async fn test_add_version_no_history() -> anyhow::Result<()> { 498 | let tmp_dir = TempDir::new()?; 499 | let storage = SqliteStorage::new(tmp_dir.path())?; 500 | let client_id = Uuid::new_v4(); 501 | let mut txn = storage.txn(client_id).await?; 502 | txn.new_client(Uuid::nil()).await?; 503 | 504 | let version_id = Uuid::new_v4(); 505 | let parent_version_id = Uuid::new_v4(); 506 | txn.add_version(version_id, parent_version_id, b"v1".to_vec()) 507 | .await?; 508 | Ok(()) 509 | } 510 | } 511 | -------------------------------------------------------------------------------- /postgres/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! This crate implements a Postgres storage backend for the TaskChampion sync server. 2 | //! 3 | //! Use the [`PostgresStorage`] type as an implementation of the [`Storage`] trait. 4 | //! 5 | //! This implementation is tested with Postgres version 17 but should work with any recent version. 6 | //! 7 | //! ## Schema Setup 8 | //! 9 | //! The database identified by the connection string must already exist and be set up with the 10 | //! following schema (also available in `postgres/schema.sql` in the repository): 11 | //! 12 | //! ```sql 13 | #![doc=include_str!("../schema.sql")] 14 | //! ``` 15 | //! 16 | //! ## Integration with External Applications 17 | //! 18 | //! The schema is stable, and any changes to the schema will be made in a major version with 19 | //! migration instructions provided. 20 | //! 21 | //! An external application may: 22 | //! - Add additional tables to the database 23 | //! - Add additional columns to the `clients` table. If those columns do not have default 24 | //! values, calls to `Txn::new_client` will fail. It is possible to configure 25 | //! `taskchampion-sync-server` to never call this method. 26 | //! 
- Insert rows into the `clients` table, using default values for all columns except 27 | //! `client_id` and application-specific columns. 28 | //! - Delete rows from the `clients` table, noting that any associated task data 29 | //! is also deleted. 30 | 31 | use anyhow::Context; 32 | use bb8::PooledConnection; 33 | use bb8_postgres::PostgresConnectionManager; 34 | use chrono::{TimeZone, Utc}; 35 | use postgres_native_tls::MakeTlsConnector; 36 | use taskchampion_sync_server_core::{Client, Snapshot, Storage, StorageTxn, Version}; 37 | use uuid::Uuid; 38 | 39 | #[cfg(test)] 40 | mod testing; 41 | 42 | /// An `ErrorSink` implementation that logs errors to the Rust log. 43 | #[derive(Debug, Clone, Copy)] 44 | pub struct LogErrorSink; 45 | 46 | impl LogErrorSink { 47 | fn new() -> Box { 48 | Box::new(Self) 49 | } 50 | } 51 | 52 | impl bb8::ErrorSink for LogErrorSink { 53 | fn sink(&self, e: tokio_postgres::Error) { 54 | log::error!("Postgres connection error: {e}"); 55 | } 56 | 57 | fn boxed_clone(&self) -> Box> { 58 | Self::new() 59 | } 60 | } 61 | 62 | /// A storage backend which uses Postgres. 63 | pub struct PostgresStorage { 64 | pool: bb8::Pool>, 65 | } 66 | 67 | impl PostgresStorage { 68 | pub async fn new(connection_string: impl ToString) -> anyhow::Result { 69 | let connector = native_tls::TlsConnector::new()?; 70 | let connector = postgres_native_tls::MakeTlsConnector::new(connector); 71 | let manager = PostgresConnectionManager::new_from_stringlike(connection_string, connector)?; 72 | let pool = bb8::Pool::builder() 73 | .error_sink(LogErrorSink::new()) 74 | .build(manager) 75 | .await?; 76 | Ok(Self { pool }) 77 | } 78 | } 79 | 80 | #[async_trait::async_trait] 81 | impl Storage for PostgresStorage { 82 | async fn txn(&self, client_id: Uuid) -> anyhow::Result> { 83 | let db_client = self.pool.get_owned().await?; 84 | 85 | db_client 86 | .execute("BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE", &[]) 87 | .await?; 88 | 89 | Ok(Box::new(Txn { 90 | client_id, 91 | db_client: Some(db_client), 92 | })) 93 | } 94 | } 95 | 96 | struct Txn { 97 | client_id: Uuid, 98 | /// The DB client or, if `commit` has been called, None. This ensures queries aren't executed 99 | /// after commit, and also frees connections back to the pool as quickly as possible. 100 | db_client: Option>>, 101 | } 102 | 103 | impl Txn { 104 | /// Get the db_client, or panic if it is gone (after commit). 105 | fn db_client(&self) -> &tokio_postgres::Client { 106 | let Some(db_client) = &self.db_client else { 107 | panic!("Cannot use a postgres Txn after commit"); 108 | }; 109 | db_client 110 | } 111 | 112 | /// Implementation for queries from the versions table 113 | async fn get_version_impl( 114 | &mut self, 115 | query: &'static str, 116 | client_id: Uuid, 117 | version_id_arg: Uuid, 118 | ) -> anyhow::Result> { 119 | Ok(self 120 | .db_client() 121 | .query_opt(query, &[&version_id_arg, &client_id]) 122 | .await 123 | .context("error getting version")? 
124 | .map(|r| Version { 125 | version_id: r.get(0), 126 | parent_version_id: r.get(1), 127 | history_segment: r.get("history_segment"), 128 | })) 129 | } 130 | } 131 | 132 | #[async_trait::async_trait(?Send)] 133 | impl StorageTxn for Txn { 134 | async fn get_client(&mut self) -> anyhow::Result> { 135 | Ok(self 136 | .db_client() 137 | .query_opt( 138 | "SELECT 139 | latest_version_id, 140 | snapshot_timestamp, 141 | versions_since_snapshot, 142 | snapshot_version_id 143 | FROM clients 144 | WHERE client_id = $1 145 | LIMIT 1", 146 | &[&self.client_id], 147 | ) 148 | .await 149 | .context("error getting client")? 150 | .map(|r| { 151 | let latest_version_id: Uuid = r.get(0); 152 | let snapshot_timestamp: Option = r.get(1); 153 | let versions_since_snapshot: Option = r.get(2); 154 | let snapshot_version_id: Option = r.get(3); 155 | 156 | // if all of the relevant fields are non-NULL, return a snapshot 157 | let snapshot = match ( 158 | snapshot_timestamp, 159 | versions_since_snapshot, 160 | snapshot_version_id, 161 | ) { 162 | (Some(ts), Some(vs), Some(v)) => Some(Snapshot { 163 | version_id: v, 164 | timestamp: Utc.timestamp_opt(ts, 0).unwrap(), 165 | versions_since: vs as u32, 166 | }), 167 | _ => None, 168 | }; 169 | Client { 170 | latest_version_id, 171 | snapshot, 172 | } 173 | })) 174 | } 175 | 176 | async fn new_client(&mut self, latest_version_id: Uuid) -> anyhow::Result<()> { 177 | self.db_client() 178 | .execute( 179 | "INSERT INTO clients (client_id, latest_version_id) VALUES ($1, $2)", 180 | &[&self.client_id, &latest_version_id], 181 | ) 182 | .await 183 | .context("error creating/updating client")?; 184 | Ok(()) 185 | } 186 | 187 | async fn set_snapshot(&mut self, snapshot: Snapshot, data: Vec) -> anyhow::Result<()> { 188 | let timestamp = snapshot.timestamp.timestamp(); 189 | self.db_client() 190 | .execute( 191 | "UPDATE clients 192 | SET snapshot_version_id = $1, 193 | versions_since_snapshot = $2, 194 | snapshot_timestamp = $3, 195 | snapshot = $4 196 | WHERE client_id = $5", 197 | &[ 198 | &snapshot.version_id, 199 | &(snapshot.versions_since as i32), 200 | ×tamp, 201 | &data, 202 | &self.client_id, 203 | ], 204 | ) 205 | .await 206 | .context("error setting snapshot")?; 207 | Ok(()) 208 | } 209 | 210 | async fn get_snapshot_data(&mut self, version_id: Uuid) -> anyhow::Result>> { 211 | Ok(self 212 | .db_client() 213 | .query_opt( 214 | "SELECT snapshot 215 | FROM clients 216 | WHERE client_id = $1 and snapshot_version_id = $2 217 | LIMIT 1", 218 | &[&self.client_id, &version_id], 219 | ) 220 | .await 221 | .context("error getting snapshot data")? 
222 | .map(|r| r.get(0))) 223 | } 224 | 225 | async fn get_version_by_parent( 226 | &mut self, 227 | parent_version_id: Uuid, 228 | ) -> anyhow::Result> { 229 | self.get_version_impl( 230 | "SELECT version_id, parent_version_id, history_segment 231 | FROM versions 232 | WHERE parent_version_id = $1 AND client_id = $2", 233 | self.client_id, 234 | parent_version_id, 235 | ) 236 | .await 237 | } 238 | 239 | async fn get_version(&mut self, version_id: Uuid) -> anyhow::Result> { 240 | self.get_version_impl( 241 | "SELECT version_id, parent_version_id, history_segment 242 | FROM versions 243 | WHERE version_id = $1 AND client_id = $2", 244 | self.client_id, 245 | version_id, 246 | ) 247 | .await 248 | } 249 | 250 | async fn add_version( 251 | &mut self, 252 | version_id: Uuid, 253 | parent_version_id: Uuid, 254 | history_segment: Vec, 255 | ) -> anyhow::Result<()> { 256 | self.db_client() 257 | .execute( 258 | "INSERT INTO versions (version_id, client_id, parent_version_id, history_segment) 259 | VALUES ($1, $2, $3, $4)", 260 | &[ 261 | &version_id, 262 | &self.client_id, 263 | &parent_version_id, 264 | &history_segment, 265 | ], 266 | ) 267 | .await 268 | .context("error inserting new version")?; 269 | let rows_modified = self 270 | .db_client() 271 | .execute( 272 | "UPDATE clients 273 | SET latest_version_id = $1, 274 | versions_since_snapshot = versions_since_snapshot + 1 275 | WHERE client_id = $2 and (latest_version_id = $3 or latest_version_id = $4)", 276 | &[ 277 | &version_id, 278 | &self.client_id, 279 | &parent_version_id, 280 | &Uuid::nil(), 281 | ], 282 | ) 283 | .await 284 | .context("error updating latest_version_id")?; 285 | 286 | // If no rows were modified, this operation failed. 287 | if rows_modified == 0 { 288 | anyhow::bail!("clients.latest_version_id does not match parent_version_id"); 289 | } 290 | Ok(()) 291 | } 292 | 293 | async fn commit(&mut self) -> anyhow::Result<()> { 294 | self.db_client().execute("COMMIT", &[]).await?; 295 | self.db_client = None; 296 | Ok(()) 297 | } 298 | } 299 | 300 | #[cfg(test)] 301 | mod test { 302 | use super::*; 303 | use crate::testing::with_db; 304 | 305 | async fn make_client(db_client: &tokio_postgres::Client) -> anyhow::Result { 306 | let client_id = Uuid::new_v4(); 307 | db_client 308 | .execute("insert into clients (client_id) values ($1)", &[&client_id]) 309 | .await?; 310 | Ok(client_id) 311 | } 312 | 313 | async fn make_version( 314 | db_client: &tokio_postgres::Client, 315 | client_id: Uuid, 316 | parent_version_id: Uuid, 317 | history_segment: &[u8], 318 | ) -> anyhow::Result { 319 | let version_id = Uuid::new_v4(); 320 | db_client 321 | .execute( 322 | "insert into versions 323 | (version_id, client_id, parent_version_id, history_segment) 324 | values ($1, $2, $3, $4)", 325 | &[ 326 | &version_id, 327 | &client_id, 328 | &parent_version_id, 329 | &history_segment, 330 | ], 331 | ) 332 | .await?; 333 | Ok(version_id) 334 | } 335 | 336 | async fn set_client_latest_version_id( 337 | db_client: &tokio_postgres::Client, 338 | client_id: Uuid, 339 | latest_version_id: Uuid, 340 | ) -> anyhow::Result<()> { 341 | db_client 342 | .execute( 343 | "update clients set latest_version_id = $1 where client_id = $2", 344 | &[&latest_version_id, &client_id], 345 | ) 346 | .await?; 347 | Ok(()) 348 | } 349 | 350 | async fn set_client_snapshot( 351 | db_client: &tokio_postgres::Client, 352 | client_id: Uuid, 353 | snapshot_version_id: Uuid, 354 | versions_since_snapshot: u32, 355 | snapshot_timestamp: i64, 356 | snapshot: &[u8], 357 | ) -> 
anyhow::Result<()> { 358 | db_client 359 | .execute( 360 | " 361 | update clients 362 | set snapshot_version_id = $1, 363 | versions_since_snapshot = $2, 364 | snapshot_timestamp = $3, 365 | snapshot = $4 366 | where client_id = $5", 367 | &[ 368 | &snapshot_version_id, 369 | &(versions_since_snapshot as i32), 370 | &snapshot_timestamp, 371 | &snapshot, 372 | &client_id, 373 | ], 374 | ) 375 | .await?; 376 | Ok(()) 377 | } 378 | 379 | #[tokio::test] 380 | async fn test_get_client_none() -> anyhow::Result<()> { 381 | with_db(async |connection_string, _db_client| { 382 | let storage = PostgresStorage::new(connection_string).await?; 383 | let client_id = Uuid::new_v4(); 384 | let mut txn = storage.txn(client_id).await?; 385 | assert_eq!(txn.get_client().await?, None); 386 | Ok(()) 387 | }) 388 | .await 389 | } 390 | 391 | #[tokio::test] 392 | async fn test_get_client_exists_empty() -> anyhow::Result<()> { 393 | with_db(async |connection_string, db_client| { 394 | let storage = PostgresStorage::new(connection_string).await?; 395 | let client_id = make_client(&db_client).await?; 396 | let mut txn = storage.txn(client_id).await?; 397 | assert_eq!( 398 | txn.get_client().await?, 399 | Some(Client { 400 | latest_version_id: Uuid::nil(), 401 | snapshot: None 402 | }) 403 | ); 404 | Ok(()) 405 | }) 406 | .await 407 | } 408 | 409 | #[tokio::test] 410 | async fn test_get_client_exists_latest() -> anyhow::Result<()> { 411 | with_db(async |connection_string, db_client| { 412 | let storage = PostgresStorage::new(connection_string).await?; 413 | let client_id = make_client(&db_client).await?; 414 | let latest_version_id = Uuid::new_v4(); 415 | set_client_latest_version_id(&db_client, client_id, latest_version_id).await?; 416 | let mut txn = storage.txn(client_id).await?; 417 | assert_eq!( 418 | txn.get_client().await?, 419 | Some(Client { 420 | latest_version_id, 421 | snapshot: None 422 | }) 423 | ); 424 | Ok(()) 425 | }) 426 | .await 427 | } 428 | 429 | #[tokio::test] 430 | async fn test_get_client_exists_with_snapshot() -> anyhow::Result<()> { 431 | with_db(async |connection_string, db_client| { 432 | let storage = PostgresStorage::new(connection_string).await?; 433 | let client_id = make_client(&db_client).await?; 434 | let snapshot_version_id = Uuid::new_v4(); 435 | let versions_since_snapshot = 10; 436 | let snapshot_timestamp = 10000000; 437 | let snapshot = b"abcd"; 438 | set_client_snapshot( 439 | &db_client, 440 | client_id, 441 | snapshot_version_id, 442 | versions_since_snapshot, 443 | snapshot_timestamp, 444 | snapshot, 445 | ) 446 | .await?; 447 | let mut txn = storage.txn(client_id).await?; 448 | assert_eq!( 449 | txn.get_client().await?, 450 | Some(Client { 451 | latest_version_id: Uuid::nil(), 452 | snapshot: Some(Snapshot { 453 | version_id: snapshot_version_id, 454 | timestamp: Utc.timestamp_opt(snapshot_timestamp, 0).unwrap(), 455 | versions_since: versions_since_snapshot, 456 | }) 457 | }) 458 | ); 459 | Ok(()) 460 | }) 461 | .await 462 | } 463 | 464 | #[tokio::test] 465 | async fn test_new_client() -> anyhow::Result<()> { 466 | with_db(async |connection_string, _db_client| { 467 | let storage = PostgresStorage::new(connection_string).await?; 468 | let client_id = Uuid::new_v4(); 469 | let latest_version_id = Uuid::new_v4(); 470 | 471 | let mut txn1 = storage.txn(client_id).await?; 472 | txn1.new_client(latest_version_id).await?; 473 | 474 | // Client is not visible yet as txn1 is not committed. 
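            // Each `Txn` holds its own pooled connection with a SERIALIZABLE
            // transaction open, so txn2 cannot observe txn1's uncommitted INSERT.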
475 | let mut txn2 = storage.txn(client_id).await?; 476 | assert_eq!(txn2.get_client().await?, None); 477 | 478 | txn1.commit().await?; 479 | 480 | // Client is now visible. 481 | let mut txn2 = storage.txn(client_id).await?; 482 | assert_eq!( 483 | txn2.get_client().await?, 484 | Some(Client { 485 | latest_version_id, 486 | snapshot: None 487 | }) 488 | ); 489 | 490 | Ok(()) 491 | }) 492 | .await 493 | } 494 | 495 | #[tokio::test] 496 | async fn test_set_snapshot() -> anyhow::Result<()> { 497 | with_db(async |connection_string, db_client| { 498 | let storage = PostgresStorage::new(connection_string).await?; 499 | let client_id = make_client(&db_client).await?; 500 | let mut txn = storage.txn(client_id).await?; 501 | let snapshot_version_id = Uuid::new_v4(); 502 | let versions_since_snapshot = 10; 503 | let snapshot_timestamp = 10000000; 504 | let snapshot = b"abcd"; 505 | 506 | txn.set_snapshot( 507 | Snapshot { 508 | version_id: snapshot_version_id, 509 | timestamp: Utc.timestamp_opt(snapshot_timestamp, 0).unwrap(), 510 | versions_since: versions_since_snapshot, 511 | }, 512 | snapshot.to_vec(), 513 | ) 514 | .await?; 515 | txn.commit().await?; 516 | 517 | txn = storage.txn(client_id).await?; 518 | assert_eq!( 519 | txn.get_client().await?, 520 | Some(Client { 521 | latest_version_id: Uuid::nil(), 522 | snapshot: Some(Snapshot { 523 | version_id: snapshot_version_id, 524 | timestamp: Utc.timestamp_opt(snapshot_timestamp, 0).unwrap(), 525 | versions_since: versions_since_snapshot, 526 | }) 527 | }) 528 | ); 529 | 530 | let row = db_client 531 | .query_one( 532 | "select snapshot from clients where client_id = $1", 533 | &[&client_id], 534 | ) 535 | .await?; 536 | assert_eq!(row.get::<_, &[u8]>(0), b"abcd"); 537 | 538 | Ok(()) 539 | }) 540 | .await 541 | } 542 | 543 | #[tokio::test] 544 | async fn test_get_snapshot_none() -> anyhow::Result<()> { 545 | with_db(async |connection_string, db_client| { 546 | let storage = PostgresStorage::new(connection_string).await?; 547 | let client_id = make_client(&db_client).await?; 548 | let mut txn = storage.txn(client_id).await?; 549 | assert_eq!(txn.get_snapshot_data(Uuid::new_v4()).await?, None); 550 | 551 | Ok(()) 552 | }) 553 | .await 554 | } 555 | 556 | #[tokio::test] 557 | async fn test_get_snapshot_mismatched_version() -> anyhow::Result<()> { 558 | with_db(async |connection_string, db_client| { 559 | let storage = PostgresStorage::new(connection_string).await?; 560 | let client_id = make_client(&db_client).await?; 561 | let mut txn = storage.txn(client_id).await?; 562 | 563 | let snapshot_version_id = Uuid::new_v4(); 564 | let versions_since_snapshot = 10; 565 | let snapshot_timestamp = 10000000; 566 | let snapshot = b"abcd"; 567 | txn.set_snapshot( 568 | Snapshot { 569 | version_id: snapshot_version_id, 570 | timestamp: Utc.timestamp_opt(snapshot_timestamp, 0).unwrap(), 571 | versions_since: versions_since_snapshot, 572 | }, 573 | snapshot.to_vec(), 574 | ) 575 | .await?; 576 | 577 | assert_eq!(txn.get_snapshot_data(Uuid::new_v4()).await?, None); 578 | 579 | Ok(()) 580 | }) 581 | .await 582 | } 583 | 584 | #[tokio::test] 585 | async fn test_get_version() -> anyhow::Result<()> { 586 | with_db(async |connection_string, db_client| { 587 | let storage = PostgresStorage::new(connection_string).await?; 588 | let client_id = make_client(&db_client).await?; 589 | let parent_version_id = Uuid::new_v4(); 590 | let version_id = make_version(&db_client, client_id, parent_version_id, b"v1").await?; 591 | 592 | let mut txn = storage.txn(client_id).await?; 
593 | 594 | // Different parent doesn't exist. 595 | assert_eq!(txn.get_version_by_parent(Uuid::new_v4()).await?, None); 596 | 597 | // Different version doesn't exist. 598 | assert_eq!(txn.get_version(Uuid::new_v4()).await?, None); 599 | 600 | let version = Version { 601 | version_id, 602 | parent_version_id, 603 | history_segment: b"v1".to_vec(), 604 | }; 605 | 606 | // Version found by parent. 607 | assert_eq!( 608 | txn.get_version_by_parent(parent_version_id).await?, 609 | Some(version.clone()) 610 | ); 611 | 612 | // Version found by ID. 613 | assert_eq!(txn.get_version(version_id).await?, Some(version)); 614 | 615 | Ok(()) 616 | }) 617 | .await 618 | } 619 | 620 | #[tokio::test] 621 | async fn test_add_version() -> anyhow::Result<()> { 622 | with_db(async |connection_string, db_client| { 623 | let storage = PostgresStorage::new(connection_string).await?; 624 | let client_id = make_client(&db_client).await?; 625 | let mut txn = storage.txn(client_id).await?; 626 | let version_id = Uuid::new_v4(); 627 | txn.add_version(version_id, Uuid::nil(), b"v1".to_vec()) 628 | .await?; 629 | assert_eq!( 630 | txn.get_version(version_id).await?, 631 | Some(Version { 632 | version_id, 633 | parent_version_id: Uuid::nil(), 634 | history_segment: b"v1".to_vec() 635 | }) 636 | ); 637 | Ok(()) 638 | }) 639 | .await 640 | } 641 | 642 | #[tokio::test] 643 | /// When an add_version call specifies an incorrect `parent_version_id, it fails. This is 644 | /// typically avoided by calling `get_client` beforehand, which (due to repeatable reads) 645 | /// allows the caller to check the `latest_version_id` before calling `add_version`. 646 | async fn test_add_version_mismatch() -> anyhow::Result<()> { 647 | with_db(async |connection_string, db_client| { 648 | let storage = PostgresStorage::new(connection_string).await?; 649 | let client_id = make_client(&db_client).await?; 650 | let latest_version_id = Uuid::new_v4(); 651 | set_client_latest_version_id(&db_client, client_id, latest_version_id).await?; 652 | 653 | let mut txn = storage.txn(client_id).await?; 654 | let version_id = Uuid::new_v4(); 655 | let parent_version_id = Uuid::new_v4(); // != latest_version_id 656 | let res = txn 657 | .add_version(version_id, parent_version_id, b"v1".to_vec()) 658 | .await; 659 | assert!(res.is_err()); 660 | Ok(()) 661 | }) 662 | .await 663 | } 664 | 665 | #[tokio::test] 666 | /// Adding versions to two different clients can proceed concurrently. 667 | async fn test_add_version_no_conflict_different_clients() -> anyhow::Result<()> { 668 | with_db(async |connection_string, db_client| { 669 | let storage = PostgresStorage::new(connection_string).await?; 670 | 671 | // Clients 1 and 2 do not interfere with each other; if these are the same client, then 672 | // this will deadlock as one transaction waits for the other. If the postgres storage 673 | // implementation serialized _all_ transactions across clients, that would limit its 674 | // scalability. 675 | // 676 | // So the asertion here is "does not deadlock". 
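            // (Each add_version UPDATEs exactly one row of `clients`, so the
            // row lock it takes only blocks other transactions for the same
            // client_id; different clients touch different rows and proceed
            // independently.)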
677 | 678 | let client_id1 = make_client(&db_client).await?; 679 | let mut txn1 = storage.txn(client_id1).await?; 680 | let version_id1 = Uuid::new_v4(); 681 | txn1.add_version(version_id1, Uuid::nil(), b"v1".to_vec()) 682 | .await?; 683 | 684 | let client_id2 = make_client(&db_client).await?; 685 | let mut txn2 = storage.txn(client_id2).await?; 686 | let version_id2 = Uuid::new_v4(); 687 | txn2.add_version(version_id2, Uuid::nil(), b"v2".to_vec()) 688 | .await?; 689 | 690 | txn1.commit().await?; 691 | txn2.commit().await?; 692 | 693 | Ok(()) 694 | }) 695 | .await 696 | } 697 | 698 | #[tokio::test] 699 | /// When an add_version call specifies a `parent_version_id` that does not exist in the 700 | /// DB, but no other versions exist, the call succeeds. 701 | async fn test_add_version_no_history() -> anyhow::Result<()> { 702 | with_db(async |connection_string, db_client| { 703 | let storage = PostgresStorage::new(connection_string).await?; 704 | let client_id = make_client(&db_client).await?; 705 | 706 | let mut txn = storage.txn(client_id).await?; 707 | let version_id = Uuid::new_v4(); 708 | let parent_version_id = Uuid::new_v4(); 709 | txn.add_version(version_id, parent_version_id, b"v1".to_vec()) 710 | .await?; 711 | Ok(()) 712 | }) 713 | .await 714 | } 715 | } 716 | -------------------------------------------------------------------------------- /core/src/server.rs: -------------------------------------------------------------------------------- 1 | use crate::error::ServerError; 2 | use crate::storage::{Snapshot, Storage, StorageTxn}; 3 | use chrono::Utc; 4 | use uuid::Uuid; 5 | 6 | /// The distinguished value for "no version" 7 | pub const NIL_VERSION_ID: VersionId = Uuid::nil(); 8 | 9 | /// Number of versions to search back from the latest to find the 10 | /// version for a newly-added snapshot. Snapshots for versions older 11 | /// than this will be rejected. 12 | const SNAPSHOT_SEARCH_LEN: i32 = 5; 13 | 14 | pub type HistorySegment = Vec; 15 | pub type ClientId = Uuid; 16 | pub type VersionId = Uuid; 17 | 18 | /// ServerConfig contains configuration parameters for the server. 19 | pub struct ServerConfig { 20 | /// Target number of days between snapshots. 21 | pub snapshot_days: i64, 22 | 23 | /// Target number of versions between snapshots. 24 | pub snapshot_versions: u32, 25 | } 26 | 27 | impl Default for ServerConfig { 28 | fn default() -> Self { 29 | ServerConfig { 30 | snapshot_days: 14, 31 | snapshot_versions: 100, 32 | } 33 | } 34 | } 35 | 36 | /// Response to get_child_version. See the protocol documentation. 37 | #[derive(Clone, PartialEq, Debug)] 38 | pub enum GetVersionResult { 39 | NotFound, 40 | Gone, 41 | Success { 42 | version_id: Uuid, 43 | parent_version_id: Uuid, 44 | history_segment: HistorySegment, 45 | }, 46 | } 47 | 48 | /// Response to add_version 49 | #[derive(Clone, PartialEq, Debug)] 50 | pub enum AddVersionResult { 51 | /// OK, version added with the given ID 52 | Ok(VersionId), 53 | /// Rejected; expected a version with the given parent version 54 | ExpectedParentVersion(VersionId), 55 | } 56 | 57 | /// Urgency of a snapshot for a client; used to create the `X-Snapshot-Request` header. 58 | #[derive(PartialEq, Debug, Clone, Copy, Eq, PartialOrd, Ord)] 59 | pub enum SnapshotUrgency { 60 | /// Don't need a snapshot right now. 61 | None, 62 | 63 | /// A snapshot would be good, but can wait for other replicas to provide it. 64 | Low, 65 | 66 | /// A snapshot is needed right now. 
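    /// This results when no snapshot exists yet, or when the most recent
    /// snapshot exceeds 1.5x the configured age or version-count threshold.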
67 | High, 68 | } 69 | 70 | impl SnapshotUrgency { 71 | /// Calculate the urgency for a snapshot based on its age in days 72 | fn for_days(config: &ServerConfig, days: i64) -> Self { 73 | if days >= config.snapshot_days * 3 / 2 { 74 | SnapshotUrgency::High 75 | } else if days >= config.snapshot_days { 76 | SnapshotUrgency::Low 77 | } else { 78 | SnapshotUrgency::None 79 | } 80 | } 81 | 82 | /// Calculate the urgency for a snapshot based on its age in versions 83 | fn for_versions_since(config: &ServerConfig, versions_since: u32) -> Self { 84 | if versions_since >= config.snapshot_versions * 3 / 2 { 85 | SnapshotUrgency::High 86 | } else if versions_since >= config.snapshot_versions { 87 | SnapshotUrgency::Low 88 | } else { 89 | SnapshotUrgency::None 90 | } 91 | } 92 | } 93 | 94 | /// A server implementing the TaskChampion sync protocol. 95 | pub struct Server { 96 | config: ServerConfig, 97 | storage: Box, 98 | } 99 | 100 | impl Server { 101 | pub fn new(config: ServerConfig, storage: ST) -> Self { 102 | Self { 103 | config, 104 | storage: Box::new(storage), 105 | } 106 | } 107 | 108 | /// Implementation of the GetChildVersion protocol transaction. 109 | pub async fn get_child_version( 110 | &self, 111 | client_id: ClientId, 112 | parent_version_id: VersionId, 113 | ) -> Result { 114 | let mut txn = self.txn(client_id).await?; 115 | let client = txn.get_client().await?.ok_or(ServerError::NoSuchClient)?; 116 | 117 | // If a version with parentVersionId equal to the requested parentVersionId exists, it is 118 | // returned. 119 | if let Some(version) = txn.get_version_by_parent(parent_version_id).await? { 120 | return Ok(GetVersionResult::Success { 121 | version_id: version.version_id, 122 | parent_version_id: version.parent_version_id, 123 | history_segment: version.history_segment, 124 | }); 125 | } 126 | 127 | // Return NotFound if an AddVersion with this parent_version_id would succeed, and 128 | // otherwise return Gone. 
129 | // 130 | // AddVersion will succeed if either 131 | // - the requested parent version is the latest version; or 132 | // - there is no latest version, meaning there are no versions stored for this client 133 | Ok( 134 | if client.latest_version_id == parent_version_id 135 | || client.latest_version_id == NIL_VERSION_ID 136 | { 137 | GetVersionResult::NotFound 138 | } else { 139 | GetVersionResult::Gone 140 | }, 141 | ) 142 | } 143 | 144 | /// Implementation of the AddVersion protocol transaction 145 | pub async fn add_version( 146 | &self, 147 | client_id: ClientId, 148 | parent_version_id: VersionId, 149 | history_segment: HistorySegment, 150 | ) -> Result<(AddVersionResult, SnapshotUrgency), ServerError> { 151 | log::debug!("add_version(client_id: {client_id}, parent_version_id: {parent_version_id})"); 152 | 153 | let mut txn = self.txn(client_id).await?; 154 | let client = txn.get_client().await?.ok_or(ServerError::NoSuchClient)?; 155 | 156 | // check if this version is acceptable, under the protection of the transaction 157 | if client.latest_version_id != NIL_VERSION_ID 158 | && parent_version_id != client.latest_version_id 159 | { 160 | log::debug!("add_version request rejected: mismatched latest_version_id"); 161 | return Ok(( 162 | AddVersionResult::ExpectedParentVersion(client.latest_version_id), 163 | SnapshotUrgency::None, 164 | )); 165 | } 166 | 167 | // invent a version ID 168 | let version_id = Uuid::new_v4(); 169 | log::debug!("add_version request accepted: new version_id: {version_id}"); 170 | 171 | // update the DB 172 | txn.add_version(version_id, parent_version_id, history_segment) 173 | .await?; 174 | txn.commit().await?; 175 | 176 | // calculate the urgency 177 | let time_urgency = match client.snapshot { 178 | None => SnapshotUrgency::High, 179 | Some(Snapshot { timestamp, .. }) => { 180 | SnapshotUrgency::for_days(&self.config, (Utc::now() - timestamp).num_days()) 181 | } 182 | }; 183 | 184 | let version_urgency = match client.snapshot { 185 | None => SnapshotUrgency::High, 186 | Some(Snapshot { versions_since, .. }) => { 187 | SnapshotUrgency::for_versions_since(&self.config, versions_since) 188 | } 189 | }; 190 | 191 | Ok(( 192 | AddVersionResult::Ok(version_id), 193 | std::cmp::max(time_urgency, version_urgency), 194 | )) 195 | } 196 | 197 | /// Implementation of the AddSnapshot protocol transaction 198 | pub async fn add_snapshot( 199 | &self, 200 | client_id: ClientId, 201 | version_id: VersionId, 202 | data: Vec, 203 | ) -> Result<(), ServerError> { 204 | log::debug!("add_snapshot(client_id: {client_id}, version_id: {version_id})"); 205 | 206 | let mut txn = self.txn(client_id).await?; 207 | let client = txn.get_client().await?.ok_or(ServerError::NoSuchClient)?; 208 | 209 | // NOTE: if the snapshot is rejected, this function logs about it and returns 210 | // Ok(()), as there's no reason to report an errot to the client / user. 211 | 212 | let last_snapshot = client.snapshot.map(|snap| snap.version_id); 213 | if Some(version_id) == last_snapshot { 214 | log::debug!("rejecting snapshot for version {version_id}: already exists"); 215 | return Ok(()); 216 | } 217 | 218 | // look for this version in the history of this client, starting at the latest version, and 219 | // only iterating for a limited number of versions. 
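        // For example, with SNAPSHOT_SEARCH_LEN = 5 and versions
        // v1 <- v2 <- ... <- v9 (v9 being the latest), a snapshot offered for
        // any of v5..=v9 is accepted; anything older is logged and ignored.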
220 | let mut search_len = SNAPSHOT_SEARCH_LEN; 221 | let mut vid = client.latest_version_id; 222 | 223 | loop { 224 | if vid == version_id && version_id != NIL_VERSION_ID { 225 | // the new snapshot is for a recent version, so proceed 226 | break; 227 | } 228 | 229 | if Some(vid) == last_snapshot { 230 | // the new snapshot is older than the last snapshot, so ignore it 231 | log::debug!("rejecting snapshot for version {version_id}: newer snapshot already exists or no such version"); 232 | return Ok(()); 233 | } 234 | 235 | search_len -= 1; 236 | if search_len <= 0 || vid == NIL_VERSION_ID { 237 | // this should not happen in normal operation, so warn about it 238 | log::warn!("rejecting snapshot for version {version_id}: version is too old or no such version"); 239 | return Ok(()); 240 | } 241 | 242 | // get the parent version ID 243 | if let Some(parent) = txn.get_version(vid).await? { 244 | vid = parent.parent_version_id; 245 | } else { 246 | // this version does not exist; "this should not happen" but if it does, 247 | // we don't need a snapshot earlier than the missing version. 248 | log::warn!("rejecting snapshot for version {version_id}: newer versions have already been deleted"); 249 | return Ok(()); 250 | } 251 | } 252 | 253 | log::debug!("accepting snapshot for version {version_id}"); 254 | txn.set_snapshot( 255 | Snapshot { 256 | version_id, 257 | timestamp: Utc::now(), 258 | versions_since: 0, 259 | }, 260 | data, 261 | ) 262 | .await?; 263 | txn.commit().await?; 264 | Ok(()) 265 | } 266 | 267 | /// Implementation of the GetSnapshot protocol transaction 268 | pub async fn get_snapshot( 269 | &self, 270 | client_id: ClientId, 271 | ) -> Result<Option<(Uuid, Vec<u8>)>, ServerError> { 272 | let mut txn = self.txn(client_id).await?; 273 | let client = txn.get_client().await?.ok_or(ServerError::NoSuchClient)?; 274 | 275 | Ok(if let Some(snap) = client.snapshot { 276 | txn.get_snapshot_data(snap.version_id) 277 | .await? 278 | .map(|data| (snap.version_id, data)) 279 | } else { 280 | None 281 | }) 282 | } 283 | 284 | /// Convenience method to get a transaction for the embedded storage. 285 | pub async fn txn(&self, client_id: Uuid) -> Result<Box<dyn StorageTxn + '_>, ServerError> { 286 | Ok(self.storage.txn(client_id).await?) 287 | } 288 | } 289 | 290 | #[cfg(test)] 291 | mod test { 292 | use super::*; 293 | use crate::inmemory::InMemoryStorage; 294 | use crate::storage::{Snapshot, Storage}; 295 | use chrono::{Duration, TimeZone, Utc}; 296 | use pretty_assertions::assert_eq; 297 | 298 | /// Set up for a test, returning storage and a client_id. 299 | fn setup() -> (InMemoryStorage, Uuid) { 300 | let _ = env_logger::builder().is_test(true).try_init(); 301 | let storage = InMemoryStorage::new(); 302 | let client_id = Uuid::new_v4(); 303 | (storage, client_id) 304 | } 305 | 306 | /// Convert storage into a Server. 307 | fn into_server(storage: InMemoryStorage) -> Server { 308 | Server::new(ServerConfig::default(), storage) 309 | } 310 | 311 | /// Add versions to the DB for the given client.
312 | async fn add_versions( 313 | storage: &InMemoryStorage, 314 | client_id: Uuid, 315 | num_versions: u32, 316 | snapshot_version: Option<u32>, 317 | snapshot_days_ago: Option<i64>, 318 | ) -> anyhow::Result<Vec<Uuid>> { 319 | let mut txn = storage.txn(client_id).await?; 320 | let mut versions = vec![]; 321 | 322 | let mut version_id = Uuid::nil(); 323 | txn.new_client(Uuid::nil()).await?; 324 | assert!( 325 | num_versions < u8::MAX.into(), 326 | "we cast the version number to u8" 327 | ); 328 | for vnum in 0..num_versions { 329 | let parent_version_id = version_id; 330 | version_id = Uuid::new_v4(); 331 | versions.push(version_id); 332 | txn.add_version( 333 | version_id, 334 | parent_version_id, 335 | // Generate some unique data for this version. 336 | vec![0, 0, vnum as u8], 337 | ) 338 | .await?; 339 | if Some(vnum) == snapshot_version { 340 | txn.set_snapshot( 341 | Snapshot { 342 | version_id, 343 | versions_since: 0, 344 | timestamp: Utc::now() - Duration::days(snapshot_days_ago.unwrap_or(0)), 345 | }, 346 | // Generate some unique data for this snapshot. 347 | vec![vnum as u8], 348 | ) 349 | .await?; 350 | } 351 | } 352 | txn.commit().await?; 353 | Ok(versions) 354 | } 355 | 356 | /// Utility function to check the results of an add_version call 357 | async fn av_success_check( 358 | server: &Server, 359 | client_id: Uuid, 360 | existing_versions: &[Uuid], 361 | (add_version_result, snapshot_urgency): (AddVersionResult, SnapshotUrgency), 362 | expected_history: Vec<u8>, 363 | expected_urgency: SnapshotUrgency, 364 | ) -> anyhow::Result<()> { 365 | if let AddVersionResult::Ok(new_version_id) = add_version_result { 366 | // check that it invented a new version ID 367 | for v in existing_versions { 368 | assert_ne!(&new_version_id, v); 369 | } 370 | 371 | // verify that the storage was updated 372 | let mut txn = server.txn(client_id).await?; 373 | let client = txn.get_client().await?.unwrap(); 374 | assert_eq!(client.latest_version_id, new_version_id); 375 | 376 | let parent_version_id = existing_versions.last().cloned().unwrap_or_else(Uuid::nil); 377 | let version = txn.get_version(new_version_id).await?.unwrap(); 378 | assert_eq!(version.version_id, new_version_id); 379 | assert_eq!(version.parent_version_id, parent_version_id); 380 | assert_eq!(version.history_segment, expected_history); 381 | } else { 382 | panic!("did not get Ok from add_version: {add_version_result:?}"); 383 | } 384 | 385 | assert_eq!(snapshot_urgency, expected_urgency); 386 | 387 | Ok(()) 388 | } 389 | 390 | #[test] 391 | fn snapshot_urgency_max() { 392 | use SnapshotUrgency::*; 393 | assert_eq!(std::cmp::max(None, None), None); 394 | assert_eq!(std::cmp::max(None, Low), Low); 395 | assert_eq!(std::cmp::max(None, High), High); 396 | assert_eq!(std::cmp::max(Low, None), Low); 397 | assert_eq!(std::cmp::max(Low, Low), Low); 398 | assert_eq!(std::cmp::max(Low, High), High); 399 | assert_eq!(std::cmp::max(High, None), High); 400 | assert_eq!(std::cmp::max(High, Low), High); 401 | assert_eq!(std::cmp::max(High, High), High); 402 | } 403 | 404 | #[test] 405 | fn snapshot_urgency_for_days() { 406 | use SnapshotUrgency::*; 407 | let config = ServerConfig::default(); 408 | assert_eq!(SnapshotUrgency::for_days(&config, 0), None); 409 | assert_eq!( 410 | SnapshotUrgency::for_days(&config, config.snapshot_days), 411 | Low 412 | ); 413 | assert_eq!( 414 | SnapshotUrgency::for_days(&config, config.snapshot_days * 2), 415 | High 416 | ); 417 | } 418 | 419 | #[test] 420 | fn snapshot_urgency_for_versions_since() { 421 | use SnapshotUrgency::*;
422 | let config = ServerConfig::default(); 423 | assert_eq!(SnapshotUrgency::for_versions_since(&config, 0), None); 424 | assert_eq!( 425 | SnapshotUrgency::for_versions_since(&config, config.snapshot_versions), 426 | Low 427 | ); 428 | assert_eq!( 429 | SnapshotUrgency::for_versions_since(&config, config.snapshot_versions * 2), 430 | High 431 | ); 432 | } 433 | 434 | #[tokio::test] 435 | async fn get_child_version_not_found_initial_nil() -> anyhow::Result<()> { 436 | let (storage, client_id) = setup(); 437 | { 438 | let mut txn = storage.txn(client_id).await?; 439 | txn.new_client(NIL_VERSION_ID).await?; 440 | txn.commit().await?; 441 | } 442 | 443 | let server = into_server(storage); 444 | 445 | // when no latest version exists, the first version is NotFound 446 | assert_eq!( 447 | server.get_child_version(client_id, NIL_VERSION_ID).await?, 448 | GetVersionResult::NotFound 449 | ); 450 | Ok(()) 451 | } 452 | 453 | #[tokio::test] 454 | async fn get_child_version_not_found_initial_continuing() -> anyhow::Result<()> { 455 | let (storage, client_id) = setup(); 456 | { 457 | let mut txn = storage.txn(client_id).await?; 458 | txn.new_client(NIL_VERSION_ID).await?; 459 | txn.commit().await?; 460 | } 461 | 462 | let server = into_server(storage); 463 | 464 | // when no latest version exists, _any_ child version is NOT_FOUND. This allows syncs to 465 | // start to a new server even if the client already has been uploading to another service. 466 | assert_eq!( 467 | server.get_child_version(client_id, Uuid::new_v4(),).await?, 468 | GetVersionResult::NotFound 469 | ); 470 | Ok(()) 471 | } 472 | 473 | #[tokio::test] 474 | async fn get_child_version_not_found_up_to_date() -> anyhow::Result<()> { 475 | let (storage, client_id) = setup(); 476 | let parent_version_id = Uuid::new_v4(); 477 | { 478 | let mut txn = storage.txn(client_id).await?; 479 | // add a parent version, but not the requested child version 480 | txn.new_client(parent_version_id).await?; 481 | txn.add_version(parent_version_id, NIL_VERSION_ID, vec![]) 482 | .await?; 483 | txn.commit().await?; 484 | } 485 | 486 | let server = into_server(storage); 487 | assert_eq!( 488 | server 489 | .get_child_version(client_id, parent_version_id) 490 | .await?, 491 | GetVersionResult::NotFound 492 | ); 493 | Ok(()) 494 | } 495 | 496 | #[tokio::test] 497 | async fn get_child_version_gone_not_latest() -> anyhow::Result<()> { 498 | let (storage, client_id) = setup(); 499 | let parent_version_id = Uuid::new_v4(); 500 | { 501 | let mut txn = storage.txn(client_id).await?; 502 | // Add a parent version, but not the requested parent version 503 | txn.new_client(parent_version_id).await?; 504 | txn.add_version(parent_version_id, NIL_VERSION_ID, vec![]) 505 | .await?; 506 | txn.commit().await?; 507 | } 508 | 509 | let server = into_server(storage); 510 | assert_eq!( 511 | server.get_child_version(client_id, Uuid::new_v4(),).await?, 512 | GetVersionResult::Gone 513 | ); 514 | Ok(()) 515 | } 516 | 517 | #[tokio::test] 518 | async fn get_child_version_found() -> anyhow::Result<()> { 519 | let (storage, client_id) = setup(); 520 | let version_id = Uuid::new_v4(); 521 | let parent_version_id = Uuid::new_v4(); 522 | let history_segment = b"abcd".to_vec(); 523 | { 524 | let mut txn = storage.txn(client_id).await?; 525 | txn.new_client(version_id).await?; 526 | txn.add_version(version_id, parent_version_id, history_segment.clone()) 527 | .await?; 528 | txn.commit().await?; 529 | } 530 | 531 | let server = into_server(storage); 532 | assert_eq!( 533 | server 534 | 
.get_child_version(client_id, parent_version_id) 535 | .await?, 536 | GetVersionResult::Success { 537 | version_id, 538 | parent_version_id, 539 | history_segment, 540 | } 541 | ); 542 | Ok(()) 543 | } 544 | 545 | #[tokio::test] 546 | async fn add_version_conflict() -> anyhow::Result<()> { 547 | let (storage, client_id) = setup(); 548 | let versions = add_versions(&storage, client_id, 3, None, None).await?; 549 | 550 | // try to add a child of a version other than the latest 551 | let server = into_server(storage); 552 | assert_eq!( 553 | server 554 | .add_version(client_id, versions[1], vec![3, 6, 9]) 555 | .await? 556 | .0, 557 | AddVersionResult::ExpectedParentVersion(versions[2]) 558 | ); 559 | 560 | // verify that the storage wasn't updated 561 | let mut txn = server.txn(client_id).await?; 562 | assert_eq!( 563 | txn.get_client().await?.unwrap().latest_version_id, 564 | versions[2] 565 | ); 566 | assert_eq!(txn.get_version_by_parent(versions[2]).await?, None); 567 | 568 | Ok(()) 569 | } 570 | 571 | #[tokio::test] 572 | async fn add_version_with_existing_history() -> anyhow::Result<()> { 573 | let (storage, client_id) = setup(); 574 | let versions = add_versions(&storage, client_id, 1, None, None).await?; 575 | 576 | let server = into_server(storage); 577 | let result = server 578 | .add_version(client_id, versions[0], vec![3, 6, 9]) 579 | .await?; 580 | 581 | av_success_check( 582 | &server, 583 | client_id, 584 | &versions, 585 | result, 586 | vec![3, 6, 9], 587 | // urgency=high because there are no snapshots yet 588 | SnapshotUrgency::High, 589 | ) 590 | .await?; 591 | 592 | Ok(()) 593 | } 594 | 595 | #[tokio::test] 596 | async fn add_version_with_no_history() -> anyhow::Result<()> { 597 | let (storage, client_id) = setup(); 598 | let versions = add_versions(&storage, client_id, 0, None, None).await?; 599 | 600 | let server = into_server(storage); 601 | let parent_version_id = Uuid::nil(); 602 | let result = server 603 | .add_version(client_id, parent_version_id, vec![3, 6, 9]) 604 | .await?; 605 | 606 | av_success_check( 607 | &server, 608 | client_id, 609 | &versions, 610 | result, 611 | vec![3, 6, 9], 612 | // urgency=high because there are no snapshots yet 613 | SnapshotUrgency::High, 614 | ) 615 | .await?; 616 | 617 | Ok(()) 618 | } 619 | 620 | #[tokio::test] 621 | async fn add_version_success_recent_snapshot() -> anyhow::Result<()> { 622 | let (storage, client_id) = setup(); 623 | let versions = add_versions(&storage, client_id, 1, Some(0), None).await?; 624 | 625 | let server = into_server(storage); 626 | let result = server 627 | .add_version(client_id, versions[0], vec![1, 2, 3]) 628 | .await?; 629 | 630 | av_success_check( 631 | &server, 632 | client_id, 633 | &versions, 634 | result, 635 | vec![1, 2, 3], 636 | // no snapshot request since the previous version has a snapshot 637 | SnapshotUrgency::None, 638 | ) 639 | .await?; 640 | 641 | Ok(()) 642 | } 643 | 644 | #[tokio::test] 645 | async fn add_version_success_aged_snapshot() -> anyhow::Result<()> { 646 | // one snapshot, but it was 50 days ago 647 | let (storage, client_id) = setup(); 648 | let versions = add_versions(&storage, client_id, 1, Some(0), Some(50)).await?; 649 | 650 | let server = into_server(storage); 651 | let result = server 652 | .add_version(client_id, versions[0], vec![1, 2, 3]) 653 | .await?; 654 | 655 | av_success_check( 656 | &server, 657 | client_id, 658 | &versions, 659 | result, 660 | vec![1, 2, 3], 661 | // urgency=high due to days since the snapshot 662 | SnapshotUrgency::High, 663 | ) 664 | 
.await?; 665 | 666 | Ok(()) 667 | } 668 | 669 | #[tokio::test] 670 | async fn add_version_success_snapshot_many_versions_ago() -> anyhow::Result<()> { 671 | // one snapshot, but it was 50 versions ago 672 | let (storage, client_id) = setup(); 673 | let versions = add_versions(&storage, client_id, 50, Some(0), None).await?; 674 | 675 | let mut server = into_server(storage); 676 | server.config.snapshot_versions = 30; 677 | 678 | let result = server 679 | .add_version(client_id, versions[49], vec![1, 2, 3]) 680 | .await?; 681 | 682 | av_success_check( 683 | &server, 684 | client_id, 685 | &versions, 686 | result, 687 | vec![1, 2, 3], 688 | // urgency=high due to number of versions since the snapshot 689 | SnapshotUrgency::High, 690 | ) 691 | .await?; 692 | 693 | Ok(()) 694 | } 695 | 696 | #[tokio::test] 697 | async fn add_snapshot_success_latest() -> anyhow::Result<()> { 698 | let (storage, client_id) = setup(); 699 | let version_id = Uuid::new_v4(); 700 | 701 | { 702 | let mut txn = storage.txn(client_id).await?; 703 | // set up a task DB with one version in it 704 | txn.new_client(version_id).await?; 705 | txn.add_version(version_id, NIL_VERSION_ID, vec![]).await?; 706 | 707 | txn.commit().await?; 708 | } 709 | 710 | let server = into_server(storage); 711 | server 712 | .add_snapshot(client_id, version_id, vec![1, 2, 3]) 713 | .await?; 714 | 715 | // verify the snapshot 716 | let mut txn = server.txn(client_id).await?; 717 | let client = txn.get_client().await?.unwrap(); 718 | let snapshot = client.snapshot.unwrap(); 719 | assert_eq!(snapshot.version_id, version_id); 720 | assert_eq!(snapshot.versions_since, 0); 721 | assert_eq!( 722 | txn.get_snapshot_data(version_id).await.unwrap(), 723 | Some(vec![1, 2, 3]) 724 | ); 725 | 726 | Ok(()) 727 | } 728 | 729 | #[tokio::test] 730 | async fn add_snapshot_success_older() -> anyhow::Result<()> { 731 | let (storage, client_id) = setup(); 732 | let version_id_1 = Uuid::new_v4(); 733 | let version_id_2 = Uuid::new_v4(); 734 | 735 | { 736 | let mut txn = storage.txn(client_id).await?; 737 | // set up a task DB with two versions in it 738 | txn.new_client(version_id_2).await?; 739 | txn.add_version(version_id_1, NIL_VERSION_ID, vec![]) 740 | .await?; 741 | txn.add_version(version_id_2, version_id_1, vec![]).await?; 742 | 743 | txn.commit().await?; 744 | } 745 | 746 | // add a snapshot for version 1 747 | let server = into_server(storage); 748 | server 749 | .add_snapshot(client_id, version_id_1, vec![1, 2, 3]) 750 | .await?; 751 | 752 | // verify the snapshot 753 | let mut txn = server.txn(client_id).await?; 754 | let client = txn.get_client().await?.unwrap(); 755 | let snapshot = client.snapshot.unwrap(); 756 | assert_eq!(snapshot.version_id, version_id_1); 757 | assert_eq!(snapshot.versions_since, 0); 758 | assert_eq!( 759 | txn.get_snapshot_data(version_id_1).await.unwrap(), 760 | Some(vec![1, 2, 3]) 761 | ); 762 | 763 | Ok(()) 764 | } 765 | 766 | #[tokio::test] 767 | async fn add_snapshot_fails_no_such() -> anyhow::Result<()> { 768 | let (storage, client_id) = setup(); 769 | let version_id_1 = Uuid::new_v4(); 770 | let version_id_2 = Uuid::new_v4(); 771 | 772 | { 773 | let mut txn = storage.txn(client_id).await?; 774 | // set up a task DB with two versions in it 775 | txn.new_client(version_id_2).await?; 776 | txn.add_version(version_id_1, NIL_VERSION_ID, vec![]) 777 | .await?; 778 | txn.add_version(version_id_2, version_id_1, vec![]).await?; 779 | 780 | txn.commit().await?; 781 | } 782 | 783 | // add a snapshot for unknown version 784 | let 
server = into_server(storage); 785 | let version_id_unk = Uuid::new_v4(); 786 | server 787 | .add_snapshot(client_id, version_id_unk, vec![1, 2, 3]) 788 | .await?; 789 | 790 | // verify the snapshot does not exist 791 | let mut txn = server.txn(client_id).await?; 792 | let client = txn.get_client().await?.unwrap(); 793 | assert!(client.snapshot.is_none()); 794 | 795 | Ok(()) 796 | } 797 | 798 | #[tokio::test] 799 | async fn add_snapshot_fails_too_old() -> anyhow::Result<()> { 800 | let (storage, client_id) = setup(); 801 | let mut version_id = Uuid::new_v4(); 802 | let mut parent_version_id = Uuid::nil(); 803 | let mut version_ids = vec![]; 804 | 805 | { 806 | let mut txn = storage.txn(client_id).await?; 807 | // set up a task DB with 10 versions in it (oldest to newest) 808 | txn.new_client(Uuid::nil()).await?; 809 | for _ in 0..10 { 810 | txn.add_version(version_id, parent_version_id, vec![]) 811 | .await?; 812 | version_ids.push(version_id); 813 | parent_version_id = version_id; 814 | version_id = Uuid::new_v4(); 815 | } 816 | 817 | txn.commit().await?; 818 | } 819 | 820 | // add a snapshot for the earliest of those 821 | let server = into_server(storage); 822 | server 823 | .add_snapshot(client_id, version_ids[0], vec![1, 2, 3]) 824 | .await?; 825 | 826 | // verify the snapshot does not exist 827 | let mut txn = server.txn(client_id).await?; 828 | let client = txn.get_client().await?.unwrap(); 829 | assert!(client.snapshot.is_none()); 830 | 831 | Ok(()) 832 | } 833 | 834 | #[tokio::test] 835 | async fn add_snapshot_fails_newer_exists() -> anyhow::Result<()> { 836 | let (storage, client_id) = setup(); 837 | let mut version_id = Uuid::new_v4(); 838 | let mut parent_version_id = Uuid::nil(); 839 | let mut version_ids = vec![]; 840 | 841 | { 842 | let mut txn = storage.txn(client_id).await?; 843 | // set up a task DB with 5 versions in it (oldest to newest) and a snapshot of the 844 | // middle one 845 | txn.new_client(Uuid::nil()).await?; 846 | for _ in 0..5 { 847 | txn.add_version(version_id, parent_version_id, vec![]) 848 | .await?; 849 | version_ids.push(version_id); 850 | parent_version_id = version_id; 851 | version_id = Uuid::new_v4(); 852 | } 853 | txn.set_snapshot( 854 | Snapshot { 855 | version_id: version_ids[2], 856 | versions_since: 2, 857 | timestamp: Utc.with_ymd_and_hms(2001, 9, 9, 1, 46, 40).unwrap(), 858 | }, 859 | vec![1, 2, 3], 860 | ) 861 | .await?; 862 | 863 | txn.commit().await?; 864 | } 865 | 866 | // add a snapshot for the earliest of those 867 | let server = into_server(storage); 868 | server 869 | .add_snapshot(client_id, version_ids[0], vec![9, 9, 9]) 870 | .await?; 871 | 872 | // verify the snapshot was not replaced 873 | let mut txn = server.txn(client_id).await?; 874 | let client = txn.get_client().await?.unwrap(); 875 | let snapshot = client.snapshot.unwrap(); 876 | assert_eq!(snapshot.version_id, version_ids[2]); 877 | assert_eq!(snapshot.versions_since, 2); 878 | assert_eq!( 879 | txn.get_snapshot_data(version_ids[2]).await.unwrap(), 880 | Some(vec![1, 2, 3]) 881 | ); 882 | 883 | Ok(()) 884 | } 885 | 886 | #[tokio::test] 887 | async fn add_snapshot_fails_nil_version() -> anyhow::Result<()> { 888 | let (storage, client_id) = setup(); 889 | { 890 | let mut txn = storage.txn(client_id).await?; 891 | // just set up the client 892 | txn.new_client(NIL_VERSION_ID).await?; 893 | txn.commit().await?; 894 | } 895 | 896 | let server = into_server(storage); 897 | server 898 | .add_snapshot(client_id, NIL_VERSION_ID, vec![9, 9, 9]) 899 | .await?; 900 | 901 | // verify 
the snapshot does not exist 902 | let mut txn = server.txn(client_id).await?; 903 | let client = txn.get_client().await?.unwrap(); 904 | assert!(client.snapshot.is_none()); 905 | 906 | Ok(()) 907 | } 908 | 909 | #[tokio::test] 910 | async fn get_snapshot_found() -> anyhow::Result<()> { 911 | let (storage, client_id) = setup(); 912 | let data = vec![1, 2, 3]; 913 | let snapshot_version_id = Uuid::new_v4(); 914 | 915 | { 916 | let mut txn = storage.txn(client_id).await?; 917 | txn.new_client(snapshot_version_id).await?; 918 | txn.set_snapshot( 919 | Snapshot { 920 | version_id: snapshot_version_id, 921 | versions_since: 3, 922 | timestamp: Utc.with_ymd_and_hms(2001, 9, 9, 1, 46, 40).unwrap(), 923 | }, 924 | data.clone(), 925 | ) 926 | .await?; 927 | txn.commit().await?; 928 | } 929 | 930 | let server = into_server(storage); 931 | assert_eq!( 932 | server.get_snapshot(client_id).await?, 933 | Some((snapshot_version_id, data)) 934 | ); 935 | 936 | Ok(()) 937 | } 938 | 939 | #[tokio::test] 940 | async fn get_snapshot_not_found() -> anyhow::Result<()> { 941 | let (storage, client_id) = setup(); 942 | { 943 | let mut txn = storage.txn(client_id).await?; 944 | txn.new_client(NIL_VERSION_ID).await?; 945 | txn.commit().await?; 946 | } 947 | 948 | let server = into_server(storage); 949 | assert_eq!(server.get_snapshot(client_id).await?, None); 950 | 951 | Ok(()) 952 | } 953 | } 954 | --------------------------------------------------------------------------------