├── .codecov.yml
├── .github
│   ├── dependabot.yml
│   └── workflows
│       ├── build.yml
│       ├── dependabot-automerge.yml
│       └── lint.yml
├── .gitignore
├── .gitmodules
├── Cargo.lock
├── Cargo.toml
├── LICENSE
├── Makefile
├── README.md
├── aw-client-rust
│   ├── .gitignore
│   ├── Cargo.toml
│   ├── README.md
│   ├── src
│   │   ├── blocking.rs
│   │   └── lib.rs
│   └── tests
│       └── test.rs
├── aw-datastore
│   ├── Cargo.toml
│   ├── src
│   │   ├── datastore.rs
│   │   ├── legacy_import.rs
│   │   ├── lib.rs
│   │   └── worker.rs
│   └── tests
│       └── datastore.rs
├── aw-models
│   ├── Cargo.toml
│   ├── examples
│   │   └── schema.rs
│   └── src
│       ├── bucket.rs
│       ├── duration.rs
│       ├── event.rs
│       ├── export.rs
│       ├── info.rs
│       ├── lib.rs
│       ├── query.rs
│       ├── timeinterval.rs
│       └── tryvec.rs
├── aw-query
│   ├── Cargo.toml
│   ├── benches
│   │   └── benchmark.rs
│   ├── src
│   │   ├── ast.rs
│   │   ├── datatype.rs
│   │   ├── functions.rs
│   │   ├── grammar.rs
│   │   ├── interpret.rs
│   │   ├── lexer.rs
│   │   ├── lib.rs
│   │   └── parser.rs
│   └── tests
│       └── query.rs
├── aw-server.service
├── aw-server
│   ├── Cargo.toml
│   ├── build.rs
│   ├── src
│   │   ├── android
│   │   │   └── mod.rs
│   │   ├── config.rs
│   │   ├── device_id.rs
│   │   ├── dirs.rs
│   │   ├── endpoints
│   │   │   ├── bucket.rs
│   │   │   ├── cors.rs
│   │   │   ├── export.rs
│   │   │   ├── hostcheck.rs
│   │   │   ├── import.rs
│   │   │   ├── mod.rs
│   │   │   ├── query.rs
│   │   │   ├── settings.rs
│   │   │   └── util.rs
│   │   ├── lib.rs
│   │   ├── logging.rs
│   │   ├── macros.rs
│   │   └── main.rs
│   └── tests
│       ├── api.rs
│       └── macros.rs
├── aw-sync
│   ├── Cargo.toml
│   ├── README.md
│   ├── src
│   │   ├── accessmethod.rs
│   │   ├── dirs.rs
│   │   ├── lib.rs
│   │   ├── main.rs
│   │   ├── sync.rs
│   │   ├── sync_wrapper.rs
│   │   └── util.rs
│   ├── test-server.sh
│   ├── test-sync-pull.sh
│   ├── test-sync-push.sh
│   └── tests
│       └── sync.rs
├── aw-transform
│   ├── Cargo.toml
│   ├── benches
│   │   └── bench.rs
│   └── src
│       ├── chunk.rs
│       ├── classify.rs
│       ├── filter_keyvals.rs
│       ├── filter_period.rs
│       ├── find_bucket.rs
│       ├── flood.rs
│       ├── heartbeat.rs
│       ├── lib.rs
│       ├── merge.rs
│       ├── period_union.rs
│       ├── sort.rs
│       ├── split_url.rs
│       └── union_no_overlap.rs
├── compile-android.sh
├── install-ndk.sh
├── scripts
│   └── create-cargo-config.sh
└── todo.md

/.codecov.yml:
--------------------------------------------------------------------------------
1 | coverage:
2 |   status:
3 |     project:
4 |       target: auto
5 |       threshold: 1%
6 |     patch:
7 |       target: auto
8 |       threshold: 10%
9 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 |   - package-ecosystem: "github-actions"
4 |     directory: "/"
5 |     schedule:
6 |       interval: "monthly"
7 |
8 |   # Maintain submodule versions
9 |   - package-ecosystem: "gitsubmodule"
10 |     directory: "/"
11 |     schedule:
12 |       interval: "monthly"
13 |
14 |   # Too spammy, easier to just manually update lockfile every now and then
15 |   #- package-ecosystem: cargo
16 |   #  directory: "/"
17 |   #  schedule:
18 |   #    interval: monthly
19 |   #  open-pull-requests-limit: 10
20 |   #  ignore:
21 |   #    - dependency-name: libc
22 |   #      versions:
23 |   #        - 0.2.84
24 |   #        - 0.2.86
25 |   #    - dependency-name: serde_json
26 |   #      versions:
27 |   #        - 1.0.64
28 |   #    - dependency-name: reqwest
29 |   #      versions:
30 |   #        - 0.11.1
31 |   #    - dependency-name: rocket
32 |   #      versions:
33 |   #        - 0.4.7
34 |   #    - dependency-name: rocket_contrib
35 |   #      versions:
36 |   #        - 0.4.7
37 |   #    - dependency-name: regex
38 |   #      versions:
39 |   #        - 1.4.3
40 |   #    - dependency-name: uuid
41 |   #      versions:
42 |   #        - 0.8.2
43 |   #    - dependency-name: serde
44 |   #      versions:
45 |   #        - 1.0.123
46 |   #    - dependency-name: multipart
47 |   #      versions:
48 |   #        - 0.17.1
49 |   #    - dependency-name: log
50 |
# versions: 51 | # - 0.4.14 52 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | jobs: 10 | build: 11 | name: ${{ matrix.os }} 12 | runs-on: ${{ matrix.os }} 13 | strategy: 14 | fail-fast: false 15 | matrix: 16 | os: [ubuntu-latest, windows-latest, macOS-latest] 17 | 18 | steps: 19 | - uses: actions/checkout@v3 20 | - name: Set up Rust 21 | id: toolchain 22 | uses: dtolnay/rust-toolchain@stable 23 | - name: Cache cargo build 24 | uses: actions/cache@v3 25 | if: runner.os != 'macOS' # cache doesn't seem to behave nicely on macOS, see: https://github.com/ActivityWatch/aw-server-rust/issues/180 26 | env: 27 | cache-name: cargo-build-target 28 | with: 29 | path: target 30 | # key needs to contain cachekey due to https://github.com/ActivityWatch/aw-server-rust/issues/180 31 | key: ${{ runner.os }}-${{ env.cache-name }}-${{ steps.toolchain.outputs.cachekey }}-${{ hashFiles('**/Cargo.lock') }} 32 | restore-keys: | 33 | ${{ runner.os }}-${{ env.cache-name }}-${{ steps.toolchain.outputs.cachekey }}- 34 | - name: Build 35 | run: cargo build --workspace --verbose 36 | - name: Run tests 37 | run: cargo test --workspace --verbose 38 | - uses: actions/upload-artifact@v4 39 | with: 40 | # TODO: These binaries are debug builds 41 | name: binaries-${{runner.os}} 42 | path: | 43 | target/*/aw-server 44 | target/*/aw-server.exe 45 | target/*/aw-sync 46 | target/*/aw-sync.exe 47 | 48 | build-android: 49 | name: Android 50 | runs-on: ubuntu-latest 51 | steps: 52 | - uses: actions/checkout@v3 53 | - name: Set up Rust 54 | uses: dtolnay/rust-toolchain@stable 55 | - name: Cache cargo build 56 | uses: actions/cache@v3 57 | env: 58 | cache-name: cargo-build-target-android 59 | with: 60 | path: target 61 | key: ${{ runner.os }}-${{ env.cache-name }}-${{ hashFiles('**/Cargo.lock') }} 62 | restore-keys: | 63 | ${{ runner.os }}-${{ env.cache-name }}- 64 | - name: Build 65 | run: | 66 | export ANDROID_NDK_HOME= # needed because GitHub Actions sets it by default... 67 | make android 68 | - uses: actions/upload-artifact@v4 69 | with: 70 | # TODO: These binaries are debug builds 71 | name: binaries-android 72 | path: | 73 | target/*/*/libaw_server.so 74 | 75 | # Code coverage using tarpaulin 76 | # Works better than grcov, but not as many fancy features (no branch coverage, no LLVM) 77 | # See: https://shift.click/blog/github-actions-rust/#code-coverage 78 | build-coverage-tarpaulin: 79 | name: Code coverage 80 | runs-on: ubuntu-latest 81 | steps: 82 | - uses: actions/checkout@v3 83 | - name: Set up Rust 84 | uses: dtolnay/rust-toolchain@stable 85 | 86 | # Note: If you need to combine the coverage info of multiple 87 | # feature sets, you need a `.tarpaulin.toml` config file, see 88 | # the link above for those docs. 89 | # NOTE: actions-rs is unmaintained, using fork with fix for update to node 16 90 | # https://github.com/actions-rs/tarpaulin/pull/22 91 | - name: Install OpenSSL 1.1 92 | run: | 93 | wget https://nz2.archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.1f-1ubuntu2_amd64.deb 94 | sudo dpkg -i libssl1.1_1.1.1f-1ubuntu2_amd64.deb 95 | 96 | - uses: FreeMasen/tarpaulin-action@9f7e03f06fea8f374c85a95c2ecff6a4d5805845 97 | with: 98 | version: "0.22.0" # not latest, due to error/bug in action (after release artifacts changed name?) 
99 | 100 | # Note: closed-source code needs to provide a token, 101 | # but open source code does not. 102 | - name: Upload to codecov.io 103 | uses: codecov/codecov-action@v4 104 | 105 | # Code coverage using grcov 106 | #build-coverage-grcov: 107 | # name: Build with coverage 108 | # runs-on: ubuntu-latest 109 | 110 | # steps: 111 | # - uses: actions/checkout@v3 112 | # - name: Set up Rust nightly 113 | # uses: actions-rs/toolchain@v1 114 | # with: 115 | # profile: minimal 116 | # toolchain: nightly 117 | # override: true 118 | # - name: Cache cargo build 119 | # uses: actions/cache@v3 120 | # env: 121 | # cache-name: cargo-build-target-coverage 122 | # with: 123 | # path: target 124 | # key: ${{ runner.os }}-${{ env.cache-name }}-${{ hashFiles('**/Cargo.lock') }} 125 | # restore-keys: | 126 | # ${{ runner.os }}-${{ env.cache-name }}- 127 | # - name: Install llvm-tools 128 | # run: | 129 | # rustup component add llvm-tools-preview 130 | # - name: Download grcov 131 | # run: | 132 | # curl -L https://github.com/mozilla/grcov/releases/latest/download/grcov-x86_64-unknown-linux-gnu.tar.bz2 | tar jxf - 133 | # - name: Run tests with coverage 134 | # run: | 135 | # # Add cwd to path to find grcov 136 | # export PATH=$PATH:. 137 | # make coverage-lcov COVERAGE_CACHE=1 138 | # - name: Upload coverage files 139 | # run: bash <(curl -s https://codecov.io/bash) -f target/debug/lcov.info; 140 | -------------------------------------------------------------------------------- /.github/workflows/dependabot-automerge.yml: -------------------------------------------------------------------------------- 1 | name: Dependabot Auto-merge 2 | 3 | # NOTE: This workflow relies on a Personal Access Token from the @ActivityWatchBot user 4 | # See this issue for details: https://github.com/ridedott/merge-me-action/issues/1581 5 | 6 | on: 7 | workflow_run: 8 | types: 9 | - completed 10 | workflows: 11 | # List all required workflow names here. 
12 | - Build 13 | 14 | permissions: 15 | contents: write 16 | pull-requests: read 17 | 18 | jobs: 19 | auto_merge: 20 | name: Auto-merge 21 | runs-on: ubuntu-latest 22 | if: github.event.workflow_run.conclusion == 'success' && github.actor == 'dependabot[bot]' 23 | 24 | steps: 25 | - uses: ridedott/merge-me-action@v2 26 | with: 27 | GITHUB_TOKEN: ${{ secrets.AWBOT_GH_TOKEN }} 28 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: Lint 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | jobs: 10 | format: 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - uses: actions/checkout@v3 15 | - name: Set up Rust nightly 16 | uses: actions-rs/toolchain@v1 17 | with: 18 | profile: minimal 19 | toolchain: nightly 20 | components: rustfmt 21 | override: true 22 | - name: Check formatting 23 | run: cargo fmt -- --check 24 | 25 | clippy: 26 | runs-on: ubuntu-latest 27 | 28 | steps: 29 | - uses: actions/checkout@v3 30 | - name: Set up Rust nightly 31 | uses: actions-rs/toolchain@v1 32 | with: 33 | profile: minimal 34 | toolchain: nightly 35 | components: clippy 36 | override: true 37 | - name: Run clippy 38 | run: cargo clippy 39 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | dist 3 | .cargo/config 4 | NDK 5 | 6 | **/*.rs.bk 7 | *.zip 8 | *.profraw 9 | 10 | *.sqlite* 11 | *.db 12 | *.db-journal 13 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "aw-webui"] 2 | path = aw-webui 3 | url = https://github.com/ActivityWatch/aw-webui.git 4 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "aw-client-rust", 4 | "aw-datastore", 5 | "aw-models", 6 | "aw-transform", 7 | "aw-server", 8 | "aw-sync", 9 | "aw-query", 10 | ] 11 | resolver = "2" 12 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all aw-server aw-webui build install package set-version test test-coverage test-coverage-tarpaulin test-coverage-grcov coverage coverage-html coverage-lcov 2 | 3 | all: build 4 | build: aw-server aw-sync 5 | 6 | DESTDIR := 7 | ifeq ($(SUDO_USER),) 8 | PREFIX := $(HOME)/.local 9 | else 10 | PREFIX := /usr/local 11 | endif 12 | 13 | 14 | # Build in release mode by default, unless RELEASE=false 15 | ifeq ($(RELEASE), false) 16 | cargoflag := 17 | targetdir := debug 18 | else 19 | cargoflag := --release 20 | targetdir := release 21 | endif 22 | 23 | aw-server: set-version aw-webui 24 | cargo build $(cargoflag) --bin aw-server 25 | 26 | aw-sync: set-version 27 | cargo build $(cargoflag) --bin aw-sync 28 | 29 | aw-webui: 30 | ifeq ($(SKIP_WEBUI),true) # Skip building webui if SKIP_WEBUI is true 31 | @echo "Skipping building webui" 32 | else 33 | make -C ./aw-webui build 34 | endif 35 | 36 | android: 37 | ./install-ndk.sh 38 | ./compile-android.sh 39 | 40 | test: 41 | cargo test 42 | 43 | fix: 44 | cargo fmt 45 | cargo clippy --fix 46 | 47 | set-version: 48 | 
@# if GITHUB_REF_TYPE is tag and GITHUB_REF_NAME is not empty, then we are building a release
49 | 	@# as such, we then need to set the Cargo.toml version to the tag name (with leading 'v' stripped)
50 | 	@# if the tag is in Python format (short pre-release suffixes), we need to convert it to Rust format (long pre-release suffixes)
51 | 	@# Example: v0.12.0b3 should become 0.12.0-beta.3
52 | 	@# Can't use sed with `-i` on macOS due to: https://stackoverflow.com/a/4247319/965332
53 | 	@if [ "$(GITHUB_REF_TYPE)" = "tag" ] && [ -n "$(GITHUB_REF_NAME)" ]; then \
54 | 		VERSION_SEMVER=$(shell echo $(GITHUB_REF_NAME:v%=%) | sed -E 's/([0-9]+)\.([0-9]+)\.([0-9]+)-?(a|alpha|b|beta|rc)([0-9]+)/\1.\2.\3-\4.\5/; s/-b(.[0-9]+)/-beta\1/; s/-a(.[0-9+])/-alpha\1/'); \
55 | 		echo "Building release $(GITHUB_REF_NAME) ($$VERSION_SEMVER), setting version in Cargo.toml"; \
56 | 		perl -i -pe "s/^version = .*/version = \"$$VERSION_SEMVER\"/" aw-server/Cargo.toml; \
57 | 	fi
58 |
59 | test-coverage-grcov:
60 | ifndef COVERAGE_CACHE
61 | 	# We need to remove build files in case a non-coverage test has been run
62 | 	# before without RUST/CARGO flags needed for coverage
63 | 	rm -rf target/debug
64 | endif
65 | 	rm -rf **/*.profraw
66 | 	# Build and test
67 | 	env RUSTFLAGS="-C instrument-coverage -C link-dead-code -C opt-level=0" \
68 | 		LLVM_PROFILE_FILE=".cov/grcov-%p-%m.profraw" \
69 | 		cargo test --verbose
70 |
71 | coverage-tarpaulin-html:
72 | 	cargo tarpaulin -o html --output-dir coverage-html
73 |
74 | GRCOV_PARAMS=$(shell find .cov -name "grcov-*.profraw" -print) --binary-path=./target/debug/aw-server -s . --llvm --branch --ignore-not-existing
75 |
76 | coverage-grcov-html: test-coverage-grcov
77 | 	grcov ${GRCOV_PARAMS} -t html -o ./target/debug/$@/
78 | 	rm -rf **/*.profraw
79 |
80 | coverage-grcov-lcov: test-coverage-grcov
81 | 	grcov ${GRCOV_PARAMS} -t lcov -o ./target/debug/lcov.info
82 | 	rm -rf **/*.profraw
83 |
84 | coverage: coverage-tarpaulin-html
85 |
86 | package:
87 | 	# Clean and prepare target/package folder
88 | 	rm -rf target/package
89 | 	mkdir -p target/package
90 | 	# Copy binaries
91 | 	cp target/$(targetdir)/aw-server target/package/aw-server-rust
92 | 	cp target/$(targetdir)/aw-sync target/package/aw-sync
93 | 	# Copy service file
94 | 	cp -f aw-server.service target/package/aw-server.service
95 | 	# Copy everything into `dist/aw-server-rust`
96 | 	mkdir -p dist
97 | 	rm -rf dist/aw-server-rust
98 | 	cp -rf target/package dist/aw-server-rust
99 |
100 | install:
101 | 	# Install aw-server and aw-sync executables
102 | 	mkdir -p $(DESTDIR)$(PREFIX)/bin/
103 | 	install -m 755 target/$(targetdir)/aw-server $(DESTDIR)$(PREFIX)/bin/aw-server
104 | 	install -m 755 target/$(targetdir)/aw-sync $(DESTDIR)$(PREFIX)/bin/aw-sync
105 | 	# Install systemd user service
106 | 	mkdir -p $(DESTDIR)$(PREFIX)/lib/systemd/user
107 | 	install -m 644 aw-server.service $(DESTDIR)$(PREFIX)/lib/systemd/user/aw-server.service
108 |
109 | clean:
110 | 	cargo clean
111 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | aw-server-rust
2 | ==============
3 |
4 | [![Build Status](https://github.com/ActivityWatch/aw-server-rust/workflows/Build/badge.svg?branch=master)](https://github.com/ActivityWatch/aw-server-rust/actions?query=workflow%3ABuild+branch%3Amaster)
5 | [![Coverage Status](https://codecov.io/gh/ActivityWatch/aw-server-rust/branch/master/graph/badge.svg)](https://codecov.io/gh/ActivityWatch/aw-server-rust)
6 | [![Dependency Status](https://deps.rs/repo/github/activitywatch/aw-server-rust/status.svg)](https://deps.rs/repo/github/activitywatch/aw-server-rust)
7 |
8 | A reimplementation of aw-server in Rust.
9 |
10 | Features missing compared to the Python implementation of aw-server:
11 |
12 | - API explorer (Swagger/OpenAPI)
13 |
14 | ### How to compile
15 |
16 | Build with `cargo`:
17 |
18 | ```sh
19 | cargo build --release
20 | ```
21 |
22 | You can also build with make, which will build the web assets as well:
23 |
24 | ```
25 | make build
26 | ```
27 |
28 | Your built executable will be located in `./target/release/aw-server`. If you want to use it with a development version of `aw-qt` you'll want to copy this binary into your `venv`:
29 |
30 | ```shell
31 | cp target/release/aw-server ../venv/bin/aw-server-rust
32 | ```
33 |
34 |
35 | ### How to run
36 |
37 | If you want to quick-compile for debugging, run `cargo run` from the project root:
38 |
39 | ```sh
40 | cargo run --bin aw-server
41 | ```
42 |
43 | *NOTE:* This will start aw-server-rust in testing mode (on port 5666 instead of port 5600).
44 |
45 | ### Syncing
46 |
47 | For details about aw-sync-rust, see the [README](./aw-sync/README.md) in its subdirectory.
48 |
--------------------------------------------------------------------------------
/aw-client-rust/.gitignore:
--------------------------------------------------------------------------------
1 | # Generated by Cargo
2 | # will have compiled files and executables
3 | /target/
4 |
5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
6 | # More information here http://doc.crates.io/guide.html#cargotoml-vs-cargolock
7 | Cargo.lock
8 |
9 | # These are backup files generated by rustfmt
10 | **/*.rs.bk
11 |
--------------------------------------------------------------------------------
/aw-client-rust/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "aw-client-rust"
3 | version = "0.1.0"
4 | edition = "2021"
5 | authors = ["Johan Bjäreholt "]
6 |
7 | [dependencies]
8 | reqwest = { version = "0.11", features = ["json", "blocking"] }
9 | gethostname = "0.4"
10 | serde = "1.0"
11 | serde_json = "1.0"
12 | chrono = { version = "0.4", features = ["serde"] }
13 | aw-models = { path = "../aw-models" }
14 | tokio = { version = "1.28.2", features = ["rt"] }
15 |
16 | [dev-dependencies]
17 | aw-datastore = { path = "../aw-datastore" }
18 | aw-server = { path = "../aw-server", default-features = false, features=[] }
19 | rocket = "0.5.0-rc.1"
20 | tokio-test = "*"
21 |
--------------------------------------------------------------------------------
/aw-client-rust/README.md:
--------------------------------------------------------------------------------
1 | aw-client-rust
2 | ==============
3 |
4 | WIP: aw-client implementation in Rust
5 |
6 | TODO: Better error handling (requests currently never fail?)
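A minimal usage sketch of the blocking client in `src/blocking.rs`; the host, port, client name and bucket type below are illustrative placeholders (aw-server listens on port 5600 by default, 5666 in testing mode):

```rust
use aw_client_rust::blocking::AwClient;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Connect to a locally running aw-server
    let client = AwClient::new("127.0.0.1", 5600, "aw-client-rust-example")?;

    let info = client.get_info()?;
    println!("aw-server {} on {}", info.version, info.hostname);

    // Buckets are conventionally suffixed with the hostname
    let bucket = format!("example-bucket_{}", client.hostname);
    client.create_bucket_simple(&bucket, "example-type")?;
    Ok(())
}
```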
7 |
--------------------------------------------------------------------------------
/aw-client-rust/src/blocking.rs:
--------------------------------------------------------------------------------
1 | use std::future::Future;
2 | use std::{collections::HashMap, error::Error};
3 |
4 | use chrono::{DateTime, Utc};
5 |
6 | use aw_models::{Bucket, Event};
7 |
8 | use super::AwClient as AsyncAwClient;
9 |
10 | pub struct AwClient {
11 |     client: AsyncAwClient,
12 |     pub baseurl: reqwest::Url,
13 |     pub name: String,
14 |     pub hostname: String,
15 | }
16 |
17 | impl std::fmt::Debug for AwClient {
18 |     fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
19 |         write!(f, "AwClient(baseurl={:?})", self.client.baseurl)
20 |     }
21 | }
22 |
23 | fn block_on<F: Future>(f: F) -> F::Output {
24 |     tokio::runtime::Builder::new_current_thread()
25 |         .enable_all()
26 |         .build()
27 |         .expect("build shell runtime")
28 |         .block_on(f)
29 | }
30 |
31 | macro_rules! proxy_method
32 | {
33 |     ($name:tt, $ret:ty, $($v:ident: $t:ty),*) => {
34 |         pub fn $name(&self, $($v: $t),*) -> Result<$ret, reqwest::Error>
35 |         { block_on(self.client.$name($($v),*)) }
36 |     };
37 | }
38 |
39 | impl AwClient {
40 |     pub fn new(host: &str, port: u16, name: &str) -> Result<AwClient, Box<dyn Error>> {
41 |         let async_client = AsyncAwClient::new(host, port, name)?;
42 |
43 |         Ok(AwClient {
44 |             baseurl: async_client.baseurl.clone(),
45 |             name: async_client.name.clone(),
46 |             hostname: async_client.hostname.clone(),
47 |             client: async_client,
48 |         })
49 |     }
50 |
51 |     proxy_method!(get_bucket, Bucket, bucketname: &str);
52 |     proxy_method!(get_buckets, HashMap<String, Bucket>,);
53 |     proxy_method!(create_bucket, (), bucket: &Bucket);
54 |     proxy_method!(create_bucket_simple, (), bucketname: &str, buckettype: &str);
55 |     proxy_method!(delete_bucket, (), bucketname: &str);
56 |     proxy_method!(
57 |         get_events,
58 |         Vec<Event>,
59 |         bucketname: &str,
60 |         start: Option<DateTime<Utc>>,
61 |         stop: Option<DateTime<Utc>>,
62 |         limit: Option<u64>
63 |     );
64 |     proxy_method!(
65 |         query,
66 |         Vec<serde_json::Value>,
67 |         query: &str,
68 |         timeperiods: Vec<(DateTime<Utc>, DateTime<Utc>)>
69 |     );
70 |     proxy_method!(insert_event, (), bucketname: &str, event: &Event);
71 |     proxy_method!(insert_events, (), bucketname: &str, events: Vec<Event>);
72 |     proxy_method!(
73 |         heartbeat,
74 |         (),
75 |         bucketname: &str,
76 |         event: &Event,
77 |         pulsetime: f64
78 |     );
79 |     proxy_method!(delete_event, (), bucketname: &str, event_id: i64);
80 |     proxy_method!(get_event_count, i64, bucketname: &str);
81 |     proxy_method!(get_info, aw_models::Info,);
82 |
83 |     pub fn wait_for_start(&self) -> Result<(), Box<dyn Error>> {
84 |         self.client.wait_for_start()
85 |     }
86 | }
87 |
--------------------------------------------------------------------------------
/aw-client-rust/tests/test.rs:
--------------------------------------------------------------------------------
1 | extern crate aw_client_rust;
2 | extern crate aw_datastore;
3 | extern crate aw_server;
4 | extern crate chrono;
5 | extern crate rocket;
6 | extern crate serde_json;
7 | extern crate tokio_test;
8 |
9 | #[cfg(test)]
10 | mod test {
11 |     use aw_client_rust::blocking::AwClient;
12 |     use aw_client_rust::Event;
13 |     use chrono::{DateTime, Duration, Utc};
14 |     use serde_json::Map;
15 |     use std::sync::Mutex;
16 |     use std::thread;
17 |     use tokio_test::block_on;
18 |
19 |     // A random port, though not guaranteed to be free
20 |     // FIXME: Bind to a port that is free for certain and use that for the client instead
21 |     static PORT: u16 = 41293;
22 |
23 |     fn wait_for_server(timeout_s: u32, client: &AwClient) {
24 |         for i in 0..
{ 25 | match client.get_info() { 26 | Ok(_) => break, 27 | Err(err) => { 28 | if i == timeout_s - 1 { 29 | panic!("Timed out starting aw-server after {timeout_s}s: {err:?}"); 30 | } 31 | } 32 | } 33 | use std::time; 34 | let duration = time::Duration::from_secs(1); 35 | thread::sleep(duration); 36 | } 37 | } 38 | 39 | fn setup_testserver() -> rocket::Shutdown { 40 | use aw_server::endpoints::AssetResolver; 41 | use aw_server::endpoints::ServerState; 42 | 43 | let state = ServerState { 44 | datastore: Mutex::new(aw_datastore::Datastore::new_in_memory(false)), 45 | asset_resolver: AssetResolver::new(None), 46 | device_id: "test_id".to_string(), 47 | }; 48 | let mut aw_config = aw_server::config::AWConfig::default(); 49 | aw_config.port = PORT; 50 | let server = aw_server::endpoints::build_rocket(state, aw_config); 51 | let server = block_on(server.ignite()).unwrap(); 52 | let shutdown_handler = server.shutdown(); 53 | 54 | thread::spawn(move || { 55 | let _ = block_on(server.launch()).unwrap(); 56 | }); 57 | 58 | shutdown_handler 59 | } 60 | 61 | #[test] 62 | fn test_full() { 63 | let clientname = "aw-client-rust-test"; 64 | 65 | let client: AwClient = 66 | AwClient::new("127.0.0.1", PORT, clientname).expect("Client creation failed"); 67 | 68 | let shutdown_handler = setup_testserver(); 69 | 70 | wait_for_server(20, &client); 71 | 72 | let info = client.get_info().unwrap(); 73 | assert!(info.testing); 74 | 75 | let bucketname = format!("aw-client-rust-test_{}", client.hostname); 76 | let buckettype = "test-type"; 77 | client 78 | .create_bucket_simple(&bucketname, buckettype) 79 | .unwrap(); 80 | 81 | let bucket = client.get_bucket(&bucketname).unwrap(); 82 | assert!(bucket.id == bucketname); 83 | println!("{}", bucket.id); 84 | 85 | let buckets = client.get_buckets().unwrap(); 86 | println!("Buckets: {buckets:?}"); 87 | let mut event = Event { 88 | id: None, 89 | timestamp: DateTime::from_utc( 90 | DateTime::parse_from_rfc3339("2017-12-30T01:00:00+00:00") 91 | .unwrap() 92 | .naive_utc(), 93 | Utc, 94 | ), 95 | duration: Duration::seconds(0), 96 | data: Map::new(), 97 | }; 98 | println!("{event:?}"); 99 | client.insert_event(&bucketname, &event).unwrap(); 100 | // Ugly way to create a UTC from timestamp, see https://github.com/chronotope/chrono/issues/263 101 | event.timestamp = DateTime::from_utc( 102 | DateTime::parse_from_rfc3339("2017-12-30T01:00:01+00:00") 103 | .unwrap() 104 | .naive_utc(), 105 | Utc, 106 | ); 107 | client.heartbeat(&bucketname, &event, 10.0).unwrap(); 108 | 109 | let events = client.get_events(&bucketname, None, None, None).unwrap(); 110 | println!("Events: {events:?}"); 111 | assert!(events[0].duration == Duration::seconds(1)); 112 | 113 | // Query 114 | let query = format!( 115 | "events = query_bucket(\"{}\"); 116 | RETURN = events;", 117 | bucket.id 118 | ); 119 | let start: DateTime = DateTime::parse_from_rfc3339("1996-12-19T00:00:00-08:00") 120 | .unwrap() 121 | .into(); 122 | let end: DateTime = DateTime::parse_from_rfc3339("2020-12-19T00:00:00-08:00") 123 | .unwrap() 124 | .into(); 125 | let timeperiods = (start, end); 126 | let query_result = client.query(&query, vec![timeperiods]).unwrap(); 127 | println!("Query result: {query_result:?}"); 128 | 129 | client 130 | .delete_event(&bucketname, events[0].id.unwrap()) 131 | .unwrap(); 132 | 133 | let count = client.get_event_count(&bucketname).unwrap(); 134 | assert_eq!(count, 0); 135 | 136 | client.delete_bucket(&bucketname).unwrap(); 137 | 138 | shutdown_handler.notify(); 139 | } 140 | } 141 | 
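// A minimal sketch (not part of the original tests) of one way to address the
// FIXME above: bind to port 0 so the OS picks a free ephemeral port, then hand
// that port to both the test server and the client instead of hard-coding one.
#[allow(dead_code)]
fn find_free_port() -> u16 {
    let listener = std::net::TcpListener::bind("127.0.0.1:0").expect("bind to ephemeral port");
    let port = listener.local_addr().expect("get local addr").port();
    // The listener is dropped here, freeing the port; note there is a small
    // race window before the server re-binds it.
    port
}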
-------------------------------------------------------------------------------- /aw-datastore/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "aw-datastore" 3 | version = "0.1.0" 4 | authors = ["Johan Bjäreholt "] 5 | edition = "2021" 6 | 7 | [features] 8 | default = [] # no features by default 9 | legacy_import_tests = [] 10 | 11 | [dependencies] 12 | appdirs = "0.2" 13 | serde = "1.0" 14 | serde_json = "1.0" 15 | chrono = { version = "0.4", features = ["serde"] } 16 | rusqlite = { version = "0.30", features = ["chrono", "serde_json", "bundled"] } 17 | mpsc_requests = "0.3" 18 | log = "0.4" 19 | 20 | aw-models = { path = "../aw-models" } 21 | aw-transform = { path = "../aw-transform" } 22 | -------------------------------------------------------------------------------- /aw-datastore/src/lib.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate log; 3 | 4 | #[macro_export] 5 | macro_rules! json_map { 6 | { $( $key:literal : $value:expr),* } => {{ 7 | use serde_json::Value; 8 | use serde_json::map::Map; 9 | #[allow(unused_mut)] 10 | let mut map : Map = Map::new(); 11 | $( 12 | map.insert( $key.to_string(), json!($value) ); 13 | )* 14 | map 15 | }}; 16 | } 17 | 18 | mod datastore; 19 | mod legacy_import; 20 | mod worker; 21 | 22 | pub use self::datastore::DatastoreInstance; 23 | pub use self::worker::Datastore; 24 | 25 | #[derive(Debug, Clone)] 26 | pub enum DatastoreMethod { 27 | Memory(), 28 | File(String), 29 | } 30 | 31 | /* TODO: Implement this as a proper error */ 32 | #[derive(Debug, Clone)] 33 | pub enum DatastoreError { 34 | NoSuchBucket(String), 35 | BucketAlreadyExists(String), 36 | NoSuchKey(String), 37 | MpscError, 38 | InternalError(String), 39 | // Errors specific to when migrate is disabled 40 | Uninitialized(String), 41 | OldDbVersion(String), 42 | } 43 | -------------------------------------------------------------------------------- /aw-models/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "aw-models" 3 | version = "0.1.0" 4 | authors = ["Johan Bjäreholt "] 5 | edition = "2021" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [lib] 10 | name = "aw_models" 11 | crate-type = ["lib", "cdylib"] 12 | path = "src/lib.rs" 13 | 14 | 15 | [dependencies] 16 | chrono = { version = "0.4", features = ["serde"] } 17 | log = "0.4" 18 | serde = { version = "1.0", features = ["derive"] } 19 | serde_json = "1.0" 20 | schemars = { version = "0.8", features = ["chrono"] } 21 | -------------------------------------------------------------------------------- /aw-models/examples/schema.rs: -------------------------------------------------------------------------------- 1 | extern crate aw_models; 2 | 3 | use schemars::schema_for; 4 | 5 | fn main() { 6 | let schema = schema_for!(aw_models::Bucket); 7 | println!("{}", serde_json::to_string_pretty(&schema).unwrap()); 8 | } 9 | -------------------------------------------------------------------------------- /aw-models/src/bucket.rs: -------------------------------------------------------------------------------- 1 | use chrono::DateTime; 2 | use chrono::Utc; 3 | use schemars::JsonSchema; 4 | use serde::{Deserialize, Serialize}; 5 | use serde_json::map::Map; 6 | use serde_json::value::Value; 7 | use std::collections::HashMap; 8 | 9 | use crate::Event; 10 | use crate::TryVec; 11 | 12 
| #[derive(Serialize, Deserialize, JsonSchema, Clone, Debug)]
13 | pub struct Bucket {
14 |     #[serde(skip)]
15 |     pub bid: Option<i64>,
16 |     #[serde(default)]
17 |     pub id: String,
18 |     #[serde(rename = "type")] /* type is a reserved Rust keyword */ pub _type: String,
19 |     pub client: String,
20 |     pub hostname: String,
21 |     pub created: Option<DateTime<Utc>>,
22 |     #[serde(default)]
23 |     pub data: Map<String, Value>,
24 |     #[serde(default, skip_deserializing)]
25 |     pub metadata: BucketMetadata,
26 |     // Events should only be "Some" during import/export.
27 |     // A TryVec is used to discard only the events that fail to deserialize, so that
28 |     // only a few events are dropped during import instead of failing the whole import.
29 |     pub events: Option<TryVec<Event>>,
30 |     pub last_updated: Option<DateTime<Utc>>, // TODO: Should probably be moved into metadata field
31 | }
32 |
33 | #[derive(Serialize, Deserialize, JsonSchema, Clone, Debug, Default)]
34 | pub struct BucketMetadata {
35 |     #[serde(default)]
36 |     pub start: Option<DateTime<Utc>>,
37 |     pub end: Option<DateTime<Utc>>,
38 | }
39 |
40 | #[derive(Serialize, Deserialize, JsonSchema, Clone)]
41 | pub struct BucketsExport {
42 |     pub buckets: HashMap<String, Bucket>,
43 | }
44 |
45 | #[test]
46 | fn test_bucket() {
47 |     let b = Bucket {
48 |         bid: None,
49 |         id: "id".to_string(),
50 |         _type: "type".to_string(),
51 |         client: "client".to_string(),
52 |         hostname: "hostname".into(),
53 |         created: None,
54 |         data: json_map! {},
55 |         metadata: BucketMetadata::default(),
56 |         events: None,
57 |         last_updated: None,
58 |     };
59 |     debug!("bucket: {:?}", b);
60 | }
61 |
--------------------------------------------------------------------------------
/aw-models/src/duration.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 |
3 | // Max duration of an i64 nanosecond count is 2562047.7880152157 hours
4 | // ((2**64)/2)/1000000000/60/60
5 |
6 | fn get_nanos(duration: &chrono::Duration) -> f64 {
7 |     (duration.num_nanoseconds().unwrap() as f64) / 1_000_000_000.0
8 | }
9 |
10 | #[derive(Serialize, Deserialize)]
11 | #[serde(remote = "chrono::Duration")]
12 | pub struct DurationSerialization(#[serde(getter = "get_nanos")] f64);
13 |
14 | // Provide a conversion to construct the remote type.
15 | impl From<DurationSerialization> for chrono::Duration {
16 |     fn from(def: DurationSerialization) -> chrono::Duration {
17 |         chrono::Duration::nanoseconds((def.0 * 1_000_000_000.0) as i64)
18 |     }
19 | }
20 |
--------------------------------------------------------------------------------
/aw-models/src/event.rs:
--------------------------------------------------------------------------------
1 | use chrono::DateTime;
2 | use chrono::Duration;
3 | use chrono::Utc;
4 | use schemars::JsonSchema;
5 | use serde::{Deserialize, Serialize};
6 | use serde_json::Map;
7 | use serde_json::Value;
8 |
9 | use crate::duration::DurationSerialization;
10 | use crate::TimeInterval;
11 |
12 | #[derive(Serialize, Deserialize, JsonSchema, Clone, Debug)]
13 | pub struct Event {
14 |     /// A unique id for this event.
15 |     /// Will be assigned once the event has reached the server's datastore.
16 |     ///
17 |     /// **WARNING:** If you set the ID and insert the event to the server it will replace the previous
18 |     /// event with that ID. Only do this if you are completely sure what you are doing.
19 |     pub id: Option<i64>,
20 |     /// An RFC 3339 timestamp which represents the start of the event
21 |     pub timestamp: DateTime<Utc>,
22 |     /// Duration of the event as a floating point number in seconds.
23 |     /// Appended to the timestamp, it can represent the end of the event.
24 |     /// Maximum precision is nanoseconds.
25 |     #[serde(with = "DurationSerialization", default = "default_duration")]
26 |     #[schemars(with = "f64")]
27 |     pub duration: Duration,
28 |     /// Can contain any arbitrary JSON data that represents the value of the event.
29 |     /// All events in a bucket should follow the format of its respective bucket-type.
30 |     pub data: Map<String, Value>,
31 | }
32 |
33 | impl Event {
34 |     pub fn new(timestamp: DateTime<Utc>, duration: Duration, data: Map<String, Value>) -> Self {
35 |         Event {
36 |             id: None,
37 |             timestamp,
38 |             duration,
39 |             data,
40 |         }
41 |     }
42 |     pub fn calculate_endtime(&self) -> DateTime<Utc> {
43 |         self.timestamp + chrono::Duration::nanoseconds(self.duration.num_nanoseconds().unwrap())
44 |     }
45 |     pub fn interval(&self) -> TimeInterval {
46 |         TimeInterval::new(self.timestamp, self.calculate_endtime())
47 |     }
48 | }
49 |
50 | impl PartialEq for Event {
51 |     fn eq(&self, other: &Event) -> bool {
52 |         !(self.timestamp != other.timestamp
53 |             || self.duration != other.duration
54 |             || self.data != other.data)
55 |     }
56 | }
57 |
58 | impl Default for Event {
59 |     fn default() -> Self {
60 |         Event {
61 |             id: None,
62 |             timestamp: Utc::now(),
63 |             duration: Duration::seconds(0),
64 |             data: serde_json::Map::new(),
65 |         }
66 |     }
67 | }
68 |
69 | fn default_duration() -> Duration {
70 |     Duration::seconds(0)
71 | }
72 |
73 | #[test]
74 | fn test_event() {
75 |     use serde_json::json;
76 |
77 |     let e = Event {
78 |         id: None,
79 |         timestamp: Utc::now(),
80 |         duration: Duration::seconds(1),
81 |         data: json_map! {"test": json!(1)},
82 |     };
83 |     debug!("event: {:?}", e);
84 | }
85 |
--------------------------------------------------------------------------------
/aw-models/src/export.rs:
--------------------------------------------------------------------------------
1 | // NOTE: Duplicate of BucketsExport in bucket.rs; this module is not declared in lib.rs.
2 | use std::collections::HashMap;
3 |
4 | use schemars::JsonSchema;
5 | use serde::{Deserialize, Serialize};
6 |
7 | use crate::Bucket;
8 |
9 | #[derive(Serialize, Deserialize, JsonSchema, Clone)]
10 | pub struct BucketsExport {
11 |     pub buckets: HashMap<String, Bucket>,
12 | }
13 |
--------------------------------------------------------------------------------
/aw-models/src/info.rs:
--------------------------------------------------------------------------------
1 | use schemars::JsonSchema;
2 | use serde::{Deserialize, Serialize};
3 |
4 | #[derive(Serialize, Deserialize, JsonSchema)]
5 | pub struct Info {
6 |     pub hostname: String,
7 |     pub version: String,
8 |     pub testing: bool,
9 |     pub device_id: String,
10 | }
11 |
--------------------------------------------------------------------------------
/aw-models/src/lib.rs:
--------------------------------------------------------------------------------
1 | #[macro_use]
2 | extern crate log;
3 |
4 | // TODO: Move me to an appropriate place
5 | #[cfg(test)] // Macro only used in tests
6 | macro_rules! json_map {
json_map { 7 | { $( $key:literal : $value:expr),* } => {{ 8 | use serde_json::{Value}; 9 | use serde_json::map::Map; 10 | #[allow(unused_mut)] 11 | let mut map : Map = Map::new(); 12 | $( 13 | map.insert( $key.to_string(), json!($value) ); 14 | )* 15 | map 16 | }}; 17 | } 18 | 19 | mod bucket; 20 | mod duration; 21 | mod event; 22 | mod info; 23 | mod query; 24 | mod timeinterval; 25 | mod tryvec; 26 | 27 | pub use self::bucket::Bucket; 28 | pub use self::bucket::BucketMetadata; 29 | pub use self::bucket::BucketsExport; 30 | pub use self::event::Event; 31 | pub use self::info::Info; 32 | pub use self::query::Query; 33 | pub use self::timeinterval::TimeInterval; 34 | pub use self::tryvec::TryVec; 35 | -------------------------------------------------------------------------------- /aw-models/src/query.rs: -------------------------------------------------------------------------------- 1 | use serde::Deserialize; 2 | 3 | use crate::TimeInterval; 4 | 5 | // TODO Implement serialize once TimeInterval has implemented it 6 | #[derive(Deserialize, Clone, Debug)] 7 | pub struct Query { 8 | //#[serde(with = "DurationSerialization")] 9 | pub timeperiods: Vec, 10 | pub query: Vec, 11 | } 12 | -------------------------------------------------------------------------------- /aw-models/src/timeinterval.rs: -------------------------------------------------------------------------------- 1 | use crate::Event; 2 | use std::cmp::{max, min}; 3 | use std::fmt; 4 | 5 | use serde::de::{self, Deserialize, Deserializer, Visitor}; 6 | 7 | use chrono::DateTime; 8 | use chrono::Duration; 9 | use chrono::Utc; 10 | 11 | // TODO: Implement serialize 12 | 13 | #[derive(Clone, Debug)] 14 | pub struct TimeInterval { 15 | start: DateTime, 16 | end: DateTime, 17 | } 18 | 19 | #[derive(Debug)] 20 | pub enum TimeIntervalError { 21 | ParseError(), 22 | } 23 | 24 | /// Python versions of many of these functions can be found at https://github.com/ErikBjare/timeslot 25 | impl TimeInterval { 26 | pub fn new(start: DateTime, end: DateTime) -> TimeInterval { 27 | TimeInterval { start, end } 28 | } 29 | 30 | pub fn new_from_string(period: &str) -> Result { 31 | let splits = period.split('/').collect::>(); 32 | if splits.len() != 2 { 33 | return Err(TimeIntervalError::ParseError()); 34 | } 35 | let start = match DateTime::parse_from_rfc3339(splits[0]) { 36 | Ok(dt) => dt.with_timezone(&Utc), 37 | Err(_e) => return Err(TimeIntervalError::ParseError()), 38 | }; 39 | let end = match DateTime::parse_from_rfc3339(splits[1]) { 40 | Ok(dt) => dt.with_timezone(&Utc), 41 | Err(_e) => return Err(TimeIntervalError::ParseError()), 42 | }; 43 | 44 | Ok(TimeInterval::new(start, end)) 45 | } 46 | 47 | pub fn start(&self) -> &DateTime { 48 | &self.start 49 | } 50 | 51 | pub fn end(&self) -> &DateTime { 52 | &self.end 53 | } 54 | 55 | pub fn duration(&self) -> Duration { 56 | self.end - self.start 57 | } 58 | 59 | /// If intervals are separated by a non-zero gap, return the gap as a new TimeInterval, else None 60 | pub fn gap(&self, other: &TimeInterval) -> Option { 61 | if self.end < other.start { 62 | Some(TimeInterval::new(self.end, other.start)) 63 | } else if other.end < self.start { 64 | Some(TimeInterval::new(other.end, self.start)) 65 | } else { 66 | None 67 | } 68 | } 69 | 70 | /// Joins two intervals together if they don't have a gap, else None 71 | pub fn union(&self, other: &TimeInterval) -> Option { 72 | match self.gap(other) { 73 | Some(_) => None, 74 | None => Some(TimeInterval::new( 75 | min(self.start, other.start), 76 | max(self.end, 
77 |             )),
78 |         }
79 |     }
80 |
81 |     /// The intersection of two intervals
82 |     pub fn intersection(&self, other: &TimeInterval) -> Option<TimeInterval> {
83 |         let last_start = max(self.start, other.start);
84 |         let first_end = min(self.end, other.end);
85 |         if last_start < first_end {
86 |             Some(TimeInterval::new(last_start, first_end))
87 |         } else {
88 |             None
89 |         }
90 |     }
91 |
92 |     /// Whether the two intervals intersect
93 |     pub fn intersects(&self, other: &TimeInterval) -> bool {
94 |         self.intersection(other).is_some()
95 |     }
96 | }
97 |
98 | impl From<&Event> for TimeInterval {
99 |     fn from(e: &Event) -> TimeInterval {
100 |         TimeInterval::new(e.timestamp, e.timestamp + e.duration)
101 |     }
102 | }
103 |
104 | impl fmt::Display for TimeInterval {
105 |     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
106 |         write!(f, "{}/{}", self.start.to_rfc3339(), self.end.to_rfc3339())
107 |     }
108 | }
109 |
110 | struct TimeIntervalVisitor;
111 | use serde::de::Unexpected;
112 |
113 | impl<'de> Visitor<'de> for TimeIntervalVisitor {
114 |     type Value = TimeInterval;
115 |
116 |     fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
117 |         formatter.write_str("a string in ISO timeinterval format (such as 2000-01-01T00:00:00+01:00/2001-02-02T01:01:01+01:00)")
118 |     }
119 |
120 |     fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
121 |     where
122 |         E: de::Error,
123 |     {
124 |         match TimeInterval::new_from_string(value) {
125 |             Ok(ti) => Ok(ti),
126 |             Err(e) => {
127 |                 warn!("{:?}", e);
128 |                 Err(de::Error::invalid_value(Unexpected::Str(value), &self))
129 |             }
130 |         }
131 |     }
132 | }
133 |
134 | impl<'de> Deserialize<'de> for TimeInterval {
135 |     fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
136 |     where
137 |         D: Deserializer<'de>,
138 |     {
139 |         deserializer.deserialize_str(TimeIntervalVisitor)
140 |     }
141 | }
142 |
143 | #[test]
144 | fn test_timeinterval() {
145 |     use std::str::FromStr;
146 |
147 |     let start = DateTime::from_str("2000-01-01T00:00:00Z").unwrap();
148 |     let end = DateTime::from_str("2000-01-02T00:00:00Z").unwrap();
149 |     let period_str = "2000-01-01T00:00:00+00:00/2000-01-02T00:00:00+00:00";
150 |     let duration = end - start;
151 |     let tp = TimeInterval::new(start, end);
152 |     assert_eq!(tp.start(), &start);
153 |     assert_eq!(tp.end(), &end);
154 |     assert_eq!(tp.duration(), duration);
155 |     assert_eq!(tp.to_string(), period_str);
156 |
157 |     let tp = TimeInterval::new_from_string(period_str).unwrap();
158 |     assert_eq!(tp.start(), &start);
159 |     assert_eq!(tp.end(), &end);
160 |     assert_eq!(tp.duration(), duration);
161 |     assert_eq!(tp.to_string(), period_str);
162 | }
163 |
164 | #[test]
165 | fn test_timeinterval_intersection() {
166 |     use std::str::FromStr;
167 |
168 |     // Check that two exactly adjacent events don't intersect
169 |     let tp1 = TimeInterval::new(
170 |         DateTime::from_str("2000-01-01T00:00:00Z").unwrap(),
171 |         DateTime::from_str("2000-01-01T00:01:00Z").unwrap(),
172 |     );
173 |     let tp2 = TimeInterval::new(
174 |         DateTime::from_str("2000-01-01T00:01:00Z").unwrap(),
175 |         DateTime::from_str("2000-01-01T00:02:00Z").unwrap(),
176 |     );
177 |     assert!(!tp1.intersects(&tp2));
178 | }
179 |
--------------------------------------------------------------------------------
/aw-models/src/tryvec.rs:
--------------------------------------------------------------------------------
1 | use core::marker::PhantomData;
2 | use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema};
3 | use serde::de::{DeserializeOwned, SeqAccess, Visitor};
4 | use serde::ser::SerializeSeq;
5 | use serde::{Deserialize, Deserializer, Serialize, Serializer};
6 | use serde_json::Value;
7 | use std::fmt;
8 | use std::fmt::Debug;
9 |
10 | #[derive(Debug, Clone)]
11 | pub enum TryParse<T> {
12 |     Parsed(T),
13 |     Unparsed(Value),
14 |     NotPresent,
15 | }
16 |
17 | impl<T: JsonSchema> JsonSchema for TryParse<T> {
18 |     fn schema_name() -> String {
19 |         format!("Try<{}>", std::any::type_name::<T>())
20 |     }
21 |
22 |     fn json_schema(gen: &mut SchemaGenerator) -> Schema {
23 |         gen.subschema_for::<T>()
24 |     }
25 | }
26 |
27 | impl<'de, T: DeserializeOwned + JsonSchema> Deserialize<'de> for TryParse<T> {
28 |     fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
29 |         match Option::<Value>::deserialize(deserializer)? {
30 |             None => Ok(TryParse::NotPresent),
31 |             Some(value) => match T::deserialize(&value) {
32 |                 Ok(t) => Ok(TryParse::Parsed(t)),
33 |                 Err(_) => Ok(TryParse::Unparsed(value)),
34 |             },
35 |         }
36 |     }
37 | }
38 |
39 | #[derive(Debug, Clone, JsonSchema)]
40 | #[serde(transparent)]
41 | pub struct TryVec<T> {
42 |     inner: Vec<TryParse<T>>,
43 | }
44 |
45 | impl<T> TryVec<T> {
46 |     pub fn new(mut vec: Vec<T>) -> Self {
47 |         let mut vec_marked: Vec<TryParse<T>> = Vec::new();
48 |         for item in vec.drain(..) {
49 |             vec_marked.push(TryParse::Parsed(item));
50 |         }
51 |         TryVec { inner: vec_marked }
52 |     }
53 |
54 |     pub fn new_empty() -> Self {
55 |         TryVec { inner: Vec::new() }
56 |     }
57 |
58 |     pub fn take_inner(self) -> Vec<T> {
59 |         let mut vec: Vec<T> = Vec::new();
60 |         for item in self.inner {
61 |             match item {
62 |                 TryParse::Parsed(i) => vec.push(i),
63 |                 _ => continue,
64 |             };
65 |         }
66 |         vec
67 |     }
68 | }
69 |
70 | impl<T> Serialize for TryVec<T>
71 | where
72 |     T: Serialize,
73 | {
74 |     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
75 |     where
76 |         S: Serializer,
77 |     {
78 |         let mut seq = serializer.serialize_seq(Some(self.inner.len()))?;
79 |         for element in &self.inner {
80 |             match element {
81 |                 TryParse::Parsed(t) => seq.serialize_element(t)?,
82 |                 _ => continue,
83 |             };
84 |         }
85 |         seq.end()
86 |     }
87 | }
88 |
89 | struct TryVecVisitor<T> {
90 |     marker: PhantomData<fn() -> TryVec<T>>,
91 | }
92 |
93 | impl<T> TryVecVisitor<T> {
94 |     fn new() -> Self {
95 |         TryVecVisitor {
96 |             marker: PhantomData,
97 |         }
98 |     }
99 | }
100 |
101 | impl<'de, T> Visitor<'de> for TryVecVisitor<T>
102 | where
103 |     T: DeserializeOwned + JsonSchema,
104 | {
105 |     type Value = TryVec<T>;
106 |
107 |     // Format a message stating what data this Visitor expects to receive.
108 |     fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
109 |         formatter.write_str("a sequence")
110 |     }
111 |
112 |     fn visit_seq<M>(self, mut access: M) -> Result<Self::Value, M::Error>
113 |     where
114 |         M: SeqAccess<'de>,
115 |     {
116 |         let mut vec = Vec::new();
117 |
118 |         loop {
119 |             let res = match access.next_element() {
120 |                 Ok(val) => val,
121 |                 Err(err) => {
122 |                     println!("Failed to parse event because '{err}', the event will be discarded");
123 |                     continue;
124 |                 }
125 |             };
126 |             match res {
127 |                 Some(item) => vec.push(item),
128 |                 None => break,
129 |             };
130 |         }
131 |
132 |         Ok(TryVec { inner: vec })
133 |     }
134 | }
135 |
136 | impl<'de, T: JsonSchema> Deserialize<'de> for TryVec<T>
137 | where
138 |     T: DeserializeOwned,
139 | {
140 |     fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
141 |     where
142 |         D: Deserializer<'de>,
143 |     {
144 |         deserializer.deserialize_seq(TryVecVisitor::new())
145 |     }
146 | }
147 |
148 | #[cfg(test)]
149 | mod test {
150 |     use schemars::JsonSchema;
151 |     use serde::{Deserialize, Serialize};
152 |
153 |     use super::TryVec;
154 |
155 |     #[derive(Deserialize, Serialize, JsonSchema, Debug)]
156 |     struct TestEvent {
157 |         data: String,
158 |     }
159 |
160 |     fn assert_serialized_deserialized_eq(data: &str, eq: &str) {
161 |         let deserialized = serde_json::from_str::<TryVec<TestEvent>>(data).unwrap();
162 |         let serialized = serde_json::to_string(&deserialized).unwrap();
163 |         assert_eq!(serialized, eq);
164 |     }
165 |
166 |     #[test]
167 |     fn test_serialize_deserialize() {
168 |         println!("test empty array");
169 |         assert_serialized_deserialized_eq(r#"[]"#, r#"[]"#);
170 |
171 |         println!("test one valid event");
172 |         assert_serialized_deserialized_eq(r#"[{"data":"test"}]"#, r#"[{"data":"test"}]"#);
173 |
174 |         println!("test invalid type int, skip event");
175 |         assert_serialized_deserialized_eq(r#"[{ "data": 1 }]"#, r#"[]"#);
176 |
177 |         println!("test invalid type dict, skip event");
178 |         assert_serialized_deserialized_eq(r#"[{"data":{}}]"#, r#"[]"#);
179 |
180 |         println!("test invalid type arr, skip event");
181 |         assert_serialized_deserialized_eq(r#"[{"data":[]}]"#, r#"[]"#);
182 |
183 |         println!("test multiple valid events");
184 |         assert_serialized_deserialized_eq(
185 |             r#"[{"data":"test"},{"data":"test2"},{"data":"test3"}]"#,
186 |             r#"[{"data":"test"},{"data":"test2"},{"data":"test3"}]"#,
187 |         );
188 |
189 |         println!("test invalid event in middle of sequence, skip one event");
190 |         assert_serialized_deserialized_eq(
191 |             r#"[{"data":"test"},{"data":2},{"data":"test3"}]"#,
192 |             r#"[{"data":"test"},{"data":"test3"}]"#,
193 |         );
194 |
195 |         println!("test utf-16 character");
196 |         assert_serialized_deserialized_eq(r#"[{"data":"\ud835\udc47"}]"#, r#"[{"data":"𝑇"}]"#);
197 |
198 |         println!("test invalid utf-8/16, skip event");
199 |         assert_serialized_deserialized_eq(r#"[{"data":"\ud835"}]"#, r#"[]"#);
200 |     }
201 |
202 |     #[test]
203 |     fn test_methods() {
204 |         let tryvec = TryVec::<TestEvent>::new_empty();
205 |         assert_eq!(tryvec.take_inner().len(), Vec::<TestEvent>::new().len());
206 |     }
207 | }
208 |
--------------------------------------------------------------------------------
/aw-query/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "aw-query"
3 | version = "0.1.0"
4 | authors = ["Johan Bjäreholt "]
5 | edition = "2021"
6 |
7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
8 |
9 | [dependencies]
10 | serde = { version = "1.0", features = ["derive"] }
11 | serde_json = "1.0"
12 | chrono = {
version = "0.4", features = ["serde"] } 13 | plex = "0.3.0" 14 | log = "0.4" 15 | fancy-regex = "0.12.0" 16 | aw-datastore = { path = "../aw-datastore" } 17 | aw-models = { path = "../aw-models" } 18 | aw-transform = { path = "../aw-transform" } 19 | 20 | [dev-dependencies] 21 | criterion = "0.5.1" 22 | 23 | [[bench]] 24 | name = "benchmark" 25 | harness = false 26 | -------------------------------------------------------------------------------- /aw-query/benches/benchmark.rs: -------------------------------------------------------------------------------- 1 | use criterion::{criterion_group, criterion_main}; 2 | 3 | // TODO: Move me to an appropriate place 4 | #[macro_export] 5 | macro_rules! json_map { 6 | { $( $key:literal : $value:expr),* } => {{ 7 | use serde_json::Value; 8 | use serde_json::map::Map; 9 | #[allow(unused_mut)] 10 | let mut map : Map = Map::new(); 11 | $( 12 | map.insert( $key.to_string(), json!($value) ); 13 | )* 14 | map 15 | }}; 16 | } 17 | 18 | #[cfg(test)] 19 | mod query_benchmarks { 20 | use chrono::Duration; 21 | use criterion::Criterion; 22 | use serde_json::json; 23 | use serde_json::Map; 24 | use serde_json::Value; 25 | 26 | use aw_datastore::Datastore; 27 | use aw_models::Bucket; 28 | use aw_models::BucketMetadata; 29 | use aw_models::Event; 30 | use aw_models::TimeInterval; 31 | 32 | static BUCKETNAME: &str = "testbucket"; 33 | static TIME_INTERVAL: &str = "1980-01-01T00:00:00Z/2080-01-02T00:00:00Z"; 34 | 35 | fn setup_datastore() -> Datastore { 36 | Datastore::new_in_memory(false) 37 | } 38 | 39 | fn create_bucket(ds: &Datastore, bucketname: String) { 40 | let bucket = Bucket { 41 | bid: None, 42 | id: bucketname, 43 | _type: "testtype".to_string(), 44 | client: "testclient".to_string(), 45 | hostname: "testhost".to_string(), 46 | created: Some(chrono::Utc::now()), 47 | data: json_map! {}, 48 | metadata: BucketMetadata::default(), 49 | events: None, 50 | last_updated: None, 51 | }; 52 | ds.create_bucket(&bucket).unwrap(); 53 | } 54 | 55 | fn insert_events(ds: &Datastore, bucketname: &str, num_events: i64) { 56 | let mut possible_data = Vec::>::new(); 57 | for i in 0..20 { 58 | possible_data.push(json_map! 
{"number": i}); 59 | } 60 | let mut event_list = Vec::new(); 61 | for i in 0..num_events { 62 | let e = Event { 63 | id: None, 64 | timestamp: chrono::Utc::now() + Duration::seconds(i), 65 | duration: Duration::seconds(10), 66 | data: possible_data[i as usize % 20].clone(), 67 | }; 68 | event_list.push(e); 69 | } 70 | ds.insert_events(bucketname, &event_list).unwrap(); 71 | } 72 | 73 | pub fn bench_assign(c: &mut Criterion) { 74 | let ds = setup_datastore(); 75 | let interval = TimeInterval::new_from_string(TIME_INTERVAL).unwrap(); 76 | c.bench_function("bench assign", |b| { 77 | b.iter(|| { 78 | let code = String::from("return a=1;"); 79 | match aw_query::query(&code, &interval, &ds).unwrap() { 80 | aw_query::DataType::None() => (), 81 | ref data => panic!("Wrong datatype, {data:?}"), 82 | }; 83 | }) 84 | }); 85 | } 86 | 87 | pub fn bench_many_events(c: &mut Criterion) { 88 | let ds = setup_datastore(); 89 | create_bucket(&ds, BUCKETNAME.to_string()); 90 | insert_events(&ds, BUCKETNAME, 5000); 91 | 92 | let interval = TimeInterval::new_from_string(TIME_INTERVAL).unwrap(); 93 | c.bench_function("bench many events", |b| { 94 | b.iter(|| { 95 | let code = String::from( 96 | " 97 | events = query_bucket(\"testbucket\"); 98 | return events; 99 | ", 100 | ); 101 | aw_query::query(&code, &interval, &ds).unwrap(); 102 | }) 103 | }); 104 | } 105 | } 106 | 107 | criterion_group!( 108 | benches, 109 | query_benchmarks::bench_assign, 110 | query_benchmarks::bench_many_events 111 | ); 112 | criterion_main!(benches); 113 | -------------------------------------------------------------------------------- /aw-query/src/ast.rs: -------------------------------------------------------------------------------- 1 | use crate::lexer::Span; 2 | 3 | use std::collections::HashMap; 4 | 5 | #[derive(Debug)] 6 | pub struct Program { 7 | pub stmts: Vec, 8 | } 9 | 10 | #[derive(Debug, Clone)] 11 | pub struct Expr { 12 | pub span: Span, 13 | pub node: Expr_, 14 | } 15 | 16 | #[derive(Debug, Clone)] 17 | pub enum Expr_ { 18 | Add(Box, Box), 19 | Sub(Box, Box), 20 | Mul(Box, Box), 21 | Div(Box, Box), 22 | Mod(Box, Box), 23 | 24 | Equal(Box, Box), 25 | 26 | Var(String), 27 | Assign(String, Box), 28 | Function(String, Box), 29 | If(Vec<(Box, Vec)>), 30 | Return(Box), 31 | 32 | Bool(bool), 33 | Number(f64), 34 | String(String), 35 | List(Vec), 36 | Dict(HashMap), 37 | } 38 | -------------------------------------------------------------------------------- /aw-query/src/grammar.rs: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ActivityWatch/aw-server-rust/c6409796f3859c1c610339682d90e0a03659ccdd/aw-query/src/grammar.rs -------------------------------------------------------------------------------- /aw-query/src/lexer.rs: -------------------------------------------------------------------------------- 1 | use plex::lexer; 2 | 3 | #[derive(Debug, Clone)] 4 | pub enum Token { 5 | Ident(String), 6 | 7 | If, 8 | ElseIf, 9 | Else, 10 | Return, 11 | 12 | Bool(bool), 13 | Number(f64), 14 | String(String), 15 | Plus, 16 | Minus, 17 | Star, 18 | Slash, 19 | Percent, 20 | Equals, 21 | Assign, 22 | LParen, 23 | RParen, 24 | LBracket, 25 | RBracket, 26 | LBrace, 27 | RBrace, 28 | Comma, 29 | Colon, 30 | Semi, 31 | 32 | Whitespace, 33 | Newline, 34 | Comment, 35 | } 36 | 37 | lexer! { 38 | fn next_token(text: 'a) -> (Token, &'a str); 39 | 40 | r#"[ \t\r]+"# => (Token::Whitespace, text), 41 | r#"\n"# => (Token::Newline, text), 42 | // Python-style comments (# ...) 
43 | r#"#[^\n]*"# => (Token::Comment, text), 44 | 45 | r#"if"# => (Token::If, text), 46 | r#"elif"# => (Token::ElseIf, text), 47 | r#"else"# => (Token::Else, text), 48 | r#"return"# => (Token::Return, text), 49 | 50 | r#"true"# => (Token::Bool(true), text), 51 | r#"false"# => (Token::Bool(false), text), 52 | // TODO: Deprecate/Remove? 53 | r#"True"# => (Token::Bool(true), text), 54 | r#"False"# => (Token::Bool(false), text), 55 | 56 | r#"\"([^\"]|(\\\"))*\""# => ( 57 | Token::String(text.to_owned()[1..text.len()-1].replace("\\\"", "\"")), 58 | text 59 | ), 60 | r#"[0-9]+[\.]?[0-9]*"# => { 61 | let tok = match text.parse() { 62 | Ok(n) => Token::Number(n), 63 | Err(e) => panic!("Integer {text} is out of range: {e}"), 64 | }; 65 | (tok, text) 66 | } 67 | 68 | r#"[a-zA-Z_][a-zA-Z0-9_]*"# => (Token::Ident(text.to_owned()), text), 69 | 70 | r#"=="# => (Token::Equals, text), 71 | r#"="# => (Token::Assign, text), 72 | r#"\+"# => (Token::Plus, text), 73 | r#"-"# => (Token::Minus, text), 74 | r#"\*"# => (Token::Star, text), 75 | r#"/"# => (Token::Slash, text), 76 | r#"%"# => (Token::Percent, text), 77 | r#"\("# => (Token::LParen, text), 78 | r#"\)"# => (Token::RParen, text), 79 | r#"\["# => (Token::LBracket, text), 80 | r#"\]"# => (Token::RBracket, text), 81 | r#"\{"# => (Token::LBrace, text), 82 | r#"\}"# => (Token::RBrace, text), 83 | r#","# => (Token::Comma, text), 84 | r#":"# => (Token::Colon, text), 85 | r#";"# => (Token::Semi, text), 86 | } 87 | 88 | pub struct Lexer<'a> { 89 | original: &'a str, 90 | remaining: &'a str, 91 | line: usize, 92 | } 93 | 94 | impl<'a> Lexer<'a> { 95 | pub fn new(s: &'a str) -> Lexer<'a> { 96 | Lexer { 97 | original: s, 98 | remaining: s, 99 | line: 1, 100 | } 101 | } 102 | } 103 | 104 | #[derive(Debug, Clone, Copy)] 105 | pub struct Span { 106 | pub lo: usize, 107 | pub hi: usize, 108 | pub line: usize, 109 | } 110 | 111 | fn span_in(s: &str, t: &str, l: usize) -> Span { 112 | let lo = s.as_ptr() as usize - t.as_ptr() as usize; 113 | Span { 114 | lo, 115 | hi: lo + s.len(), 116 | line: l, 117 | } 118 | } 119 | 120 | impl<'a> Iterator for Lexer<'a> { 121 | type Item = (Token, Span); 122 | fn next(&mut self) -> Option<(Token, Span)> { 123 | loop { 124 | let tok = if let Some((tok, new_remaining)) = next_token(self.remaining) { 125 | self.remaining = new_remaining; 126 | tok 127 | } else { 128 | return None; 129 | }; 130 | match tok { 131 | (Token::Whitespace, _) | (Token::Comment, _) => { 132 | continue; 133 | } 134 | (Token::Newline, _) => { 135 | self.line += 1; 136 | continue; 137 | } 138 | (tok, span) => { 139 | return Some((tok, span_in(span, self.original, self.line))); 140 | } 141 | } 142 | } 143 | } 144 | } 145 | -------------------------------------------------------------------------------- /aw-query/src/lib.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate log; 3 | extern crate serde; 4 | extern crate serde_json; 5 | 6 | use std::fmt; 7 | 8 | use aw_models::TimeInterval; 9 | 10 | use aw_datastore::Datastore; 11 | 12 | pub mod datatype; 13 | 14 | mod ast; 15 | mod functions; 16 | mod interpret; 17 | mod lexer; 18 | #[allow( 19 | clippy::match_single_binding, 20 | clippy::redundant_closure_call, 21 | unused_braces 22 | )] 23 | mod parser; 24 | 25 | pub use crate::datatype::DataType; 26 | pub use crate::interpret::VarEnv; 27 | 28 | // TODO: add line numbers to errors 29 | // (works during lexing, but not during parsing I believe) 30 | 31 | #[derive(Debug)] 32 | pub enum QueryError { 33 | 
// Parser 34 | ParsingError(String), 35 | 36 | // Execution 37 | EmptyQuery(), 38 | VariableNotDefined(String), 39 | MathError(String), 40 | InvalidType(String), 41 | InvalidFunctionParameters(String), 42 | TimeIntervalError(String), 43 | BucketQueryError(String), 44 | RegexCompileError(String), 45 | } 46 | 47 | impl fmt::Display for QueryError { 48 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 49 | write!(f, "{self:?}") 50 | } 51 | } 52 | 53 | pub fn query(code: &str, ti: &TimeInterval, ds: &Datastore) -> Result<DataType, QueryError> { 54 | let lexer = lexer::Lexer::new(code); 55 | let program = match parser::parse(lexer) { 56 | Ok(p) => p, 57 | Err(e) => { 58 | // TODO: Improve parsing error message 59 | warn!("ParsingError: {:?}", e); 60 | return Err(QueryError::ParsingError(format!("{e:?}"))); 61 | } 62 | }; 63 | interpret::interpret_prog(program, ti, ds) 64 | } 65 | -------------------------------------------------------------------------------- /aw-query/src/parser.rs: -------------------------------------------------------------------------------- 1 | use crate::ast::*; 2 | use crate::lexer::Token::*; 3 | use crate::lexer::*; 4 | use plex::parser; 5 | 6 | use std::collections::HashMap; 7 | 8 | fn merge_if_vecs(lhs: Expr_, rhs: Expr_) -> Expr_ { 9 | let mut ifs = match lhs { 10 | Expr_::If(_ifs) => _ifs, 11 | _ => unreachable!(), 12 | }; 13 | match rhs { 14 | Expr_::If(mut _ifs) => ifs.append(&mut _ifs), 15 | _ => unreachable!(), 16 | }; 17 | Expr_::If(ifs) 18 | } 19 | 20 | parser! { 21 | fn parse_(Token, Span); 22 | 23 | // combine two spans 24 | (a, b) { 25 | Span { 26 | lo: a.lo, 27 | hi: b.hi, 28 | line: a.line, 29 | } 30 | } 31 | 32 | program: Program { 33 | statements[s] => Program { stmts: s } 34 | } 35 | 36 | statements: Vec<Expr> { 37 | => vec![], 38 | statements[mut st] statement[x] => { 39 | st.push(x); 40 | st 41 | }, 42 | statements[st] Semi => st, 43 | } 44 | 45 | statement: Expr { 46 | ifs[x] => x, 47 | ret[x] Semi => x, 48 | } 49 | 50 | ifs: Expr { 51 | _if[l_ifs] => l_ifs, 52 | _elif[l_ifs] => l_ifs, 53 | _else[l_ifs] => l_ifs, 54 | } 55 | 56 | _cond_block: Expr { 57 | binop[cond] LBrace statements[block] RBrace => Expr { 58 | span: span!(), 59 | node: { 60 | let mut ifs = Vec::new(); 61 | ifs.push((Box::new(cond), block)); 62 | Expr_::If(ifs) 63 | } 64 | }, 65 | } 66 | 67 | _if: Expr { 68 | If _cond_block[x] => x 69 | } 70 | 71 | _elif: Expr { 72 | // Else if 73 | _if[l_ifs] ElseIf _cond_block[l_preceding_ifs] => Expr { 74 | span: span!(), 75 | node: merge_if_vecs(l_ifs.node, l_preceding_ifs.node), 76 | }, 77 | // Else if else if 78 | _elif[l_ifs] ElseIf _cond_block[l_preceding_ifs] => Expr { 79 | span: span!(), 80 | node: merge_if_vecs(l_ifs.node, l_preceding_ifs.node), 81 | }, 82 | } 83 | 84 | _else: Expr { 85 | // if else 86 | _if[l_ifs] Else LBrace statements[l_else_block] RBrace => Expr { 87 | span: span!(), 88 | node: { 89 | let mut l_new = match l_ifs.node { 90 | Expr_::If(l_ifs) => l_ifs, 91 | _ => unreachable!(), 92 | }; 93 | let true_expr = Expr { span: span!(), node: Expr_::Bool(true) }; 94 | l_new.push((Box::new(true_expr), l_else_block)); 95 | Expr_::If(l_new) 96 | } 97 | }, 98 | // else if else 99 | _elif[l_ifs] Else LBrace statements[l_else_block] RBrace => Expr { 100 | span: span!(), 101 | node: { 102 | let mut l_new = match l_ifs.node { 103 | Expr_::If(l_ifs) => l_ifs, 104 | _ => unreachable!(), 105 | }; 106 | let true_expr = Expr { span: span!(), node: Expr_::Bool(true) }; 107 | l_new.push((Box::new(true_expr), l_else_block)); 108 | Expr_::If(l_new) 109 | } 110 |
}, 111 | } 112 | 113 | ret: Expr { 114 | Return assign[a] => Expr { 115 | span: span!(), 116 | node: Expr_::Return(Box::new(a)), 117 | }, 118 | assign[x] => x, 119 | } 120 | 121 | assign: Expr { 122 | Ident(var) Assign binop[rhs] => Expr { 123 | span: span!(), 124 | node: Expr_::Assign(var, Box::new(rhs)), 125 | }, 126 | binop[x] => x 127 | } 128 | 129 | binop: Expr { 130 | binop[lhs] Plus func[rhs] => Expr { 131 | span: span!(), 132 | node: Expr_::Add(Box::new(lhs), Box::new(rhs)), 133 | }, 134 | binop[lhs] Minus func[rhs] => Expr { 135 | span: span!(), 136 | node: Expr_::Sub(Box::new(lhs), Box::new(rhs)), 137 | }, 138 | binop[lhs] Star func[rhs] => Expr { 139 | span: span!(), 140 | node: Expr_::Mul(Box::new(lhs), Box::new(rhs)), 141 | }, 142 | binop[lhs] Slash func[rhs] => Expr { 143 | span: span!(), 144 | node: Expr_::Div(Box::new(lhs), Box::new(rhs)), 145 | }, 146 | binop[lhs] Percent func[rhs] => Expr { 147 | span: span!(), 148 | node: Expr_::Mod(Box::new(lhs), Box::new(rhs)), 149 | }, 150 | binop[lhs] Equals func[rhs] => Expr { 151 | span: span!(), 152 | node: Expr_::Equal(Box::new(lhs), Box::new(rhs)), 153 | }, 154 | func[x] => x 155 | } 156 | 157 | func: Expr { 158 | Ident(fname) LParen _inner_list[l] RParen => Expr { 159 | span: span!(), 160 | node: Expr_::Function(fname, Box::new(l)), 161 | }, 162 | Ident(fname) LParen RParen => Expr { 163 | span: span!(), 164 | node: { 165 | let empty_expr_list = Expr { 166 | span: span!(), 167 | node: Expr_::List(Vec::new()) 168 | }; 169 | Expr_::Function(fname, Box::new(empty_expr_list)) 170 | }, 171 | }, 172 | object[o] => o, 173 | } 174 | 175 | object: Expr { 176 | LBrace dict[d] RBrace => d, 177 | LBrace RBrace => Expr { 178 | span: span!(), 179 | node: { 180 | Expr_::Dict(HashMap::new()) 181 | } 182 | }, 183 | list[l] => l 184 | } 185 | 186 | list: Expr { 187 | LBracket _inner_list[l] RBracket => l, 188 | LBracket RBracket => Expr { 189 | span: span!(), 190 | node: { 191 | Expr_::List(Vec::new()) 192 | } 193 | }, 194 | atom[a] => a 195 | } 196 | 197 | _inner_list: Expr { 198 | binop[o] => Expr { 199 | span: span!(), 200 | node: { 201 | let mut list = Vec::new(); 202 | list.push(o); 203 | Expr_::List(list) 204 | } 205 | }, 206 | _inner_list[l] Comma binop[o] => Expr { 207 | span: span!(), 208 | node: { 209 | match l.node { 210 | Expr_::List(mut l) => { 211 | l.push(o); 212 | Expr_::List(l) 213 | }, 214 | _ => unreachable!(), 215 | } 216 | } 217 | }, 218 | } 219 | 220 | dict: Expr { 221 | String(k) Colon binop[v] => Expr { 222 | span: span!(), 223 | node: { 224 | let mut dict = HashMap::new(); 225 | dict.insert(k, v); 226 | Expr_::Dict(dict) 227 | } 228 | }, 229 | dict[d] Comma String(k) Colon binop[v] => Expr { 230 | span: span!(), 231 | node: { 232 | match d.node { 233 | Expr_::Dict(mut d) => { 234 | d.insert(k, v); 235 | Expr_::Dict(d) 236 | }, 237 | _ => unreachable!(), 238 | } 239 | } 240 | }, 241 | } 242 | 243 | atom: Expr { 244 | // round brackets to destructure tokens 245 | Ident(v) => Expr { 246 | span: span!(), 247 | node: Expr_::Var(v), 248 | }, 249 | Bool(b) => Expr { 250 | span: span!(), 251 | node: Expr_::Bool(b), 252 | }, 253 | Number(i) => Expr { 254 | span: span!(), 255 | node: Expr_::Number(i), 256 | }, 257 | String(s) => Expr { 258 | span: span!(), 259 | node: Expr_::String(s), 260 | }, 261 | LParen binop[x] RParen => x 262 | } 263 | } 264 | 265 | pub type ParseError = (Option<(Token, Span)>, &'static str); 266 | 267 | pub fn parse<I: Iterator<Item = (Token, Span)>>(i: I) -> Result<Program, ParseError> { 268 | parse_(i) 269 | } 270 |
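// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, not part of the original source.
// It shows how the lexer and parser above compose; `aw_query::query` in
// lib.rs does exactly this before handing the AST to the interpreter:
//
//     use crate::lexer::Lexer;
//     use crate::parser::parse;
//
//     let lexer = Lexer::new("events = query_bucket(\"testbucket\");\nreturn events;");
//     let program = parse(lexer).expect("parse failed");
//     assert_eq!(program.stmts.len(), 2); // one Assign statement, one Return
// ---------------------------------------------------------------------------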
-------------------------------------------------------------------------------- /aw-server.service: -------------------------------------------------------------------------------- 1 | ########################## 2 | # aw-server-rust.service # 3 | ########################## 4 | # 5 | # 1. Build and package aw-server-rust and aw-webui with "make" 6 | # 2. Install aw-server-rust with "sudo make install" 7 | # 3. Run 'systemctl --user daemon-reload' to make systemd load in the new service file 8 | # 4. Run 'systemctl --user start aw-server-rust.service' to start aw-server-rust 9 | # 5. (Optional) Run 'systemctl --user enable aw-server-rust.service' to always start aw-server-rust when you log in 10 | # 11 | # Now aw-server-rust should be running, you can now start whatever 12 | # ActivityWatch watchers you desire and can find the WebUI at 13 | # http://localhost:5600 14 | # 15 | 16 | [Service] 17 | Type=notify 18 | ExecStart=aw-server 19 | 20 | [Unit] 21 | Description=ActivityWatch Server (Rust implementation) 22 | After=network.target 23 | 24 | [Install] 25 | WantedBy=default.target 26 | -------------------------------------------------------------------------------- /aw-server/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "aw-server" 3 | version = "0.13.1" 4 | authors = ["Johan Bjäreholt ", "Erik Bjäreholt "] 5 | edition = "2021" 6 | 7 | [lib] 8 | name = "aw_server" 9 | crate-type = ["lib", "cdylib"] 10 | path = "src/lib.rs" 11 | 12 | [[bin]] 13 | name = "aw-server" 14 | path = "src/main.rs" 15 | 16 | [dependencies] 17 | rocket = { version = "0.5.0", features = ["json"] } 18 | rocket_cors = { version = "0.6.0" } 19 | serde = { version = "1.0", features = ["derive"] } 20 | serde_json = "1.0" 21 | chrono = { version = "0.4", features = ["serde"] } 22 | appdirs = "0.2.0" 23 | lazy_static = "1.4" 24 | log = "0.4" 25 | fern = { version = "0.7", features = ["colored"] } 26 | toml = "0.8" 27 | gethostname = "0.4" 28 | uuid = { version = "1.3", features = ["serde", "v4"] } 29 | clap = { version = "4.1", features = ["derive", "cargo"] } 30 | log-panics = { version = "2", features = ["with-backtrace"]} 31 | rust-embed = { version = "8.0.0", features = ["interpolate-folder-path", "debug-embed"] } 32 | 33 | aw-datastore = { path = "../aw-datastore" } 34 | aw-models = { path = "../aw-models" } 35 | aw-transform = { path = "../aw-transform" } 36 | aw-query = { path = "../aw-query" } 37 | 38 | [target.'cfg(target_os="linux")'.dependencies] 39 | sd-notify = "0.4.2" 40 | 41 | [target.'cfg(all(target_os="linux", target_arch="x86"))'.dependencies] 42 | jemallocator = "0.5.0" 43 | 44 | [target.'cfg(target_os="android")'.dependencies] 45 | jni = { version = "0.20", default-features = false } 46 | libc = "0.2" 47 | android_logger = "0.13" 48 | openssl-sys = { version = "0.9.82", features = ["vendored"]} 49 | -------------------------------------------------------------------------------- /aw-server/build.rs: -------------------------------------------------------------------------------- 1 | use std::error::Error; 2 | 3 | fn main() -> Result<(), Box> { 4 | let webui_var = std::env::var("AW_WEBUI_DIR"); 5 | let path = if let Ok(var_path) = &webui_var { 6 | std::path::Path::new(var_path) 7 | } else { 8 | let path = std::path::Path::new("../aw-webui/dist"); 9 | // ensure folder exists, since macro requires it 10 | std::fs::create_dir_all(path)?; 11 | println!("cargo:rustc-env=AW_WEBUI_DIR={}", path.display()); 12 | path 13 | }; 14 | 15 | let 
path_index = path.join("index.html"); 16 | if !path_index.exists() { 17 | println!( 18 | "cargo:warning=`{}` is not built, compiling without webui", 19 | path.display() 20 | ); 21 | } 22 | 23 | // Rebuild if the webui directory changes 24 | println!("cargo:rerun-if-env-changed=AW_WEBUI_DIR"); 25 | if webui_var.is_ok() { 26 | println!("cargo:rerun-if-changed={}", webui_var.unwrap()); 27 | } 28 | 29 | Ok(()) 30 | } 31 | -------------------------------------------------------------------------------- /aw-server/src/config.rs: -------------------------------------------------------------------------------- 1 | use std::fs::File; 2 | use std::io::{Read, Write}; 3 | 4 | use rocket::config::Config; 5 | use rocket::data::{Limits, ToByteUnit}; 6 | use rocket::log::LogLevel; 7 | use serde::{Deserialize, Serialize}; 8 | 9 | use crate::dirs; 10 | 11 | // Far from an optimal way to solve it, but works and is simple 12 | static mut TESTING: bool = true; 13 | pub fn set_testing(testing: bool) { 14 | unsafe { 15 | TESTING = testing; 16 | } 17 | } 18 | pub fn is_testing() -> bool { 19 | unsafe { TESTING } 20 | } 21 | 22 | #[derive(Serialize, Deserialize)] 23 | pub struct AWConfig { 24 | #[serde(default = "default_address")] 25 | pub address: String, 26 | 27 | #[serde(default = "default_port")] 28 | pub port: u16, 29 | 30 | #[serde(skip, default = "default_testing")] 31 | pub testing: bool, // This is not written to the config file (serde(skip)) 32 | 33 | #[serde(default = "default_cors")] 34 | pub cors: Vec<String>, 35 | 36 | // A mapping of watcher names to paths where the 37 | // custom visualizations are located. 38 | #[serde(default = "default_custom_static")] 39 | pub custom_static: std::collections::HashMap<String, String>, 40 | } 41 | 42 | impl Default for AWConfig { 43 | fn default() -> AWConfig { 44 | AWConfig { 45 | address: default_address(), 46 | port: default_port(), 47 | testing: default_testing(), 48 | cors: default_cors(), 49 | custom_static: default_custom_static(), 50 | } 51 | } 52 | } 53 | 54 | impl AWConfig { 55 | pub fn to_rocket_config(&self) -> rocket::Config { 56 | let mut config; 57 | if self.testing { 58 | config = Config::debug_default(); 59 | config.log_level = LogLevel::Debug; 60 | } else { 61 | config = Config::release_default() 62 | }; 63 | 64 | // Needed for bucket imports 65 | let limits = Limits::default() 66 | .limit("json", 1000u64.megabytes()) 67 | .limit("data-form", 1000u64.megabytes()); 68 | 69 | config.address = self.address.parse().unwrap(); 70 | config.port = self.port; 71 | config.keep_alive = 0; 72 | config.limits = limits; 73 | 74 | config 75 | } 76 | } 77 | 78 | fn default_address() -> String { 79 | "127.0.0.1".to_string() 80 | } 81 | 82 | fn default_cors() -> Vec<String> { 83 | Vec::<String>::new() 84 | } 85 | 86 | fn default_testing() -> bool { 87 | is_testing() 88 | } 89 | 90 | fn default_port() -> u16 { 91 | if is_testing() { 92 | 5666 93 | } else { 94 | 5600 95 | } 96 | } 97 | 98 | fn default_custom_static() -> std::collections::HashMap<String, String> { 99 | std::collections::HashMap::new() 100 | } 101 | 102 | pub fn create_config(testing: bool) -> AWConfig { 103 | set_testing(testing); 104 | let mut config_path = dirs::get_config_dir().unwrap(); 105 | if !testing { 106 | config_path.push("config.toml") 107 | } else { 108 | config_path.push("config-testing.toml") 109 | } 110 | 111 | /* If there is no config file, create a new config file with default values but every value is 112 | * commented out by default in case we would change a default value at some point in the future */ 113 | if !config_path.is_file()
{ 114 | debug!("Writing default commented out config at {:?}", config_path); 115 | let mut wfile = File::create(config_path.clone()).expect("Unable to create config file"); 116 | let default_config = AWConfig::default(); 117 | let default_config_str = 118 | toml::to_string(&default_config).expect("Failed to convert default config to string"); 119 | let mut default_config_str_commented = String::new(); 120 | default_config_str_commented.push_str("### DEFAULT SETTINGS ###\n"); 121 | for line in default_config_str.lines() { 122 | default_config_str_commented.push_str(&format!("#{line}\n")); 123 | } 124 | wfile 125 | .write_all(&default_config_str_commented.into_bytes()) 126 | .expect("Failed to write config to file"); 127 | wfile.sync_all().expect("Unable to sync config file"); 128 | } 129 | 130 | debug!("Reading config at {:?}", config_path); 131 | let mut rfile = File::open(config_path).expect("Failed to open config file for reading"); 132 | let mut content = String::new(); 133 | rfile 134 | .read_to_string(&mut content) 135 | .expect("Failed to read config as a string"); 136 | let aw_config: AWConfig = toml::from_str(&content).expect("Failed to parse config file"); 137 | 138 | aw_config 139 | } 140 | -------------------------------------------------------------------------------- /aw-server/src/device_id.rs: -------------------------------------------------------------------------------- 1 | use std::fs; 2 | 3 | use uuid::Uuid; 4 | 5 | use crate::dirs; 6 | 7 | /// Retrieves the device ID, if none exists it generates one (using UUID v4) 8 | pub fn get_device_id() -> String { 9 | // I chose get_data_dir over get_config_dir since the latter isn't yet supported on Android. 10 | let mut path = dirs::get_data_dir().unwrap(); 11 | path.push("device_id"); 12 | if path.exists() { 13 | fs::read_to_string(path).unwrap() 14 | } else { 15 | let uuid = Uuid::new_v4().as_hyphenated().to_string(); 16 | fs::write(path, &uuid).unwrap(); 17 | uuid 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /aw-server/src/dirs.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | #[cfg(not(target_os = "android"))] 4 | use std::fs; 5 | 6 | #[cfg(target_os = "android")] 7 | use std::sync::Mutex; 8 | 9 | #[cfg(target_os = "android")] 10 | lazy_static! 
{ 11 | static ref ANDROID_DATA_DIR: Mutex<PathBuf> = Mutex::new(PathBuf::from( 12 | "/data/user/0/net.activitywatch.android/files" 13 | )); 14 | } 15 | 16 | #[cfg(not(target_os = "android"))] 17 | pub fn get_config_dir() -> Result<PathBuf, ()> { 18 | let mut dir = appdirs::user_config_dir(Some("activitywatch"), None, false)?; 19 | dir.push("aw-server-rust"); 20 | fs::create_dir_all(dir.clone()).expect("Unable to create config dir"); 21 | Ok(dir) 22 | } 23 | 24 | #[cfg(target_os = "android")] 25 | pub fn get_config_dir() -> Result<PathBuf, ()> { 26 | panic!("not implemented on Android"); 27 | } 28 | 29 | #[cfg(not(target_os = "android"))] 30 | pub fn get_data_dir() -> Result<PathBuf, ()> { 31 | let mut dir = appdirs::user_data_dir(Some("activitywatch"), None, false)?; 32 | dir.push("aw-server-rust"); 33 | fs::create_dir_all(dir.clone()).expect("Unable to create data dir"); 34 | Ok(dir) 35 | } 36 | 37 | #[cfg(target_os = "android")] 38 | pub fn get_data_dir() -> Result<PathBuf, ()> { 39 | return Ok(ANDROID_DATA_DIR.lock().unwrap().to_path_buf()); 40 | } 41 | 42 | #[cfg(not(target_os = "android"))] 43 | pub fn get_cache_dir() -> Result<PathBuf, ()> { 44 | let mut dir = appdirs::user_cache_dir(Some("activitywatch"), None)?; 45 | dir.push("aw-server-rust"); 46 | fs::create_dir_all(dir.clone()).expect("Unable to create cache dir"); 47 | Ok(dir) 48 | } 49 | 50 | #[cfg(target_os = "android")] 51 | pub fn get_cache_dir() -> Result<PathBuf, ()> { 52 | panic!("not implemented on Android"); 53 | } 54 | 55 | #[cfg(not(target_os = "android"))] 56 | pub fn get_log_dir(module: &str) -> Result<PathBuf, ()> { 57 | let mut dir = appdirs::user_log_dir(Some("activitywatch"), None)?; 58 | dir.push(module); 59 | fs::create_dir_all(dir.clone()).expect("Unable to create log dir"); 60 | Ok(dir) 61 | } 62 | 63 | #[cfg(target_os = "android")] 64 | pub fn get_log_dir(module: &str) -> Result<PathBuf, ()> { 65 | panic!("not implemented on Android"); 66 | } 67 | 68 | pub fn db_path(testing: bool) -> Result<PathBuf, ()> { 69 | let mut db_path = get_data_dir()?; 70 | if testing { 71 | db_path.push("sqlite-testing.db"); 72 | } else { 73 | db_path.push("sqlite.db"); 74 | } 75 | Ok(db_path) 76 | } 77 | 78 | #[cfg(target_os = "android")] 79 | pub fn set_android_data_dir(path: &str) { 80 | let mut android_data_dir = ANDROID_DATA_DIR.lock().unwrap(); 81 | *android_data_dir = PathBuf::from(path); 82 | } 83 | 84 | #[test] 85 | fn test_get_dirs() { 86 | #[cfg(target_os = "android")] 87 | set_android_data_dir("/test"); 88 | 89 | get_cache_dir().unwrap(); 90 | get_log_dir("aw-server-rust").unwrap(); 91 | db_path(true).unwrap(); 92 | db_path(false).unwrap(); 93 | } 94 | -------------------------------------------------------------------------------- /aw-server/src/endpoints/bucket.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use gethostname::gethostname; 4 | use rocket::serde::json::Json; 5 | 6 | use chrono::DateTime; 7 | use chrono::Utc; 8 | 9 | use aw_models::Bucket; 10 | use aw_models::BucketsExport; 11 | use aw_models::Event; 12 | use aw_models::TryVec; 13 | 14 | use rocket::http::Status; 15 | use rocket::State; 16 | 17 | use crate::endpoints::util::BucketsExportRocket; 18 | use crate::endpoints::{HttpErrorJson, ServerState}; 19 | 20 | #[get("/")] 21 | pub fn buckets_get( 22 | state: &State<ServerState>, 23 | ) -> Result<Json<HashMap<String, Bucket>>, HttpErrorJson> { 24 | let datastore = endpoints_get_lock!(state.datastore); 25 | match datastore.get_buckets() { 26 | Ok(bucketlist) => Ok(Json(bucketlist)), 27 | Err(err) => Err(err.into()), 28 | } 29 | } 30 | 31 | #[get("/<bucket_id>")] 32 | pub fn bucket_get(
33 | bucket_id: &str, 34 | state: &State<ServerState>, 35 | ) -> Result<Json<Bucket>, HttpErrorJson> { 36 | let datastore = endpoints_get_lock!(state.datastore); 37 | match datastore.get_bucket(&bucket_id) { 38 | Ok(bucket) => Ok(Json(bucket)), 39 | Err(e) => Err(e.into()), 40 | } 41 | } 42 | 43 | /// Create a new bucket 44 | /// 45 | /// If hostname is "!local", the hostname and device_id will be set from the server info. 46 | /// This is useful for watchers which are known/assumed to run locally but might not know their hostname (like aw-watcher-web). 47 | #[post("/<bucket_id>", data = "<message>", format = "application/json")] 48 | pub fn bucket_new( 49 | bucket_id: &str, 50 | message: Json<Bucket>, 51 | state: &State<ServerState>, 52 | ) -> Result<(), HttpErrorJson> { 53 | let mut bucket = message.into_inner(); 54 | if bucket.id != bucket_id { 55 | bucket.id = bucket_id.to_string(); 56 | } 57 | if bucket.hostname == "!local" { 58 | bucket.hostname = gethostname() 59 | .into_string() 60 | .unwrap_or_else(|_| "unknown".to_string()); 61 | bucket 62 | .data 63 | .insert("device_id".to_string(), state.device_id.clone().into()); 64 | } 65 | let datastore = endpoints_get_lock!(state.datastore); 66 | let ret = datastore.create_bucket(&bucket); 67 | match ret { 68 | Ok(_) => Ok(()), 69 | Err(err) => Err(err.into()), 70 | } 71 | } 72 | 73 | #[get("/<bucket_id>/events?<start>&<end>&<limit>")] 74 | pub fn bucket_events_get( 75 | bucket_id: &str, 76 | start: Option<String>, 77 | end: Option<String>, 78 | limit: Option<u64>, 79 | state: &State<ServerState>, 80 | ) -> Result<Json<Vec<Event>>, HttpErrorJson> { 81 | let starttime: Option<DateTime<Utc>> = match start { 82 | Some(dt_str) => match DateTime::parse_from_rfc3339(&dt_str) { 83 | Ok(dt) => Some(dt.with_timezone(&Utc)), 84 | Err(e) => { 85 | let err_msg = format!( 86 | "Failed to parse starttime, datetime needs to be in rfc3339 format: {e}" 87 | ); 88 | warn!("{}", err_msg); 89 | return Err(HttpErrorJson::new(Status::BadRequest, err_msg)); 90 | } 91 | }, 92 | None => None, 93 | }; 94 | let endtime: Option<DateTime<Utc>> = match end { 95 | Some(dt_str) => match DateTime::parse_from_rfc3339(&dt_str) { 96 | Ok(dt) => Some(dt.with_timezone(&Utc)), 97 | Err(e) => { 98 | let err_msg = 99 | format!("Failed to parse endtime, datetime needs to be in rfc3339 format: {e}"); 100 | warn!("{}", err_msg); 101 | return Err(HttpErrorJson::new(Status::BadRequest, err_msg)); 102 | } 103 | }, 104 | None => None, 105 | }; 106 | let datastore = endpoints_get_lock!(state.datastore); 107 | let res = datastore.get_events(bucket_id, starttime, endtime, limit); 108 | match res { 109 | Ok(events) => Ok(Json(events)), 110 | Err(err) => Err(err.into()), 111 | } 112 | } 113 | 114 | // Needs unused parameter, otherwise there'll be a route collision 115 | // See: https://api.rocket.rs/master/rocket/struct.Route.html#resolving-collisions 116 | #[get("/<bucket_id>/events/<event_id>?<_unused..>")] 117 | pub fn bucket_events_get_single( 118 | bucket_id: &str, 119 | event_id: i64, 120 | _unused: Option<String>, 121 | state: &State<ServerState>, 122 | ) -> Result<Json<Event>, HttpErrorJson> { 123 | let datastore = endpoints_get_lock!(state.datastore); 124 | let res = datastore.get_event(bucket_id, event_id); 125 | match res { 126 | Ok(events) => Ok(Json(events)), 127 | Err(err) => Err(err.into()), 128 | } 129 | } 130 | 131 | #[post("/<bucket_id>/events", data = "<events>", format = "application/json")] 132 | pub fn bucket_events_create( 133 | bucket_id: &str, 134 | events: Json<Vec<Event>>, 135 | state: &State<ServerState>, 136 | ) -> Result<Json<Vec<Event>>, HttpErrorJson> { 137 | let datastore = endpoints_get_lock!(state.datastore); 138 | let res = datastore.insert_events(bucket_id, &events); 139 | match res { 140 | Ok(events) => Ok(Json(events)),
141 | Err(err) => Err(err.into()), 142 | } 143 | } 144 | 145 | #[post( 146 | "/<bucket_id>/heartbeat?<pulsetime>", 147 | data = "<heartbeat_json>", 148 | format = "application/json" 149 | )] 150 | pub fn bucket_events_heartbeat( 151 | bucket_id: &str, 152 | heartbeat_json: Json<Event>, 153 | pulsetime: f64, 154 | state: &State<ServerState>, 155 | ) -> Result<Json<Event>, HttpErrorJson> { 156 | let heartbeat = heartbeat_json.into_inner(); 157 | let datastore = endpoints_get_lock!(state.datastore); 158 | match datastore.heartbeat(bucket_id, heartbeat, pulsetime) { 159 | Ok(e) => Ok(Json(e)), 160 | Err(err) => Err(err.into()), 161 | } 162 | } 163 | 164 | #[get("/<bucket_id>/events/count")] 165 | pub fn bucket_event_count( 166 | bucket_id: &str, 167 | state: &State<ServerState>, 168 | ) -> Result<Json<u64>, HttpErrorJson> { 169 | let datastore = endpoints_get_lock!(state.datastore); 170 | let res = datastore.get_event_count(bucket_id, None, None); 171 | match res { 172 | Ok(eventcount) => Ok(Json(eventcount as u64)), 173 | Err(err) => Err(err.into()), 174 | } 175 | } 176 | 177 | #[delete("/<bucket_id>/events/<event_id>")] 178 | pub fn bucket_events_delete_by_id( 179 | bucket_id: &str, 180 | event_id: i64, 181 | state: &State<ServerState>, 182 | ) -> Result<(), HttpErrorJson> { 183 | let datastore = endpoints_get_lock!(state.datastore); 184 | match datastore.delete_events_by_id(bucket_id, vec![event_id]) { 185 | Ok(_) => Ok(()), 186 | Err(err) => Err(err.into()), 187 | } 188 | } 189 | 190 | #[get("/<bucket_id>/export")] 191 | pub fn bucket_export( 192 | bucket_id: &str, 193 | state: &State<ServerState>, 194 | ) -> Result<BucketsExportRocket, HttpErrorJson> { 195 | let datastore = endpoints_get_lock!(state.datastore); 196 | let mut export = BucketsExport { 197 | buckets: HashMap::new(), 198 | }; 199 | let mut bucket = match datastore.get_bucket(bucket_id) { 200 | Ok(bucket) => bucket, 201 | Err(err) => return Err(err.into()), 202 | }; 203 | /* TODO: Replace expect with http error */ 204 | let events = datastore 205 | .get_events(bucket_id, None, None, None) 206 | .expect("Failed to get events for bucket"); 207 | bucket.events = Some(TryVec::new(events)); 208 | export.buckets.insert(bucket_id.into(), bucket); 209 | 210 | Ok(export.into()) 211 | } 212 | 213 | #[delete("/<bucket_id>")] 214 | pub fn bucket_delete(bucket_id: &str, state: &State<ServerState>) -> Result<(), HttpErrorJson> { 215 | let datastore = endpoints_get_lock!(state.datastore); 216 | match datastore.delete_bucket(bucket_id) { 217 | Ok(_) => Ok(()), 218 | Err(err) => Err(err.into()), 219 | } 220 | } 221 | -------------------------------------------------------------------------------- /aw-server/src/endpoints/cors.rs: -------------------------------------------------------------------------------- 1 | use rocket::http::Method; 2 | use rocket_cors::{AllowedHeaders, AllowedOrigins}; 3 | 4 | use crate::config::AWConfig; 5 | 6 | pub fn cors(config: &AWConfig) -> rocket_cors::Cors { 7 | let root_url = format!("http://127.0.0.1:{}", config.port); 8 | let root_url_localhost = format!("http://localhost:{}", config.port); 9 | let mut allowed_exact_origins = vec![root_url, root_url_localhost]; 10 | allowed_exact_origins.extend(config.cors.clone()); 11 | 12 | if config.testing { 13 | allowed_exact_origins.push("http://127.0.0.1:27180".to_string()); 14 | allowed_exact_origins.push("http://localhost:27180".to_string()); 15 | } 16 | let mut allowed_regex_origins = vec![ 17 | "chrome-extension://nglaklhklhcoonedhgnpgddginnjdadi".to_string(), 18 | // Every version of a mozilla extension has its own ID to avoid fingerprinting, so we 19 | // unfortunately have to allow all extensions to have access to aw-server 20 | "moz-extension://.*".to_string(), 21 | ]; 22 | if config.testing {
23 | allowed_regex_origins.push("chrome-extension://.*".to_string()); 24 | } 25 | 26 | let allowed_origins = AllowedOrigins::some(&allowed_exact_origins, &allowed_regex_origins); 27 | let allowed_methods = vec![Method::Get, Method::Post, Method::Delete] 28 | .into_iter() 29 | .map(From::from) 30 | .collect(); 31 | let allowed_headers = AllowedHeaders::all(); // TODO: is this unsafe? 32 | 33 | // You can also deserialize this 34 | rocket_cors::CorsOptions { 35 | allowed_origins, 36 | allowed_methods, 37 | allowed_headers, 38 | allow_credentials: false, 39 | ..Default::default() 40 | } 41 | .to_cors() 42 | .expect("Failed to set up CORS") 43 | } 44 | -------------------------------------------------------------------------------- /aw-server/src/endpoints/export.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use rocket::State; 4 | 5 | use aw_models::BucketsExport; 6 | use aw_models::TryVec; 7 | 8 | use crate::endpoints::util::BucketsExportRocket; 9 | use crate::endpoints::{HttpErrorJson, ServerState}; 10 | 11 | #[get("/")] 12 | pub fn buckets_export(state: &State<ServerState>) -> Result<BucketsExportRocket, HttpErrorJson> { 13 | let datastore = endpoints_get_lock!(state.datastore); 14 | let mut export = BucketsExport { 15 | buckets: HashMap::new(), 16 | }; 17 | let mut buckets = match datastore.get_buckets() { 18 | Ok(buckets) => buckets, 19 | Err(err) => return Err(err.into()), 20 | }; 21 | for (bid, mut bucket) in buckets.drain() { 22 | let events = match datastore.get_events(&bid, None, None, None) { 23 | Ok(events) => events, 24 | Err(err) => return Err(err.into()), 25 | }; 26 | bucket.events = Some(TryVec::new(events)); 27 | export.buckets.insert(bid, bucket); 28 | } 29 | 30 | Ok(export.into()) 31 | } 32 | -------------------------------------------------------------------------------- /aw-server/src/endpoints/hostcheck.rs: -------------------------------------------------------------------------------- 1 | //! Host header check needs to be performed to protect against DNS poisoning 2 | //! attacks[1]. 3 | //! 4 | //! Uses a Request Fairing to intercept the request before it's handled. 5 | //! If the Host header is not valid, the request will be rerouted to a 6 | //! BadRequest 7 | //!
8 | //! [1]: https://github.com/ActivityWatch/activitywatch/security/advisories/GHSA-v9fg-6g9j-h4x4 9 | use rocket::fairing::Fairing; 10 | use rocket::http::uri::Origin; 11 | use rocket::http::{Method, Status}; 12 | use rocket::route::Outcome; 13 | use rocket::{Data, Request, Rocket, Route}; 14 | 15 | use crate::config::AWConfig; 16 | use crate::endpoints::HttpErrorJson; 17 | 18 | static FAIRING_ROUTE_BASE: &str = "/checkheader_fairing"; 19 | 20 | pub struct HostCheck { 21 | validate: bool, 22 | } 23 | 24 | impl HostCheck { 25 | pub fn new(config: &AWConfig) -> HostCheck { 26 | // We only validate requests if the server binds a local address 27 | let validate = config.address == "127.0.0.1" || config.address == "localhost"; 28 | HostCheck { validate } 29 | } 30 | } 31 | 32 | /// Create a `Handler` for Fairing error handling 33 | #[derive(Clone)] 34 | struct FairingErrorRoute {} 35 | 36 | #[rocket::async_trait] 37 | impl rocket::route::Handler for FairingErrorRoute { 38 | async fn handle<'r>( 39 | &self, 40 | request: &'r Request<'_>, 41 | _: rocket::Data<'r>, 42 | ) -> rocket::route::Outcome<'r> { 43 | let err = HttpErrorJson::new(Status::BadRequest, "Host header is invalid".to_string()); 44 | Outcome::from(request, err) 45 | } 46 | } 47 | 48 | /// Create a new `Route` for Fairing handling 49 | fn fairing_route() -> Route { 50 | Route::ranked(1, Method::Get, "/", FairingErrorRoute {}) 51 | } 52 | 53 | fn redirect_bad_request(request: &mut Request) { 54 | let uri = FAIRING_ROUTE_BASE.to_string(); 55 | let origin = Origin::parse_owned(uri).unwrap(); 56 | request.set_method(Method::Get); 57 | request.set_uri(origin); 58 | } 59 | 60 | #[rocket::async_trait] 61 | impl Fairing for HostCheck { 62 | fn info(&self) -> rocket::fairing::Info { 63 | rocket::fairing::Info { 64 | name: "HostCheck", 65 | kind: rocket::fairing::Kind::Ignite | rocket::fairing::Kind::Request, 66 | } 67 | } 68 | 69 | async fn on_ignite(&self, rocket: Rocket<rocket::Build>) -> rocket::fairing::Result { 70 | match self.validate { 71 | true => Ok(rocket.mount(FAIRING_ROUTE_BASE, vec![fairing_route()])), 72 | false => { 73 | warn!("Host header validation is turned off, this is a security risk"); 74 | Ok(rocket) 75 | } 76 | } 77 | } 78 | 79 | async fn on_request(&self, request: &mut Request<'_>, _: &mut Data<'_>) { 80 | if !self.validate { 81 | // host header check is disabled 82 | return; 83 | } 84 | 85 | // Fetch header 86 | let hostheader_opt = request.headers().get_one("host"); 87 | if hostheader_opt.is_none() { 88 | info!("Missing 'Host' header, denying request"); 89 | redirect_bad_request(request); 90 | return; 91 | } 92 | 93 | // Parse hostname from host header 94 | // hostname contains port, which we don't care about and filter out 95 | let hostheader = hostheader_opt.unwrap(); 96 | let host_opt = hostheader.split(':').next(); 97 | if host_opt.is_none() { 98 | info!("Host header '{}' not allowed, denying request", hostheader); 99 | redirect_bad_request(request); 100 | return; 101 | } 102 | 103 | // Deny requests to hosts that are not localhost 104 | let valid_hosts: Vec<&str> = vec!["127.0.0.1", "localhost"]; 105 | let host = host_opt.unwrap(); 106 | if !valid_hosts.contains(&host) { 107 | info!("Host header '{}' not allowed, denying request", hostheader); 108 | redirect_bad_request(request); 109 | } 110 | 111 | // host header is verified, proceed with request 112 | } 113 | } 114 | 115 | #[cfg(test)] 116 | mod tests { 117 | use std::sync::Mutex; 118 | 119 | use rocket::http::{ContentType, Header, Status}; 120 | use rocket::Rocket; 121 |
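// Editor's note: an illustrative sketch, not part of the original source.
// When the server binds a local address, the fairing above reroutes any
// request whose Host header is not localhost to the error route, which the
// tests below verify. A hypothetical curl session showing the effect:
//
//     curl -H "Host: evil.example.com" http://127.0.0.1:5600/api/0/info
//     // -> 400 Bad Request: {"message": "Host header is invalid"}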
122 | use crate::config::AWConfig; 123 | use crate::endpoints; 124 | 125 | fn setup_testserver(address: String) -> Rocket<rocket::Build> { 126 | let state = endpoints::ServerState { 127 | datastore: Mutex::new(aw_datastore::Datastore::new_in_memory(false)), 128 | asset_resolver: endpoints::AssetResolver::new(None), 129 | device_id: "test_id".to_string(), 130 | }; 131 | let mut aw_config = AWConfig::default(); 132 | aw_config.address = address; 133 | endpoints::build_rocket(state, aw_config) 134 | } 135 | 136 | #[test] 137 | fn test_public_address() { 138 | let server = setup_testserver("0.0.0.0".to_string()); 139 | let client = rocket::local::blocking::Client::tracked(server).expect("valid instance"); 140 | 141 | // When a public address is used, request should always pass, regardless 142 | // if the Host header is missing 143 | let res = client 144 | .get("/api/0/info") 145 | .header(ContentType::JSON) 146 | .dispatch(); 147 | assert_eq!(res.status(), Status::Ok); 148 | } 149 | 150 | #[test] 151 | fn test_localhost_address() { 152 | let server = setup_testserver("127.0.0.1".to_string()); 153 | let client = rocket::local::blocking::Client::tracked(server).expect("valid instance"); 154 | 155 | // If Host header is missing we should get a BadRequest 156 | let res = client 157 | .get("/api/0/info") 158 | .header(ContentType::JSON) 159 | .dispatch(); 160 | assert_eq!(res.status(), Status::BadRequest); 161 | 162 | // If Host header is not 127.0.0.1 or localhost we should get BadRequest 163 | let res = client 164 | .get("/api/0/info") 165 | .header(ContentType::JSON) 166 | .header(Header::new("Host", "192.168.0.1:1234")) 167 | .dispatch(); 168 | assert_eq!(res.status(), Status::BadRequest); 169 | 170 | // If Host header is 127.0.0.1:5600 we should get OK 171 | let res = client 172 | .get("/api/0/info") 173 | .header(ContentType::JSON) 174 | .header(Header::new("Host", "127.0.0.1:5600")) 175 | .dispatch(); 176 | assert_eq!(res.status(), Status::Ok); 177 | 178 | // If Host header is localhost:5600 we should get OK 179 | let res = client 180 | .get("/api/0/info") 181 | .header(ContentType::JSON) 182 | .header(Header::new("Host", "localhost:5600")) 183 | .dispatch(); 184 | assert_eq!(res.status(), Status::Ok); 185 | 186 | // If Host header is missing port, we should still get OK 187 | let res = client 188 | .get("/api/0/info") 189 | .header(ContentType::JSON) 190 | .header(Header::new("Host", "localhost")) 191 | .dispatch(); 192 | assert_eq!(res.status(), Status::Ok); 193 | } 194 | } 195 | -------------------------------------------------------------------------------- /aw-server/src/endpoints/import.rs: -------------------------------------------------------------------------------- 1 | use rocket::form::Form; 2 | use rocket::http::Status; 3 | use rocket::serde::json::Json; 4 | use rocket::State; 5 | 6 | use std::sync::Mutex; 7 | 8 | use aw_models::BucketsExport; 9 | 10 | use aw_datastore::Datastore; 11 | 12 | use crate::endpoints::{HttpErrorJson, ServerState}; 13 | 14 | fn import(datastore_mutex: &Mutex<Datastore>, import: BucketsExport) -> Result<(), HttpErrorJson> { 15 | let datastore = endpoints_get_lock!(datastore_mutex); 16 | for (_bucketname, bucket) in import.buckets { 17 | match datastore.create_bucket(&bucket) { 18 | Ok(_) => (), 19 | Err(e) => { 20 | let err_msg = format!("Failed to import bucket: {e:?}"); 21 | warn!("{}", err_msg); 22 | return Err(HttpErrorJson::new(Status::InternalServerError, err_msg)); 23 | } 24 | } 25 | } 26 | Ok(()) 27 | } 28 | 29 | #[post("/", data = "<json_data>", format = "application/json")] 30 | pub fn
bucket_import_json( 31 | state: &State<ServerState>, 32 | json_data: Json<BucketsExport>, 33 | ) -> Result<(), HttpErrorJson> { 34 | import(&state.datastore, json_data.into_inner()) 35 | } 36 | 37 | #[derive(FromForm)] 38 | pub struct ImportForm { 39 | // FIXME: In the web-ui the name of this field is buckets.json, but "." is not allowed in field 40 | // names in Rocket and just simply "buckets" seems to work apparently but not sure why. 41 | // FIXME: In aw-server python it will import all fields rather than just the one named 42 | // "buckets.json", that should probably be done here as well. 43 | #[field(name = "buckets")] 44 | import: Json<BucketsExport>, 45 | } 46 | 47 | #[post("/", data = "<form>
", format = "multipart/form-data")] 48 | pub fn bucket_import_form( 49 | state: &State, 50 | form: Form, 51 | ) -> Result<(), HttpErrorJson> { 52 | import(&state.datastore, form.into_inner().import.into_inner()) 53 | } 54 | -------------------------------------------------------------------------------- /aw-server/src/endpoints/mod.rs: -------------------------------------------------------------------------------- 1 | use rust_embed::RustEmbed; 2 | use std::ffi::OsStr; 3 | use std::path::{Path, PathBuf}; 4 | use std::sync::Mutex; 5 | 6 | use gethostname::gethostname; 7 | use rocket::fs::FileServer; 8 | use rocket::http::ContentType; 9 | use rocket::serde::json::Json; 10 | use rocket::State; 11 | 12 | use crate::config::AWConfig; 13 | 14 | use aw_datastore::Datastore; 15 | use aw_models::Info; 16 | 17 | #[derive(RustEmbed)] 18 | #[folder = "$AW_WEBUI_DIR"] 19 | struct EmbeddedAssets; 20 | 21 | pub struct AssetResolver { 22 | asset_path: Option, 23 | } 24 | 25 | impl AssetResolver { 26 | pub fn new(asset_path: Option) -> Self { 27 | Self { asset_path } 28 | } 29 | 30 | fn resolve(&self, file_path: &str) -> Option> { 31 | if let Some(asset_path) = &self.asset_path { 32 | let content = std::fs::read(asset_path.join(file_path)); 33 | if let Ok(data) = content { 34 | return Some(data); 35 | } 36 | } 37 | Some(EmbeddedAssets::get(file_path)?.data.to_vec()) 38 | } 39 | } 40 | 41 | pub struct ServerState { 42 | pub datastore: Mutex, 43 | pub asset_resolver: AssetResolver, 44 | pub device_id: String, 45 | } 46 | 47 | #[macro_use] 48 | mod util; 49 | mod bucket; 50 | mod cors; 51 | mod export; 52 | mod hostcheck; 53 | mod import; 54 | mod query; 55 | mod settings; 56 | 57 | pub use util::HttpErrorJson; 58 | 59 | #[get("/")] 60 | fn root_index(state: &State) -> Option<(ContentType, Vec)> { 61 | get_file("index.html".into(), state) 62 | } 63 | 64 | #[get("/css/")] 65 | fn root_css(file: PathBuf, state: &State) -> Option<(ContentType, Vec)> { 66 | get_file(Path::new("css").join(file), state) 67 | } 68 | 69 | #[get("/fonts/")] 70 | fn root_fonts(file: PathBuf, state: &State) -> Option<(ContentType, Vec)> { 71 | get_file(Path::new("fonts").join(file), state) 72 | } 73 | 74 | #[get("/js/")] 75 | fn root_js(file: PathBuf, state: &State) -> Option<(ContentType, Vec)> { 76 | get_file(Path::new("js").join(file), state) 77 | } 78 | 79 | #[get("/static/")] 80 | fn root_static(file: PathBuf, state: &State) -> Option<(ContentType, Vec)> { 81 | get_file(Path::new("static").join(file), state) 82 | } 83 | 84 | #[get("/favicon.ico")] 85 | fn root_favicon(state: &State) -> Option<(ContentType, Vec)> { 86 | get_file("favicon.ico".into(), state) 87 | } 88 | 89 | #[get("/dark.css")] 90 | fn root_dark(state: &State) -> Option<(ContentType, Vec)> { 91 | get_file("dark.css".into(), state) 92 | } 93 | 94 | #[get("/logo.png")] 95 | fn root_logo(state: &State) -> Option<(ContentType, Vec)> { 96 | get_file("logo.png".into(), state) 97 | } 98 | 99 | #[get("/manifest.json")] 100 | fn root_manifest(state: &State) -> Option<(ContentType, Vec)> { 101 | get_file("manifest.json".into(), state) 102 | } 103 | 104 | #[get("/")] 105 | fn server_info(config: &State, state: &State) -> Json { 106 | #[allow(clippy::or_fun_call)] 107 | let hostname = gethostname().into_string().unwrap_or("unknown".to_string()); 108 | const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION"); 109 | 110 | Json(Info { 111 | hostname, 112 | version: format!("v{} (rust)", VERSION.unwrap_or("(unknown)")), 113 | testing: config.testing, 114 | device_id: 
114 | device_id: state.device_id.clone(), 115 | }) 116 | } 117 | 118 | fn get_file(file: PathBuf, state: &State<ServerState>) -> Option<(ContentType, Vec<u8>)> { 119 | let asset = state.asset_resolver.resolve(&file.display().to_string())?; 120 | 121 | let content_type = file 122 | .extension() 123 | .and_then(OsStr::to_str) 124 | .and_then(ContentType::from_extension) 125 | .unwrap_or(ContentType::Bytes); 126 | 127 | Some((content_type, asset)) 128 | } 129 | 130 | pub fn build_rocket(server_state: ServerState, config: AWConfig) -> rocket::Rocket<rocket::Build> { 131 | info!( 132 | "Starting aw-server-rust at {}:{}", 133 | config.address, config.port 134 | ); 135 | let cors = cors::cors(&config); 136 | let hostcheck = hostcheck::HostCheck::new(&config); 137 | let custom_static = config.custom_static.clone(); 138 | 139 | let mut rocket = rocket::custom(config.to_rocket_config()) 140 | .attach(cors.clone()) 141 | .attach(hostcheck) 142 | .manage(cors) 143 | .manage(server_state) 144 | .manage(config) 145 | .mount( 146 | "/", 147 | routes![ 148 | root_index, 149 | root_favicon, 150 | root_fonts, 151 | root_css, 152 | root_js, 153 | root_static, 154 | // custom static files 155 | root_dark, 156 | root_logo, 157 | root_manifest 158 | ], 159 | ) 160 | .mount("/api/0/info", routes![server_info]) 161 | .mount( 162 | "/api/0/buckets", 163 | routes![ 164 | bucket::bucket_new, 165 | bucket::bucket_delete, 166 | bucket::buckets_get, 167 | bucket::bucket_get, 168 | bucket::bucket_events_get, 169 | bucket::bucket_events_create, 170 | bucket::bucket_events_heartbeat, 171 | bucket::bucket_event_count, 172 | bucket::bucket_events_get_single, 173 | bucket::bucket_events_delete_by_id, 174 | bucket::bucket_export 175 | ], 176 | ) 177 | .mount("/api/0/query", routes![query::query]) 178 | .mount( 179 | "/api/0/import", 180 | routes![import::bucket_import_json, import::bucket_import_form], 181 | ) 182 | .mount("/api/0/export", routes![export::buckets_export]) 183 | .mount( 184 | "/api/0/settings", 185 | routes![ 186 | settings::setting_get, 187 | settings::setting_set, 188 | settings::setting_delete, 189 | settings::settings_get, 190 | ], 191 | ) 192 | .mount("/", rocket_cors::catch_all_options_routes()); 193 | 194 | // for each custom static directory, mount it at the given name 195 | for (name, dir) in custom_static { 196 | info!( 197 | "Serving /pages/{} custom static directory from {}", 198 | name, dir 199 | ); 200 | rocket = rocket.mount(&format!("/pages/{name}"), FileServer::from(dir)); 201 | } 202 | rocket 203 | } 204 | 205 | mod tests { 206 | #[test] 207 | fn test_filesystem_resolver() { 208 | let resolver = super::AssetResolver::new(Some(".".into())); 209 | 210 | let content = resolver.resolve("Cargo.toml").unwrap(); 211 | 212 | assert!(String::from_utf8(content).unwrap().contains("aw-server")); 213 | } 214 | 215 | #[test] 216 | fn test_resolver_without_asset() { 217 | let resolver = super::AssetResolver::new(Some(".".into())); 218 | 219 | let content = resolver.resolve("Cargo.json"); 220 | 221 | assert!(content.is_none()); 222 | } 223 | } 224 | -------------------------------------------------------------------------------- /aw-server/src/endpoints/query.rs: -------------------------------------------------------------------------------- 1 | use rocket::http::Status; 2 | use rocket::serde::json::{json, Json, Value}; 3 | use rocket::State; 4 | 5 | use aw_models::Query; 6 | 7 | use crate::endpoints::{HttpErrorJson, ServerState}; 8 | 9 | #[post("/", data = "<query_req>", format = "application/json")] 10 | pub fn query(query_req: Json<Query>, state: &State<ServerState>) -> Result<Value, HttpErrorJson> {
11 | let query_code = query_req.0.query.join("\n"); 12 | let intervals = &query_req.0.timeperiods; 13 | let mut results = Vec::new(); 14 | let datastore = endpoints_get_lock!(state.datastore); 15 | for interval in intervals { 16 | let result = match aw_query::query(&query_code, interval, &datastore) { 17 | Ok(data) => data, 18 | Err(e) => { 19 | warn!("Query failed: {:?}", e); 20 | return Err(HttpErrorJson::new( 21 | Status::InternalServerError, 22 | e.to_string(), 23 | )); 24 | } 25 | }; 26 | results.push(result); 27 | } 28 | Ok(json!(results)) 29 | } 30 | -------------------------------------------------------------------------------- /aw-server/src/endpoints/settings.rs: -------------------------------------------------------------------------------- 1 | use crate::endpoints::ServerState; 2 | use rocket::http::Status; 3 | use rocket::serde::json::Json; 4 | use rocket::State; 5 | use std::collections::HashMap; 6 | use std::sync::MutexGuard; 7 | 8 | use aw_datastore::{Datastore, DatastoreError}; 9 | 10 | use crate::endpoints::HttpErrorJson; 11 | 12 | fn parse_key(key: String) -> Result<String, HttpErrorJson> { 13 | let namespace: String = "settings.".to_string(); 14 | if key.len() >= 128 { 15 | Err(HttpErrorJson::new( 16 | Status::BadRequest, 17 | "Too long key".to_string(), 18 | )) 19 | } else { 20 | Ok(namespace + key.as_str()) 21 | } 22 | } 23 | 24 | #[get("/")] 25 | pub fn settings_get( 26 | state: &State<ServerState>, 27 | ) -> Result<Json<HashMap<String, serde_json::Value>>, HttpErrorJson> { 28 | let datastore = endpoints_get_lock!(state.datastore); 29 | let queryresults = match datastore.get_key_values("settings.%") { 30 | Ok(result) => Ok(result), 31 | Err(err) => Err(err.into()), 32 | }; 33 | 34 | match queryresults { 35 | Ok(settings) => { 36 | // strip 'settings.' prefix from keys 37 | let mut map: HashMap<String, serde_json::Value> = HashMap::new(); 38 | for (key, value) in settings.iter() { 39 | map.insert( 40 | key.strip_prefix("settings.").unwrap_or(key).to_string(), 41 | serde_json::from_str(value.clone().as_str()).unwrap(), 42 | ); 43 | } 44 | Ok(Json(map)) 45 | } 46 | Err(err) => Err(err), 47 | } 48 | } 49 | 50 | #[get("/<key>")] 51 | pub fn setting_get( 52 | state: &State<ServerState>, 53 | key: String, 54 | ) -> Result<Json<serde_json::Value>, HttpErrorJson> { 55 | let setting_key = parse_key(key)?; 56 | let datastore = endpoints_get_lock!(state.datastore); 57 | 58 | match datastore.get_key_value(&setting_key) { 59 | Ok(value) => Ok(Json(serde_json::from_str(&value).unwrap())), 60 | Err(DatastoreError::NoSuchKey(_)) => Ok(Json(serde_json::from_str("null").unwrap())), 61 | Err(err) => Err(err.into()), 62 | } 63 | } 64 | 65 | #[post("/<key>", data = "<value>", format = "application/json")] 66 | pub fn setting_set( 67 | state: &State<ServerState>, 68 | key: String, 69 | value: Json<serde_json::Value>, 70 | ) -> Result<Status, HttpErrorJson> { 71 | let setting_key = parse_key(key)?; 72 | let value_str = match serde_json::to_string(&value.0) { 73 | Ok(value) => value, 74 | Err(err) => { 75 | return Err(HttpErrorJson::new( 76 | Status::BadRequest, 77 | format!("Invalid JSON: {}", err), 78 | )) 79 | } 80 | }; 81 | 82 | let datastore: MutexGuard<'_, Datastore> = endpoints_get_lock!(state.datastore); 83 | let result = datastore.set_key_value(&setting_key, &value_str); 84 | 85 | match result { 86 | Ok(_) => Ok(Status::Created), 87 | Err(err) => Err(err.into()), 88 | } 89 | } 90 | 91 | #[delete("/<key>")] 92 | pub fn setting_delete(state: &State<ServerState>, key: String) -> Result<(), HttpErrorJson> { 93 | let setting_key = parse_key(key)?; 94 | 95 | let datastore = endpoints_get_lock!(state.datastore); 96 | let result = datastore.delete_key_value(&setting_key); 97 | 98 | match result {
99 | Ok(_) => Ok(()), 100 | Err(err) => Err(err.into()), 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /aw-server/src/endpoints/util.rs: -------------------------------------------------------------------------------- 1 | use std::io::Cursor; 2 | 3 | use rocket::http::ContentType; 4 | use rocket::http::Header; 5 | use rocket::http::Status; 6 | use rocket::request::Request; 7 | use rocket::response::{self, Responder, Response}; 8 | use serde::Serialize; 9 | 10 | use aw_models::BucketsExport; 11 | 12 | #[derive(Serialize, Debug)] 13 | pub struct HttpErrorJson { 14 | #[serde(skip_serializing)] 15 | status: Status, 16 | message: String, 17 | } 18 | 19 | impl HttpErrorJson { 20 | pub fn new(status: Status, err: String) -> HttpErrorJson { 21 | HttpErrorJson { 22 | status, 23 | message: err, 24 | } 25 | } 26 | } 27 | 28 | impl<'r> Responder<'r, 'static> for HttpErrorJson { 29 | fn respond_to(self, _: &Request) -> response::Result<'static> { 30 | // TODO: Fix unwrap 31 | let body = serde_json::to_string(&self).unwrap(); 32 | Response::build() 33 | .status(self.status) 34 | .sized_body(body.len(), Cursor::new(body)) 35 | .header(ContentType::new("application", "json")) 36 | .ok() 37 | } 38 | } 39 | 40 | pub struct BucketsExportRocket { 41 | inner: BucketsExport, 42 | } 43 | 44 | impl From<BucketsExport> for BucketsExportRocket { 45 | fn from(val: BucketsExport) -> Self { 46 | BucketsExportRocket { inner: val } 47 | } 48 | } 49 | 50 | impl<'r> Responder<'r, 'static> for BucketsExportRocket { 51 | fn respond_to(self, _: &Request) -> response::Result<'static> { 52 | let body = serde_json::to_string(&self.inner).unwrap(); 53 | let header_content = match self.inner.buckets.len() == 1 { 54 | true => format!( 55 | "attachment; filename=aw-bucket-export_{}.json", 56 | self.inner.buckets.into_keys().next().unwrap() 57 | ), 58 | false => "attachment; filename=aw-buckets-export.json".to_string(), 59 | }; 60 | // TODO: Fix unwrap 61 | Response::build() 62 | .status(Status::Ok) 63 | .header(Header::new("Content-Disposition", header_content)) 64 | .sized_body(body.len(), Cursor::new(body)) 65 | //.header(ContentType::new("application", "json")) 66 | .ok() 67 | } 68 | } 69 | 70 | use aw_datastore::DatastoreError; 71 | 72 | impl From<DatastoreError> for HttpErrorJson { 73 | fn from(val: DatastoreError) -> Self { 74 | match val { 75 | DatastoreError::NoSuchBucket(bucket_id) => HttpErrorJson::new( 76 | Status::NotFound, 77 | format!("The requested bucket '{bucket_id}' does not exist"), 78 | ), 79 | DatastoreError::BucketAlreadyExists(bucket_id) => HttpErrorJson::new( 80 | Status::NotModified, 81 | format!("Bucket '{bucket_id}' already exists"), 82 | ), 83 | DatastoreError::NoSuchKey(key) => HttpErrorJson::new( 84 | Status::NotFound, 85 | format!("The requested key(s) '{key}' do not exist"), 86 | ), 87 | DatastoreError::MpscError => HttpErrorJson::new( 88 | Status::InternalServerError, 89 | "Unexpected Mpsc error!".to_string(), 90 | ), 91 | DatastoreError::InternalError(msg) => { 92 | HttpErrorJson::new(Status::InternalServerError, msg) 93 | } 94 | // When upgrade is disabled 95 | DatastoreError::Uninitialized(msg) => { 96 | HttpErrorJson::new(Status::InternalServerError, msg) 97 | } 98 | DatastoreError::OldDbVersion(msg) => { 99 | HttpErrorJson::new(Status::InternalServerError, msg) 100 | } 101 | } 102 | } 103 | } 104 | 105 | #[macro_export] 106 | macro_rules!
endpoints_get_lock { 107 | ( $lock:expr ) => { 108 | match $lock.lock() { 109 | Ok(r) => r, 110 | Err(e) => { 111 | use rocket::http::Status; 112 | let err_msg = format!("Taking datastore lock failed, returning 504: {}", e); 113 | warn!("{}", err_msg); 114 | return Err(HttpErrorJson::new(Status::ServiceUnavailable, err_msg)); 115 | } 116 | } 117 | }; 118 | } 119 | -------------------------------------------------------------------------------- /aw-server/src/lib.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate rocket; 3 | extern crate rocket_cors; 4 | 5 | extern crate serde; 6 | extern crate serde_json; 7 | 8 | extern crate chrono; 9 | 10 | #[cfg(not(target_os = "android"))] 11 | extern crate appdirs; 12 | 13 | #[cfg(target_os = "android")] 14 | #[macro_use] 15 | extern crate lazy_static; 16 | 17 | #[macro_use] 18 | extern crate log; 19 | extern crate fern; 20 | 21 | extern crate toml; 22 | 23 | #[macro_use] 24 | pub mod macros; 25 | pub mod config; 26 | pub mod device_id; 27 | pub mod dirs; 28 | pub mod endpoints; 29 | pub mod logging; 30 | 31 | #[cfg(target_os = "android")] 32 | pub mod android; 33 | 34 | extern crate aw_datastore; 35 | extern crate aw_models; 36 | extern crate aw_query; 37 | extern crate aw_transform; 38 | -------------------------------------------------------------------------------- /aw-server/src/logging.rs: -------------------------------------------------------------------------------- 1 | use std::fs; 2 | use std::path::PathBuf; 3 | 4 | use fern::colors::{Color, ColoredLevelConfig}; 5 | 6 | use crate::dirs; 7 | 8 | pub fn setup_logger(module: &str, testing: bool, verbose: bool) -> Result<(), fern::InitError> { 9 | let mut logfile_path: PathBuf = 10 | dirs::get_log_dir(module).expect("Unable to get log dir to store logs in"); 11 | fs::create_dir_all(logfile_path.clone()).expect("Unable to create folder for logs"); 12 | let filename = if !testing { 13 | format!("{}_%Y-%m-%dT%H-%M-%S%z.log", module) 14 | } else { 15 | format!("{}-testing_%Y-%m-%dT%H-%M-%S%z.log", module) 16 | }; 17 | 18 | logfile_path.push(chrono::Local::now().format(&filename).to_string()); 19 | 20 | log_panics::init(); 21 | 22 | let colors = ColoredLevelConfig::new() 23 | .debug(Color::White) 24 | .info(Color::Green) 25 | .warn(Color::Yellow) 26 | .error(Color::Red); 27 | 28 | let default_log_level = if testing || verbose { 29 | log::LevelFilter::Debug 30 | } else { 31 | log::LevelFilter::Info 32 | }; 33 | 34 | let log_level = std::env::var("LOG_LEVEL").map_or(default_log_level, |level| { 35 | match level.to_lowercase().as_str() { 36 | "trace" => log::LevelFilter::Trace, 37 | "debug" => log::LevelFilter::Debug, 38 | "info" => log::LevelFilter::Info, 39 | "warn" => log::LevelFilter::Warn, 40 | "error" => log::LevelFilter::Error, 41 | _ => default_log_level, 42 | } 43 | }); 44 | 45 | let mut dispatch = fern::Dispatch::new().level(log_level); 46 | // Set some Rocket messages to debug level 47 | 48 | let is_debug = matches!(log_level, log::LevelFilter::Trace | log::LevelFilter::Debug); 49 | if !is_debug { 50 | dispatch = dispatch 51 | .level_for("rocket", log::LevelFilter::Warn) 52 | // rocket_cors has a lot of unhelpful info messages that spam the log on every request 53 | // https://github.com/ActivityWatch/activitywatch/issues/975 54 | .level_for("rocket_cors", log::LevelFilter::Warn) 55 | .level_for("_", log::LevelFilter::Warn) // Rocket requests 56 | .level_for("launch_", log::LevelFilter::Warn); // Rocket config info 57 | } 58 
| 59 | dispatch 60 | // Formatting 61 | .format(move |out, message, record| { 62 | out.finish(format_args!( 63 | "[{}][{}][{}]: {}", 64 | chrono::Local::now().format("%Y-%m-%d %H:%M:%S"), 65 | colors.color(record.level()), 66 | record.target(), 67 | message, 68 | )) 69 | }) 70 | // Color and higher log levels to stdout 71 | .chain(fern::Dispatch::new().chain(std::io::stdout())) 72 | // No color and lower log levels to logfile 73 | .chain( 74 | fern::Dispatch::new() 75 | .format(|out, message, _record| { 76 | out.finish(format_args!( 77 | // TODO: Strip color info 78 | "{message}", 79 | )) 80 | }) 81 | .chain(fern::log_file(logfile_path)?), 82 | ) 83 | .apply()?; 84 | Ok(()) 85 | } 86 | 87 | #[cfg(test)] 88 | mod tests { 89 | use super::setup_logger; 90 | 91 | /* disable this test. 92 | * This is due to it failing in GitHub actions, claiming that the logger 93 | * has been initialized twice which is not allowed */ 94 | #[ignore] 95 | #[test] 96 | fn test_setup_logger() { 97 | setup_logger("aw-server-rust", true, true).unwrap(); 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /aw-server/src/macros.rs: -------------------------------------------------------------------------------- 1 | #[macro_export] 2 | macro_rules! json_map { 3 | { $( $key:literal : $value:expr),* } => {{ 4 | use serde_json::Value; 5 | use serde_json::map::Map; 6 | #[allow(unused_mut)] 7 | let mut map : Map<String, Value> = Map::new(); 8 | $( 9 | map.insert( $key.to_string(), json!($value) ); 10 | )* 11 | map 12 | }}; 13 | } 14 | -------------------------------------------------------------------------------- /aw-server/src/main.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate log; 3 | 4 | use std::env; 5 | use std::path::PathBuf; 6 | 7 | use clap::crate_version; 8 | use clap::Parser; 9 | 10 | use aw_server::*; 11 | 12 | #[cfg(target_os = "linux")] 13 | use sd_notify::NotifyState; 14 | #[cfg(all(target_os = "linux", target_arch = "x86"))] 15 | extern crate jemallocator; 16 | #[cfg(all(target_os = "linux", target_arch = "x86"))] 17 | #[global_allocator] 18 | static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; 19 | 20 | /// Rust server for ActivityWatch 21 | #[derive(Parser)] 22 | #[clap(version = crate_version!(), author = "Johan Bjäreholt, Erik Bjäreholt, et al.")] 23 | struct Opts { 24 | /// Run in testing mode 25 | #[clap(long)] 26 | testing: bool, 27 | 28 | /// Verbose output 29 | #[clap(long)] 30 | verbose: bool, 31 | 32 | /// Address to listen to 33 | #[clap(long)] 34 | host: Option<String>, 35 | 36 | /// Port to listen on 37 | #[clap(long)] 38 | port: Option<String>, 39 | 40 | /// Path to database override 41 | /// Also implies --no-legacy-import if no db found 42 | #[clap(long)] 43 | dbpath: Option<String>, 44 | 45 | /// Path to webui override 46 | #[clap(long)] 47 | webpath: Option<String>, 48 | 49 | /// Mapping of custom static paths to serve, in the format: watcher1=/path,watcher2=/path2 50 | #[clap(long)] 51 | custom_static: Option<String>, 52 | 53 | /// Device ID override 54 | #[clap(long)] 55 | device_id: Option<String>, 56 | 57 | /// Don't import from aw-server-python if no aw-server-rust db found 58 | #[clap(long)] 59 | no_legacy_import: bool, 60 | } 61 | 62 | #[rocket::main] 63 | async fn main() -> Result<(), rocket::Error> { 64 | let opts: Opts = Opts::parse(); 65 | 66 | use std::sync::Mutex; 67 | 68 | let mut testing = opts.testing; 69 | 70 | // Always override environment if --testing is specified 71 | if !testing &&
72 | testing = true;
73 | }
74 |
75 | logging::setup_logger("aw-server-rust", testing, opts.verbose)
76 | .expect("Failed to setup logging");
77 |
78 | if testing {
79 | info!("Running server in Testing mode");
80 | }
81 |
82 | let mut config = config::create_config(testing);
83 |
84 | // set host if overridden
85 | if let Some(host) = opts.host {
86 | config.address = host;
87 | }
88 |
89 | // set port if overridden
90 | if let Some(port) = opts.port {
91 | config.port = port.parse().unwrap();
92 | }
93 |
94 | // set custom_static if overridden, transform into map
95 | if let Some(custom_static_str) = opts.custom_static {
96 | let custom_static_map: std::collections::HashMap<String, String> = custom_static_str
97 | .split(',')
98 | .map(|s| {
99 | let mut split = s.split('=');
100 | let key = split.next().unwrap().to_string();
101 | let value = split.next().unwrap().to_string();
102 | (key, value)
103 | })
104 | .collect();
105 | config.custom_static.extend(custom_static_map);
106 |
107 | // validate paths, log error if invalid
108 | // remove invalid paths
109 | for (name, path) in config.custom_static.clone().iter() {
110 | if !std::path::Path::new(path).exists() {
111 | error!("custom_static path for {} does not exist ({})", name, path);
112 | config.custom_static.remove(name);
113 | }
114 | }
115 | }
116 |
117 | // Set db path if overridden
118 | let db_path: String = if let Some(dbpath) = opts.dbpath.clone() {
119 | dbpath
120 | } else {
121 | dirs::db_path(testing)
122 | .expect("Failed to get db path")
123 | .to_str()
124 | .unwrap()
125 | .to_string()
126 | };
127 | info!("Using DB at path {:?}", db_path);
128 |
129 | let asset_path = opts.webpath.map(PathBuf::from);
130 | info!("Using aw-webui assets at path {:?}", asset_path);
131 |
132 | // Only use legacy import if opts.dbpath is not set
133 | let legacy_import = !opts.no_legacy_import && opts.dbpath.is_none();
134 | if opts.dbpath.is_some() {
135 | info!("Since custom dbpath is set, --no-legacy-import is implied");
136 | }
137 |
138 | let device_id: String = if let Some(id) = opts.device_id {
139 | id
140 | } else {
141 | device_id::get_device_id()
142 | };
143 |
144 | let server_state = endpoints::ServerState {
145 | // Even if legacy_import is set to true it is disabled on Android so
146 | // it will not happen there
147 | datastore: Mutex::new(aw_datastore::Datastore::new(db_path, legacy_import)),
148 | asset_resolver: endpoints::AssetResolver::new(asset_path),
149 | device_id,
150 | };
151 |
152 | let _rocket = endpoints::build_rocket(server_state, config)
153 | .ignite()
154 | .await?;
155 | #[cfg(target_os = "linux")]
156 | let _ = sd_notify::notify(true, &[NotifyState::Ready]);
157 | _rocket.launch().await?;
158 |
159 | Ok(())
160 | }
161 |
-------------------------------------------------------------------------------- /aw-server/tests/macros.rs: --------------------------------------------------------------------------------
1 | #[macro_use]
2 | extern crate serde_json;
3 |
4 | #[macro_use]
5 | extern crate aw_server;
6 |
7 | #[cfg(test)]
8 | mod macros_tests {
9 | #[test]
10 | fn test_json_map() {
11 | json_map! {};
12 | json_map! {"a": json!(1)};
13 | json_map! {"a": json!(1), "b": json!(2)};
{"a": json!(1), "b": json!(2)}; 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /aw-sync/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "aw-sync" 3 | version = "0.1.0" 4 | authors = ["Erik Bjäreholt "] 5 | edition = "2021" 6 | 7 | [lib] 8 | name = "aw_sync" 9 | path = "src/lib.rs" 10 | 11 | [[bin]] 12 | name = "aw-sync" 13 | path = "src/main.rs" 14 | 15 | [dependencies] 16 | log = "0.4" 17 | toml = "0.8" 18 | chrono = { version = "0.4", features = ["serde"] } 19 | serde = "1.0" 20 | serde_json = "1.0" 21 | reqwest = { version = "0.11", features = ["json", "blocking"] } 22 | clap = { version = "4.1", features = ["derive"] } 23 | appdirs = "0.2.0" 24 | dirs = "5.0.1" 25 | gethostname = "0.4.3" 26 | ctrlc = "3.4.5" 27 | 28 | aw-server = { path = "../aw-server" } 29 | aw-models = { path = "../aw-models" } 30 | aw-datastore = { path = "../aw-datastore" } 31 | aw-client-rust = { path = "../aw-client-rust" } 32 | 33 | [target.'cfg(target_os="linux")'.dependencies] 34 | openssl = { version = "0.10.64", features = ["vendored"] } # https://github.com/ActivityWatch/aw-server-rust/issues/478 35 | -------------------------------------------------------------------------------- /aw-sync/README.md: -------------------------------------------------------------------------------- 1 | aw-sync-rust 2 | ============ 3 | 4 | Synchronization for ActivityWatch. 5 | 6 | Works by syncing local buckets with a special folder, which in turn should be synchronized by rsync/Syncthing/Dropbox/GDrive/etc. 7 | 8 | The latest beta versions of ActivityWatch ship with the `aw-sync` binary, but it's not enabled by default. You can start it from aw-qt or the command line, but due to the early state of development might not have the best UX. Please report issues and submit PRs! 9 | 10 | Was originally prototyped as a PR to aw-server: https://github.com/ActivityWatch/aw-server/pull/50 11 | 12 | 13 | ## Usage 14 | 15 | This will start a daemon which pulls and pushes events with the sync directory (`~/ActivityWatchSync` by default) every 5 minutes: 16 | 17 | ```sh 18 | aw-sync 19 | ``` 20 | 21 | For more options, see `aw-sync --help`. 22 | 23 | ### Setting up sync 24 | 25 | Once you have aw-sync running, you need to set up syncing with the sync directory using your preferred syncing tool. 26 | 27 | The default sync directory is `~/ActivityWatchSync`, but you can change it using the `--sync-dir` option or by setting the `AW_SYNC_DIR` environment variable. 28 | 29 | ### Running from source 30 | 31 | If you want to run it from source, in the root of the repository run: 32 | 33 | ```sh 34 | cargo run --bin aw-sync 35 | ``` 36 | For more options, see `cargo run --bin aw-sync -- --help`. 37 | 38 | ## FAQ 39 | 40 | ### When is it ready? 41 | 42 | It works today, but there is still testing and polishing to be done before it's "click and play". 43 | 44 | ### Why do it this way? 45 | 46 | By essentially only offering a "sync with folder" feature, we give the user a lot of freedom to choose how to store and sync their data. 47 | 48 | We also avoid having to implement complex features such as conflict resolution, by enforcing that each device only writes to files in the sync folder they "own", and other devices may not modify them. 49 | 50 | ### What are the limitations? 
51 |
52 | - It only syncs afk and window buckets by default (since bucket IDs need to be unique)
53 | - It will work a lot better once proper `hostname -> device ID` migration is complete.
54 | - It doesn't sync settings
55 | - It doesn't support Android, yet.
56 | - It mirrors events to all devices:
57 |   - If you have a lot of devices you'll get a lot of duplicates, taking up a lot of space and potentially impacting performance.
58 | - It doesn't support modifying/deleting events, yet.
59 |
60 | ---
61 |
62 | ## Advanced usage
63 |
64 | ### Syncing real data to a testing instance
65 |
66 | If you want to try sync, you can do so by following these steps.
67 |
68 | We will use a separate testing instance of aw-server(-rust) to store and view the synced data from the sync directory. This is to avoid testing against, and potentially polluting, production instances in write scenarios. We will sync all devices with the sync folder, and then sync the sync folder into our testing instance to view.
69 |
70 | To test syncing real events to a sync folder (which can then be pulled from),
71 | we will use some helper scripts to do the following:
72 |
73 | 1. `./test-sync-push.sh`
74 |    - Creates a sync directory **for you to set up sync** with Syncthing/Dropbox/Gdrive/rclone/whatever
75 |      - By default `~/ActivityWatchSync`
76 |    - Creates a datastore for the current host in the sync folder
77 |    - Syncs all local buckets of interest (window & afk buckets, by default) to the sync dir
78 |
79 | 2. `./test-server.sh`
80 |    - Starts a testing server **on port 5667** using a temporary directory as datastore (`/tmp/...`)
81 |
82 | 3. `./test-sync-pull.sh`
83 |    - Imports all the events from the sync folder into the testing server on port 5667
84 |
85 | 4. You should now have all events synced to a local testing instance!
86 |    - You can browse [127.0.0.1:5667](http://127.0.0.1:5667) to view the testing instance, where you'll see events synced from all hosts.
87 |    - You can now set up syncing for `~/ActivityWatchSync` on more devices, and on each one use the script `./test-sync-push.sh` to push their events into the sync folder, then run `./test-sync-pull.sh` on the device where you have the testing instance to update the data there.
88 |
89 | 5. To view data from all devices at once, go into [127.0.0.1:5667/#/settings](http://127.0.0.1:5667/#/settings) and check the "Use multidevice query" checkbox (near the bottom, under "developer settings").
90 |    - You can now navigate back to the activity view for any device, where you should see data from multiple devices being included in (most of) the visualizations.
91 |
92 | In the end, you should get something like this: https://twitter.com/ErikBjare/status/1519399784234246147
93 |
94 | ### Testing with fake data
95 |
96 | #### Pushing to the sync directory
97 |
98 | First start the sending aw-server instance. For example:
99 |
100 | ```sh
101 | PORT=5667
102 | cargo run --bin aw-server -- --testing --port $PORT --dbpath test-$PORT.sqlite --device-id $PORT --no-legacy-import
103 | ```
104 |
105 | You can create some test data by opening `http://localhost:5667/#/stopwatch` and creating a few events.
106 |
107 | Then run `cargo run --bin aw-sync -- --port 5667` to sync your instance's buckets with the target directory.
108 |
109 | #### Pulling from the sync directory
110 |
111 | Now to sync things back from the sync directory into another instance.
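At this point the sync directory contains a datastore pushed by the 5667 instance. As a quick sanity check, you can inspect the layout; the sketch below is illustrative (your hostname will differ), following the `{hostname}/{device_id}/*.db` structure described in `src/sync_wrapper.rs`:

```sh
# Illustrative: list the synced datastores (paths are examples)
find ~/ActivityWatchSync -name '*.db'
# -> /home/user/ActivityWatchSync/myhost/5667/test.db
```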
112 |
113 | First, let's start another instance:
114 |
115 | ```sh
116 | PORT=5668
117 | cargo run --bin aw-server -- --testing --port $PORT --dbpath test-$PORT.sqlite --device-id $PORT --no-legacy-import
118 | ```
119 |
120 | Now run `cargo run --bin aw-sync -- --port 5668` to pull buckets from the sync dir (retrieving events from the 5667 instance) and push buckets from the 5668 instance to the sync dir.
121 |
122 |
-------------------------------------------------------------------------------- /aw-sync/src/accessmethod.rs: --------------------------------------------------------------------------------
1 | use std::collections::HashMap;
2 |
3 | use aw_client_rust::blocking::AwClient;
4 | use chrono::{DateTime, Utc};
5 | use reqwest::StatusCode;
6 |
7 | use aw_datastore::{Datastore, DatastoreError};
8 | use aw_models::{Bucket, Event};
9 |
10 | // This trait should be implemented by both AwClient and Datastore, unifying them under a single API
11 | pub trait AccessMethod: std::fmt::Debug {
12 | fn get_buckets(&self) -> Result<HashMap<String, Bucket>, String>;
13 | fn get_bucket(&self, bucket_id: &str) -> Result<Bucket, DatastoreError>;
14 | fn create_bucket(&self, bucket: &Bucket) -> Result<(), DatastoreError>;
15 | fn get_events(
16 | &self,
17 | bucket_id: &str,
18 | start: Option<DateTime<Utc>>,
19 | end: Option<DateTime<Utc>>,
20 | limit: Option<u64>,
21 | ) -> Result<Vec<Event>, String>;
22 | fn insert_events(&self, bucket_id: &str, events: Vec<Event>) -> Result<(), String>;
23 | fn get_event_count(&self, bucket_id: &str) -> Result<i64, String>;
24 | fn heartbeat(&self, bucket_id: &str, event: Event, duration: f64) -> Result<(), String>;
25 | fn close(&self);
26 | }
27 |
28 | impl AccessMethod for Datastore {
29 | fn get_buckets(&self) -> Result<HashMap<String, Bucket>, String> {
30 | Ok(Datastore::get_buckets(self).unwrap())
31 | }
32 | fn get_bucket(&self, bucket_id: &str) -> Result<Bucket, DatastoreError> {
33 | Datastore::get_bucket(self, bucket_id)
34 | }
35 | fn create_bucket(&self, bucket: &Bucket) -> Result<(), DatastoreError> {
36 | Datastore::create_bucket(self, bucket)?;
37 | self.force_commit().unwrap();
38 | Ok(())
39 | }
40 | fn get_events(
41 | &self,
42 | bucket_id: &str,
43 | start: Option<DateTime<Utc>>,
44 | end: Option<DateTime<Utc>>,
45 | limit: Option<u64>,
46 | ) -> Result<Vec<Event>, String> {
47 | Ok(Datastore::get_events(self, bucket_id, start, end, limit).unwrap())
48 | }
49 | fn heartbeat(&self, bucket_id: &str, event: Event, duration: f64) -> Result<(), String> {
50 | Datastore::heartbeat(self, bucket_id, event, duration).unwrap();
51 | self.force_commit().unwrap();
52 | Ok(())
53 | }
54 | fn insert_events(&self, bucket_id: &str, events: Vec<Event>) -> Result<(), String> {
55 | Datastore::insert_events(self, bucket_id, &events[..]).unwrap();
56 | self.force_commit().unwrap();
57 | Ok(())
58 | }
59 | fn get_event_count(&self, bucket_id: &str) -> Result<i64, String> {
60 | Ok(Datastore::get_event_count(self, bucket_id, None, None).unwrap())
61 | }
62 | fn close(&self) {
63 | Datastore::close(self);
64 | }
65 | }
66 |
67 | impl AccessMethod for AwClient {
68 | fn get_buckets(&self) -> Result<HashMap<String, Bucket>, String> {
69 | Ok(AwClient::get_buckets(self).unwrap())
70 | }
71 | fn get_bucket(&self, bucket_id: &str) -> Result<Bucket, DatastoreError> {
72 | let bucket = AwClient::get_bucket(self, bucket_id);
73 | match bucket {
74 | Ok(bucket) => Ok(bucket),
75 | Err(e) => {
76 | warn!("{:?}", e);
77 | let code = e.status().unwrap();
78 | if code == StatusCode::NOT_FOUND {
79 | Err(DatastoreError::NoSuchBucket(bucket_id.into()))
80 | } else {
81 | panic!("Unexpected error");
82 | }
83 | }
84 | }
85 | }
86 | fn get_events(
87 | &self,
88 | bucket_id: &str,
89 | start: Option<DateTime<Utc>>,
90 | end: Option<DateTime<Utc>>,
91 | limit: Option<u64>,
92 | ) -> Result<Vec<Event>, String> {
93 | Ok(AwClient::get_events(self, bucket_id, start, end, limit).unwrap())
94 | }
95 | fn insert_events(&self, bucket_id: &str, events: Vec<Event>) -> Result<(), String> {
96 | AwClient::insert_events(self, bucket_id, events).map_err(|e| e.to_string())
97 | }
98 | fn get_event_count(&self, bucket_id: &str) -> Result<i64, String> {
99 | Ok(AwClient::get_event_count(self, bucket_id).unwrap())
100 | }
101 | fn create_bucket(&self, bucket: &Bucket) -> Result<(), DatastoreError> {
102 | AwClient::create_bucket(self, bucket).unwrap();
103 | Ok(())
104 | }
105 | fn heartbeat(&self, bucket_id: &str, event: Event, duration: f64) -> Result<(), String> {
106 | AwClient::heartbeat(self, bucket_id, &event, duration).map_err(|e| format!("{e:?}"))
107 | }
108 | fn close(&self) {
109 | // NOP
110 | }
111 | }
112 |
-------------------------------------------------------------------------------- /aw-sync/src/dirs.rs: --------------------------------------------------------------------------------
1 | use dirs::home_dir;
2 | use std::error::Error;
3 | use std::fs;
4 | use std::path::PathBuf;
5 |
6 | // TODO: This could be refactored to share logic with aw-server/src/dirs.rs
7 | // TODO: add proper config support
8 | #[allow(dead_code)]
9 | pub fn get_config_dir() -> Result<PathBuf, Box<dyn Error>> {
10 | let mut dir = appdirs::user_config_dir(Some("activitywatch"), None, false)
11 | .map_err(|_| "Unable to read user config dir")?;
12 | dir.push("aw-sync");
13 | fs::create_dir_all(dir.clone())?;
14 | Ok(dir)
15 | }
16 |
17 | pub fn get_server_config_path(testing: bool) -> Result<PathBuf, ()> {
18 | let dir = aw_server::dirs::get_config_dir()?;
19 | Ok(dir.join(if testing {
20 | "config-testing.toml"
21 | } else {
22 | "config.toml"
23 | }))
24 | }
25 |
26 | pub fn get_sync_dir() -> Result<PathBuf, Box<dyn Error>> {
27 | // if AW_SYNC_DIR is set, use that
28 | if let Ok(dir) = std::env::var("AW_SYNC_DIR") {
29 | return Ok(PathBuf::from(dir));
30 | }
31 | let home_dir = home_dir().ok_or("Unable to read home_dir")?;
32 | Ok(home_dir.join("ActivityWatchSync"))
33 | }
-------------------------------------------------------------------------------- /aw-sync/src/lib.rs: --------------------------------------------------------------------------------
1 | #[macro_use]
2 | extern crate log;
3 | extern crate chrono;
4 | extern crate serde;
5 | extern crate serde_json;
6 |
7 | mod sync;
8 | pub use sync::create_datastore;
9 | pub use sync::sync_datastores;
10 | pub use sync::sync_run;
11 | pub use sync::SyncSpec;
12 |
13 | mod sync_wrapper;
14 | pub use sync_wrapper::push;
15 | pub use sync_wrapper::{pull, pull_all};
16 |
17 | mod accessmethod;
18 | pub use accessmethod::AccessMethod;
19 |
20 | mod dirs;
21 | mod util;
-------------------------------------------------------------------------------- /aw-sync/src/main.rs: --------------------------------------------------------------------------------
1 | // What needs to be done:
2 | // - [x] Setup local sync bucket
3 | // - [x] Import local buckets and sync events from aw-server (either through API or through creating a read-only Datastore)
4 | // - [x] Import buckets and sync events from remotes
5 | // - [ ] Add CLI arguments
6 | //   - [x] For which local server to use
7 | //   - [x] For which sync dir to use
8 | //   - [ ] Date to start syncing from
9 |
10 | #[macro_use]
11 | extern crate log;
12 | extern crate chrono;
13 | extern crate serde;
14 | extern crate serde_json;
15 |
16 | use chrono::{DateTime, Utc};
17 | use clap::{Parser, Subcommand};
18 | use std::error::Error;
19 | use std::path::PathBuf;
20 | use std::sync::mpsc::{channel, RecvTimeoutError};
21 | use std::time::Duration;
22 |
23 | use aw_client_rust::blocking::AwClient;
24 |
25 | mod accessmethod;
26 | mod dirs;
27 | mod sync;
28 | mod sync_wrapper;
29 | mod util;
30 |
31 | #[derive(Parser)]
32 | #[clap(version = "0.1", author = "Erik Bjäreholt")]
33 | struct Opts {
34 | #[clap(subcommand)]
35 | command: Option<Commands>,
36 |
37 | /// Host of instance to connect to.
38 | #[clap(long, default_value = "127.0.0.1")]
39 | host: String,
40 |
41 | /// Port of instance to connect to.
42 | #[clap(long)]
43 | port: Option<u16>,
44 |
45 | /// Convenience option for using the default testing host and port.
46 | #[clap(long)]
47 | testing: bool,
48 |
49 | /// Full path to sync directory.
50 | /// If not specified, use AW_SYNC_DIR env var, or default to ~/ActivityWatchSync
51 | #[clap(long)]
52 | sync_dir: Option<PathBuf>,
53 |
54 | /// Enable debug logging.
55 | #[clap(long)]
56 | verbose: bool,
57 | }
58 |
59 | #[derive(Subcommand)]
60 | enum Commands {
61 | /// Daemon subcommand
62 | /// Starts aw-sync as a daemon, which will sync every 5 minutes.
63 | Daemon {},
64 |
65 | /// Sync subcommand (basic)
66 | ///
67 | /// Pulls remote buckets then pushes local buckets.
68 | Sync {
69 | /// Host(s) to pull from, comma separated. Will pull from all hosts if not specified.
70 | #[clap(long, value_parser=parse_list)]
71 | host: Option<Vec<String>>,
72 | },
73 |
74 | /// Sync subcommand (advanced)
75 | ///
76 | /// Pulls remote buckets then pushes local buckets.
77 | /// First pulls remote buckets in the sync directory to the local aw-server.
78 | /// Then pushes local buckets from the aw-server to the local sync directory.
79 | #[clap(arg_required_else_help = true)]
80 | SyncAdvanced {
81 | /// Date to start syncing from.
82 | /// If not specified, start from beginning.
83 | /// NOTE: might be unstable, as count cannot be used to verify integrity of sync.
84 | /// Format: YYYY-MM-DD
85 | #[clap(long, value_parser=parse_start_date)]
86 | start_date: Option<DateTime<Utc>>,
87 |
88 | /// Specify buckets to sync using a comma-separated list.
89 | /// If not specified, all buckets will be synced.
90 | #[clap(long, value_parser=parse_list)]
91 | buckets: Option<Vec<String>>,
92 |
93 | /// Mode to sync in. Can be "push", "pull", or "both".
94 | /// Defaults to "both".
95 | #[clap(long, default_value = "both")]
96 | mode: sync::SyncMode,
97 |
98 | /// Full path to sync db file
99 | /// Useful for syncing buckets from a specific db file in the sync directory.
100 | /// Must be a valid absolute path to a file in the sync directory.
101 | #[clap(long)]
102 | sync_db: Option<PathBuf>,
103 | },
104 | /// List buckets and their sync status.
105 | List {},
106 | }
107 |
108 | fn parse_start_date(arg: &str) -> Result<DateTime<Utc>, chrono::ParseError> {
109 | chrono::NaiveDate::parse_from_str(arg, "%Y-%m-%d")
110 | .map(|nd| nd.and_time(chrono::NaiveTime::MIN).and_utc())
111 | }
112 |
113 | fn parse_list(arg: &str) -> Result<Vec<String>, clap::Error> {
114 | Ok(arg.split(',').map(|s| s.to_string()).collect())
115 | }
116 |
117 | fn main() -> Result<(), Box<dyn Error>> {
118 | let opts: Opts = Opts::parse();
119 | let verbose = opts.verbose;
120 |
121 | info!("Started aw-sync...");
122 |
123 | aw_server::logging::setup_logger("aw-sync", opts.testing, verbose)?;
124 |
125 | // if sync_dir, set env var
126 | if let Some(sync_dir) = opts.sync_dir {
127 | if !sync_dir.is_absolute() {
128 | Err("Sync dir must be absolute")?
129 | } 130 | 131 | info!("Using sync dir: {}", &sync_dir.display()); 132 | std::env::set_var("AW_SYNC_DIR", sync_dir); 133 | } 134 | 135 | let port = opts 136 | .port 137 | .map(|a| Ok(a)) 138 | .unwrap_or_else(|| util::get_server_port(opts.testing))?; 139 | 140 | let client = AwClient::new(&opts.host, port, "aw-sync")?; 141 | 142 | // if opts.command is None, then we're using the default subcommand (Sync) 143 | match opts.command.unwrap_or(Commands::Daemon {}) { 144 | // Start daemon 145 | Commands::Daemon {} => { 146 | info!("Starting daemon..."); 147 | daemon(&client)?; 148 | } 149 | // Perform basic sync 150 | Commands::Sync { host } => { 151 | // Pull 152 | match host { 153 | Some(hosts) => { 154 | for host in hosts.iter() { 155 | info!("Pulling from host: {}", host); 156 | sync_wrapper::pull(host, &client)?; 157 | } 158 | } 159 | None => { 160 | info!("Pulling from all hosts"); 161 | sync_wrapper::pull_all(&client)?; 162 | } 163 | } 164 | 165 | // Push 166 | info!("Pushing local data"); 167 | sync_wrapper::push(&client)? 168 | } 169 | // Perform two-way sync 170 | Commands::SyncAdvanced { 171 | start_date, 172 | buckets, 173 | mode, 174 | sync_db, 175 | } => { 176 | let sync_dir = dirs::get_sync_dir()?; 177 | if let Some(db_path) = &sync_db { 178 | info!("Using sync db: {}", &db_path.display()); 179 | 180 | if !db_path.is_absolute() { 181 | Err("Sync db path must be absolute")? 182 | } 183 | if !db_path.starts_with(&sync_dir) { 184 | Err("Sync db path must be in sync directory")? 185 | } 186 | } 187 | 188 | let sync_spec = sync::SyncSpec { 189 | path: sync_dir, 190 | path_db: sync_db, 191 | buckets, 192 | start: start_date, 193 | }; 194 | 195 | sync::sync_run(&client, &sync_spec, mode)? 196 | } 197 | 198 | // List all buckets 199 | Commands::List {} => sync::list_buckets(&client)?, 200 | } 201 | 202 | // Needed to give the datastores some time to commit before program is shut down. 203 | // 100ms isn't actually needed, seemed to work fine with as little as 10ms, but I'd rather give 204 | // it some wiggleroom. 
205 | std::thread::sleep(std::time::Duration::from_millis(100));
206 |
207 | Ok(())
208 | }
209 |
210 | fn daemon(client: &AwClient) -> Result<(), Box<dyn Error>> {
211 | let (tx, rx) = channel();
212 |
213 | ctrlc::set_handler(move || {
214 | let _ = tx.send(());
215 | })?;
216 |
217 | loop {
218 | if let Err(e) = daemon_sync_cycle(client) {
219 | error!("Error during sync cycle: {}", e);
220 | // Re-throw the error
221 | return Err(e);
222 | }
223 |
224 | info!("Sync pass done, sleeping for 5 minutes");
225 |
226 | // Wait for either the sleep duration or a termination signal
227 | match rx.recv_timeout(Duration::from_secs(300)) {
228 | Ok(_) | Err(RecvTimeoutError::Disconnected) => {
229 | info!("Termination signal received, shutting down.");
230 | break;
231 | }
232 | Err(RecvTimeoutError::Timeout) => {
233 | // Continue the loop if the timeout occurs
234 | }
235 | }
236 | }
237 |
238 | Ok(())
239 | }
240 |
241 | fn daemon_sync_cycle(client: &AwClient) -> Result<(), Box<dyn Error>> {
242 | info!("Pulling from all hosts");
243 | sync_wrapper::pull_all(client)?;
244 |
245 | info!("Pushing local data");
246 | sync_wrapper::push(client)?;
247 |
248 | Ok(())
249 | }
-------------------------------------------------------------------------------- /aw-sync/src/sync_wrapper.rs: --------------------------------------------------------------------------------
1 | use std::error::Error;
2 | use std::fs;
3 |
4 | use crate::sync::{sync_run, SyncMode, SyncSpec};
5 | use aw_client_rust::blocking::AwClient;
6 |
7 | pub fn pull_all(client: &AwClient) -> Result<(), Box<dyn Error>> {
8 | let hostnames = crate::util::get_remotes()?;
9 | for host in hostnames {
10 | pull(&host, client)?
11 | }
12 | Ok(())
13 | }
14 |
15 | pub fn pull(host: &str, client: &AwClient) -> Result<(), Box<dyn Error>> {
16 | client.wait_for_start()?;
17 |
18 | // Path to the sync folder
19 | // Sync folder is structured ./{hostname}/{device_id}/test.db
20 | let sync_root_dir = crate::dirs::get_sync_dir().map_err(|_| "Could not get sync dir")?;
21 | let sync_dir = sync_root_dir.join(host);
22 | let dbs = fs::read_dir(&sync_dir)?
23 | .filter_map(Result::ok)
24 | .filter(|entry| entry.path().is_dir())
25 | .map(|entry| fs::read_dir(entry.path()))
26 | .filter_map(Result::ok)
27 | .flatten()
28 | .filter_map(Result::ok)
29 | .filter(|entry| {
30 | entry.path().is_file()
31 | && entry.path().extension().and_then(|os_str| os_str.to_str()) == Some("db")
32 | })
33 | .collect::<Vec<_>>();
34 |
35 | // if more than one db, warn and use the largest one
36 | if dbs.len() > 1 {
37 | warn!(
38 | "More than one db found in sync folder for host, choosing largest db {:?}",
39 | dbs
40 | );
41 | }
42 |
43 | let db = dbs
44 | .into_iter()
45 | .max_by_key(|entry| entry.metadata().map(|m| m.len()).unwrap_or(0))
46 | .ok_or_else(|| format!("No db found in sync folder {:?}", sync_dir))?;
47 |
48 | let sync_spec = SyncSpec {
49 | path: sync_dir.clone(),
50 | path_db: Some(db.path().clone()),
51 | buckets: Some(vec![
52 | format!("aw-watcher-window_{}", host),
53 | format!("aw-watcher-afk_{}", host),
54 | ]),
55 | start: None,
56 | };
57 | sync_run(client, &sync_spec, SyncMode::Pull)?;
58 |
59 | Ok(())
60 | }
61 |
62 | pub fn push(client: &AwClient) -> Result<(), Box<dyn Error>> {
63 | let sync_dir = crate::dirs::get_sync_dir()
64 | .map_err(|_| "Could not get sync dir")?
65 | .join(&client.hostname);
66 |
67 | let sync_spec = SyncSpec {
68 | path: sync_dir,
69 | path_db: None,
70 | buckets: Some(vec![
71 | format!("aw-watcher-window_{}", client.hostname),
72 | format!("aw-watcher-afk_{}", client.hostname),
73 | ]),
74 | start: None,
75 | };
76 | sync_run(client, &sync_spec, SyncMode::Push)?;
77 |
78 | Ok(())
79 | }
-------------------------------------------------------------------------------- /aw-sync/src/util.rs: --------------------------------------------------------------------------------
1 | use std::error::Error;
2 | use std::ffi::OsStr;
3 | use std::fs;
4 | use std::fs::File;
5 | use std::io::Read;
6 | use std::path::{Path, PathBuf};
7 |
8 | /// Returns the port of the local aw-server instance
9 | pub fn get_server_port(testing: bool) -> Result<u16, Box<dyn Error>> {
10 | // TODO: get aw-server config more reliably
11 | let aw_server_conf = crate::dirs::get_server_config_path(testing)
12 | .map_err(|_| "Could not get aw-server config path")?;
13 | let fallback: u16 = if testing { 5666 } else { 5600 };
14 | let port = if aw_server_conf.exists() {
15 | let mut file = File::open(&aw_server_conf)?;
16 | let mut contents = String::new();
17 | file.read_to_string(&mut contents)?;
18 | let value: toml::Value = toml::from_str(&contents)?;
19 | value
20 | .get("port")
21 | .and_then(|v| v.as_integer())
22 | .map(|v| v as u16)
23 | .unwrap_or(fallback)
24 | } else {
25 | fallback
26 | };
27 | Ok(port)
28 | }
29 |
30 | /// Check if a directory contains a .db file
31 | fn contains_db_file(dir: &std::path::Path) -> bool {
32 | fs::read_dir(dir)
33 | .ok()
34 | .map(|entries| {
35 | entries.filter_map(Result::ok).any(|entry| {
36 | entry
37 | .path()
38 | .extension()
39 | .map(|ext| ext == "db")
40 | .unwrap_or(false)
41 | })
42 | })
43 | .unwrap_or(false)
44 | }
45 |
46 | /// Check if a directory contains a subdirectory that contains a .db file
47 | fn contains_subdir_with_db_file(dir: &std::path::Path) -> bool {
48 | fs::read_dir(dir)
49 | .ok()
50 | .map(|entries| {
51 | entries
52 | .filter_map(Result::ok)
53 | .any(|entry| entry.path().is_dir() && contains_db_file(&entry.path()))
54 | })
55 | .unwrap_or(false)
56 | }
57 |
58 | /// Return all remotes in the sync folder
59 | /// Only returns folders that match ./{host}/{device_id}/*.db
60 | // TODO: share logic with find_remotes and find_remotes_nonlocal
61 | pub fn get_remotes() -> Result<Vec<String>, Box<dyn Error>> {
62 | let sync_root_dir = crate::dirs::get_sync_dir()?;
63 | fs::create_dir_all(&sync_root_dir)?;
64 | let hostnames = fs::read_dir(sync_root_dir)?
65 | .filter_map(Result::ok)
66 | .filter(|entry| entry.path().is_dir() && contains_subdir_with_db_file(&entry.path()))
67 | .filter_map(|entry| {
68 | entry
69 | .path()
70 | .file_name()
71 | .and_then(|os_str| os_str.to_str().map(String::from))
72 | })
73 | .collect();
74 | info!("Found remotes: {:?}", hostnames);
75 | Ok(hostnames)
76 | }
77 |
78 | /// Returns a list of all remote dbs
79 | fn find_remotes(sync_directory: &Path) -> std::io::Result<Vec<PathBuf>> {
80 | let dbs = fs::read_dir(sync_directory)?
81 | .map(|res| res.ok().unwrap().path())
82 | .filter(|p| p.is_dir())
83 | .flat_map(|d| fs::read_dir(d).unwrap())
84 | .map(|res| res.ok().unwrap().path())
85 | .filter(|path| path.extension().unwrap_or_else(|| OsStr::new("")) == "db")
86 | .collect();
87 | Ok(dbs)
88 | }
89 |
90 | /// Returns a list of all remotes, excluding local ones
91 | pub fn find_remotes_nonlocal(
92 | sync_directory: &Path,
93 | device_id: &str,
94 | sync_db: Option<&PathBuf>,
95 | ) -> Vec<PathBuf> {
96 | let remotes_all = find_remotes(sync_directory).unwrap();
97 | remotes_all
98 | .into_iter()
99 | // Filter out own remote
100 | .filter(|path| {
101 | !(path
102 | .clone()
103 | .into_os_string()
104 | .into_string()
105 | .unwrap()
106 | .contains(device_id))
107 | })
108 | // If sync_db is Some, return only remotes in that path
109 | .filter(|path| {
110 | if let Some(sync_db) = sync_db {
111 | path.starts_with(sync_db)
112 | } else {
113 | true
114 | }
115 | })
116 | .collect()
117 | }
-------------------------------------------------------------------------------- /aw-sync/test-server.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # port and db used for testing instance
4 |
5 | # If port already set, use that, otherwise, use 5667
6 | PORT=${PORT:-5667}
7 |
8 | DBPATH=/tmp/aw-server-rust-sync-testing/
9 | mkdir -p $DBPATH
10 |
11 | # Set up an isolated ActivityWatch instance
12 | pushd ..
13 | cargo run --bin aw-server -- --testing --port $PORT --dbpath $DBPATH/data.db --no-legacy-import --verbose
-------------------------------------------------------------------------------- /aw-sync/test-sync-pull.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # exit on fail
4 | set -e
5 |
6 | # get script path
7 | SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
8 | pushd $SCRIPTPATH
9 |
10 | # port used for testing instance
11 | PORT=5667
12 |
13 | # if server already running on port 5667, don't start again
14 | if [ "$(lsof -i:$PORT -sTCP:LISTEN -t)" ]; then
15 | echo "ActivityWatch server already running on port $PORT, using that."
16 | else
17 | # Set up an isolated ActivityWatch instance
18 | ./test-server.sh $PORT &
19 | SERVER_PID=$!
20 | fi
21 |
22 |
23 | sleep 1;
24 | SYNCROOTDIR="$HOME/ActivityWatchSync"
25 |
26 |
27 | function sync_host() {
28 | host=$1
29 | SYNCDIR="$SYNCROOTDIR/$host"
30 | dbs=$(find $SYNCDIR -name "*.db")
31 | for db in $dbs; do
32 | # workaround to avoid trying to sync empty database files (size 45056)
33 | if [ "$(stat -c%s $db)" -lt 50000 ]; then
34 | continue
35 | fi
36 |
37 | AWSYNC_ARGS="--port $PORT"
38 | AWSYNC_ARGS_ADV="--sync-dir $SYNCDIR --sync-db $db"
39 | BUCKETS="aw-watcher-window_$host,aw-watcher-afk_$host"
40 |
41 | echo "Syncing $db to $host"
42 | cargo run --bin aw-sync -- $AWSYNC_ARGS sync-advanced $AWSYNC_ARGS_ADV --mode pull --buckets $BUCKETS
43 | # TODO: If there are no buckets from the expected host, emit a warning at the end.
44 | # (push-script should not have created them to begin with) 45 | done 46 | } 47 | 48 | host=$1 49 | 50 | # if no host given, sync all, otherwise sync only the given host 51 | if [ -z "$host" ]; then 52 | echo "Syncing all hosts" 53 | sleep 0.5 54 | # For each host in the sync directory, pull the data from each database file using aw-sync 55 | # Use `find` to get all directories in the sync directory 56 | hostnames=$(find $SYNCROOTDIR -maxdepth 1 -type d -exec basename {} \;) 57 | # filter out "erb-m2.local" 58 | hostnames=$(echo $hostnames | tr ' ' '\n' | grep -v "erb-m2.local") 59 | # filter out folder not containing subfolders with .db files 60 | for host in $hostnames; do 61 | if [ ! "$(find $SYNCROOTDIR/$host -name "*.db")" ]; then 62 | hostnames=$(echo $hostnames | tr ' ' '\n' | grep -v $host) 63 | fi 64 | done 65 | # Sync each host, file-by-file 66 | for host in $hostnames; do 67 | sync_host $host 68 | done 69 | else 70 | echo "Syncing host $1" 71 | sleep 0.5 72 | sync_host $host 73 | fi 74 | 75 | # kill aw-server-rust (if started by us) 76 | if [ "$SERVER_PID" ]; then 77 | kill $SERVER_PID 78 | fi 79 | -------------------------------------------------------------------------------- /aw-sync/test-sync-push.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Helper script meant to be used to test aw-sync 3 | # Example of a single-entry for cronjobs and the like 4 | 5 | # exit on fail 6 | set -e 7 | 8 | # on Linux, use `hostnamectl`, on macOS, use `hostname` 9 | if [ -x "$(command -v hostnamectl)" ]; then 10 | HOSTNAME=$(hostnamectl --static) 11 | else 12 | HOSTNAME=$(hostname) 13 | fi 14 | 15 | # TODO: Fetch in a cross-platform way (from aw-client command output?) 16 | AWSERVERCONF=~/.config/activitywatch/aw-server/aw-server.toml 17 | 18 | # trim everything in file AWSERVERCONF before '[server-testing]' section 19 | # grep for the aw-server port in aw-server.toml 20 | # if config doesn't exist, assume 5600 21 | if [ -f "$AWSERVERCONF" ]; then 22 | PORT=$(sed '/\[server-testing\]/,/\[.*\]/{//!d}' $AWSERVERCONF | grep -oP 'port = "\K[0-9]+') 23 | else 24 | PORT=5600 25 | fi 26 | 27 | SYNCDIR="$HOME/ActivityWatchSync/$HOSTNAME" 28 | AWSYNC_ARGS="--port $PORT" 29 | AWSYNC_ARGS_ADV="--sync-dir $SYNCDIR" 30 | 31 | # NOTE: Only sync window and AFK buckets, for now 32 | cargo run --bin aw-sync --release -- $AWSYNC_ARGS sync-advanced $AWSYNC_ARGS_ADV --mode push --buckets aw-watcher-window_$HOSTNAME,aw-watcher-afk_$HOSTNAME 33 | -------------------------------------------------------------------------------- /aw-transform/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "aw-transform" 3 | version = "0.1.0" 4 | authors = ["Johan Bjäreholt "] 5 | edition = "2021" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [dependencies] 10 | serde = "1.0" 11 | url = ">=2.2" 12 | serde_json = "1.0" 13 | fancy-regex = "0.12.0" 14 | log = "0.4" 15 | chrono = { version = "0.4", features = ["serde"] } 16 | aw-models = { path = "../aw-models" } 17 | 18 | [dev-dependencies] 19 | criterion = "0.5.1" 20 | 21 | [[bench]] 22 | name = "bench" 23 | harness = false 24 | -------------------------------------------------------------------------------- /aw-transform/benches/bench.rs: -------------------------------------------------------------------------------- 1 | use chrono::Duration; 2 | use criterion::{criterion_group, 
criterion_main, Criterion};
3 | use serde_json::json;
4 | use serde_json::Map;
5 | use serde_json::Value;
6 |
7 | use aw_models::Event;
8 | use aw_transform::*;
9 |
10 | // TODO: Move me to an appropriate place
11 | #[macro_export]
12 | macro_rules! json_map {
13 | { $( $key:literal : $value:expr),* } => {{
14 | use serde_json::Value;
15 | use serde_json::map::Map;
16 | #[allow(unused_mut)]
17 | let mut map : Map<String, Value> = Map::new();
18 | $(
19 | map.insert( $key.to_string(), json!($value) );
20 | )*
21 | map
22 | }};
23 | }
24 |
25 | fn create_events(num_events: i64) -> Vec<Event> {
26 | let mut possible_data = Vec::<Map<String, Value>>::new();
27 | for i in 0..20 {
28 | possible_data.push(json_map! {"number": i});
29 | }
30 | let mut event_list = Vec::new();
31 | for i in 0..num_events {
32 | let e = Event {
33 | id: None,
34 | timestamp: chrono::Utc::now() + Duration::seconds(i),
35 | duration: Duration::seconds(10),
36 | data: possible_data[i as usize % 20].clone(),
37 | };
38 | event_list.push(e);
39 | }
40 | event_list
41 | }
42 |
43 | fn bench_filter_period_intersect(c: &mut Criterion) {
44 | let events2 = create_events(1000);
45 | c.bench_function("1000 events", |b| {
46 | b.iter(|| {
47 | let events1 = create_events(1000);
48 | filter_period_intersect(events1, events2.clone());
49 | })
50 | });
51 | }
52 |
53 | criterion_group!(benches, bench_filter_period_intersect);
54 | criterion_main!(benches);
-------------------------------------------------------------------------------- /aw-transform/src/chunk.rs: --------------------------------------------------------------------------------
1 | use aw_models::Event;
2 |
3 | /// Chunks together events with the same key
4 | ///
5 | /// NOTE: In most cases you should use merge_events_by_keys instead, this
6 | /// transform is mostly just for backwards compatibility with older versions
7 | /// of aw-webui
8 | /// NOTE: Does not support sub-chunking which aw-server-python supports
9 | /// Without sub-chunking it is pretty much the same as merge_events_by_key
10 | ///
11 | /// # Example
12 | /// ```ignore
13 | /// key: a
14 | /// input:
15 | ///   { duration: 1.0, data: { "a": 1, "b": 1 } }
16 | ///   { duration: 1.0, data: { "a": 1, "b": 2 } }
17 | ///   { duration: 1.0, data: { "a": 2, "b": 1 } }
18 | /// output:
19 | ///   { duration: 2.0, data: { "a": 1 } }
20 | ///   { duration: 1.0, data: { "a": 2 } }
21 | /// ```
22 | pub fn chunk_events_by_key(events: Vec<Event>, key: &str) -> Vec<Event> {
23 | let mut chunked_events: Vec<Event> = Vec::new();
24 | for event in events {
25 | if chunked_events.is_empty() && event.data.get(key).is_some() {
26 | // TODO: Add sub-chunks
27 | chunked_events.push(event);
28 | } else {
29 | let val = match event.data.get(key) {
30 | None => continue,
31 | Some(v) => v,
32 | };
33 | let mut last_event = chunked_events.pop().unwrap();
34 | let last_val = last_event.data.get(key).unwrap().clone();
35 | if &last_val == val {
36 | // TODO: Add sub-chunks
37 | last_event.duration = last_event.duration + event.duration;
38 | }
39 | chunked_events.push(last_event);
40 | if &last_val != val {
41 | // TODO: Add sub-chunks
42 | chunked_events.push(event);
43 | }
44 | }
45 | }
46 | chunked_events
47 | }
48 |
49 | #[cfg(test)]
50 | mod tests {
51 | use std::str::FromStr;
52 |
53 | use chrono::DateTime;
54 | use chrono::Duration;
55 | use serde_json::json;
56 |
57 | use aw_models::Event;
58 |
59 | use super::chunk_events_by_key;
60 |
61 | #[test]
62 | fn test_chunk_events_by_key() {
63 | let e1 = Event {
64 | id: None,
65 | timestamp: DateTime::from_str("2000-01-01T00:00:01Z").unwrap(),
66 | duration: Duration::seconds(1),
67 | data: json_map! {"test": json!(1)},
68 | };
69 | let mut e2 = e1.clone();
70 | e2.data = json_map! {"test2": json!(1)};
71 | let e3 = e1.clone();
72 | let mut e4 = e1.clone();
73 | e4.data = json_map! {"test": json!(2)};
74 |
75 | let res = chunk_events_by_key(vec![e1, e2, e3, e4], "test");
76 | assert_eq!(res.len(), 2);
77 | assert_eq!(res[0].duration, Duration::seconds(2));
78 | assert_eq!(res[1].duration, Duration::seconds(1));
79 | }
80 | }
81 |
-------------------------------------------------------------------------------- /aw-transform/src/classify.rs: --------------------------------------------------------------------------------
1 | /// Transforms for classifying (tagging and categorizing) events.
2 | ///
3 | /// Based on code in aw_research: https://github.com/ActivityWatch/aw-research/blob/master/aw_research/classify.py
4 | use aw_models::Event;
5 | use fancy_regex::Regex;
6 |
7 | pub enum Rule {
8 | None,
9 | Regex(RegexRule),
10 | }
11 |
12 | impl RuleTrait for Rule {
13 | fn matches(&self, event: &Event) -> bool {
14 | match self {
15 | Rule::None => false,
16 | Rule::Regex(rule) => rule.matches(event),
17 | }
18 | }
19 | }
20 |
21 | trait RuleTrait {
22 | fn matches(&self, event: &Event) -> bool;
23 | }
24 |
25 | pub struct RegexRule {
26 | regex: Regex,
27 | }
28 |
29 | impl RegexRule {
30 | pub fn new(regex_str: &str, ignore_case: bool) -> Result<Self, fancy_regex::Error> {
31 | // can't use `RegexBuilder::case_insensitive` because it's not supported by fancy_regex,
32 | // so we need to prefix with `(?i)` to make it case insensitive.
33 | let regex = if ignore_case {
34 | let regex_str = format!("(?i){regex_str}");
35 | Regex::new(&regex_str)?
36 | } else {
37 | Regex::new(regex_str)?
38 | };
39 |
40 | Ok(RegexRule { regex })
41 | }
42 | }
43 |
44 | /// This struct defines the rules for classification.
45 | /// For now it just needs to contain the regex to match with, but in the future it might contain a
46 | /// glob-pattern, or other options for classifying.
47 | /// Its purpose is to make the API easy to extend in the future without having to break backwards
48 | /// compatibility (or have to maintain "old" query2 functions).
49 | impl RuleTrait for RegexRule {
50 | fn matches(&self, event: &Event) -> bool {
51 | event
52 | .data
53 | .values()
54 | .filter(|val| val.is_string())
55 | .any(|val| self.regex.is_match(val.as_str().unwrap()).unwrap())
56 | }
57 | }
58 |
59 | impl From<Regex> for Rule {
60 | fn from(re: Regex) -> Self {
61 | Rule::Regex(RegexRule { regex: re })
62 | }
63 | }
64 |
65 | /// Categorizes a list of events
66 | ///
67 | /// An event can only have one category, although the category may have a hierarchy,
68 | /// for instance: "Work -> ActivityWatch -> aw-server-rust"
69 | /// If multiple categories match, the deepest one will be chosen.
70 | pub fn categorize(mut events: Vec<Event>, rules: &[(Vec<String>, Rule)]) -> Vec<Event> {
71 | let mut classified_events = Vec::new();
72 | for event in events.drain(..) {
73 | classified_events.push(categorize_one(event, rules));
74 | }
75 | classified_events
76 | }
77 |
78 | fn categorize_one(mut event: Event, rules: &[(Vec<String>, Rule)]) -> Event {
79 | let mut category: Vec<String> = vec!["Uncategorized".into()];
80 | for (cat, rule) in rules {
81 | if rule.matches(&event) {
82 | category = _pick_highest_ranking_category(category, cat);
83 | }
84 | }
85 | event
86 | .data
87 | .insert("$category".into(), serde_json::json!(category));
88 | event
89 | }
90 |
91 | /// Tags a list of events
92 | ///
93 | /// An event can have many tags (as opposed to only one category) which will be put into the `$tags` key of
94 | /// the event data object.
95 | pub fn tag(mut events: Vec<Event>, rules: &[(String, Rule)]) -> Vec<Event> {
96 | let mut events_tagged = Vec::new();
97 | for event in events.drain(..) {
98 | events_tagged.push(tag_one(event, rules));
99 | }
100 | events_tagged
101 | }
102 |
103 | fn tag_one(mut event: Event, rules: &[(String, Rule)]) -> Event {
104 | let mut tags: Vec<String> = Vec::new();
105 | for (cls, rule) in rules {
106 | if rule.matches(&event) {
107 | tags.push(cls.clone());
108 | }
109 | }
110 | tags.sort_unstable();
111 | tags.dedup();
112 | event.data.insert("$tags".into(), serde_json::json!(tags));
113 | event
114 | }
115 |
116 | fn _pick_highest_ranking_category(acc: Vec<String>, item: &[String]) -> Vec<String> {
117 | if item.len() >= acc.len() {
118 | // If tag is category with greater or equal depth than current, then choose the new one instead.
119 | item.to_vec()
120 | } else {
121 | acc
122 | }
123 | }
124 |
125 | #[test]
126 | fn test_rule() {
127 | let mut e_match = Event::default();
128 | e_match
129 | .data
130 | .insert("test".into(), serde_json::json!("just a test"));
131 |
132 | let mut e_no_match = Event::default();
133 | e_no_match
134 | .data
135 | .insert("nonono".into(), serde_json::json!("no match!"));
136 |
137 | let rule_from_regex = Rule::from(Regex::new("test").unwrap());
138 | let rule_from_new = Rule::Regex(RegexRule::new("test", false).unwrap());
139 | let rule_none = Rule::None;
140 | assert!(rule_from_regex.matches(&e_match));
141 | assert!(rule_from_new.matches(&e_match));
142 | assert!(!rule_from_regex.matches(&e_no_match));
143 | assert!(!rule_from_new.matches(&e_no_match));
144 |
145 | assert!(!rule_none.matches(&e_match));
146 | }
147 |
148 | #[test]
149 | fn test_rule_lookahead() {
150 | // Originally requested by a user here, to match aw-server-python: https://canary.discord.com/channels/755040852727955476/755334543891759194/994291987878522961
151 | let mut e_match = Event::default();
152 | e_match
153 | .data
154 | .insert("test".into(), serde_json::json!("testing lookahead"));
155 |
156 | let rule_from_regex = Rule::from(Regex::new("testing (?!lookahead)").unwrap());
157 | assert!(!rule_from_regex.matches(&e_match));
158 | }
159 |
160 | #[test]
161 | fn test_categorize() {
162 | let mut e = Event::default();
163 | e.data
164 | .insert("test".into(), serde_json::json!("just a test"));
165 |
166 | let mut events = vec![e];
167 | let rules: Vec<(Vec<String>, Rule)> = vec![
168 | (
169 | vec!["Test".into()],
170 | Rule::from(Regex::new(r"test").unwrap()),
171 | ),
172 | (
173 | vec!["Test".into(), "Subtest".into()],
174 | Rule::from(Regex::new(r"test").unwrap()),
175 | ),
176 | (
177 | vec!["Other".into()],
178 | Rule::from(Regex::new(r"nonmatching").unwrap()),
179 | ),
180 | ];
181 | events = categorize(events, &rules);
182 |
183 | assert_eq!(events.len(), 1);
184 | assert_eq!(
185 | events.first().unwrap().data.get("$category").unwrap(),
186 | &serde_json::json!(vec!["Test", "Subtest"])
187 | );
188 | }
189 |
190 | #[test]
191 | fn test_categorize_uncategorized() {
192 | // Checks that the category correctly becomes uncategorized when no category matches
193 | let mut e = Event::default();
194 | e.data
195 | .insert("test".into(), serde_json::json!("just a test"));
196 |
197 | let mut events = vec![e];
198 | let rules: Vec<(Vec<String>, Rule)> = vec![(
199 | vec!["Non-matching".into(), "test".into()],
200 | Rule::from(Regex::new(r"not going to match").unwrap()),
201 | )];
202 | events = categorize(events, &rules);
203 |
204 | assert_eq!(events.len(), 1);
205 | assert_eq!(
206 | events.first().unwrap().data.get("$category").unwrap(),
207 | &serde_json::json!(vec!["Uncategorized"])
208 | );
209 | }
210 |
211 | #[test]
212 | fn test_tag() {
213 | let mut e = Event::default();
214 | e.data
215 | .insert("test".into(), serde_json::json!("just a test"));
216 |
217 | let mut events = vec![e];
218 | let rules: Vec<(String, Rule)> = vec![
219 | ("test".into(), Rule::from(Regex::new(r"test").unwrap())),
220 | ("test-2".into(), Rule::from(Regex::new(r"test").unwrap())),
221 | (
222 | "nomatch".into(),
223 | Rule::from(Regex::new(r"nomatch").unwrap()),
224 | ),
225 | ];
226 | events = tag(events, &rules);
227 |
228 | assert_eq!(events.len(), 1);
229 |
230 | let event = events.first().unwrap();
231 | let tags = event.data.get("$tags").unwrap();
232 | assert_eq!(tags, &serde_json::json!(vec!["test", "test-2"]));
233 | }
-------------------------------------------------------------------------------- /aw-transform/src/filter_keyvals.rs: --------------------------------------------------------------------------------
1 | use fancy_regex::Regex;
2 | use serde_json::value::Value;
3 |
4 | use aw_models::Event;
5 |
6 | /// Drops events not matching the specified key and value(s)
7 | ///
8 | /// # Example
9 | /// ```ignore
10 | /// key: a
11 | /// vals: [1,2]
12 | /// input:  [a:1][a:2][a:3][b:4]
13 | /// output: [a:1][a:2]
14 | /// ```
15 | pub fn filter_keyvals(mut events: Vec<Event>, key: &str, vals: &[Value]) -> Vec<Event> {
16 | let mut filtered_events = Vec::new();
17 | for event in events.drain(..) {
18 | if let Some(v) = event.data.get(key) {
19 | for val in vals {
20 | if val == v {
21 | filtered_events.push(event);
22 | break;
23 | }
24 | }
25 | }
26 | }
27 | filtered_events
28 | }
29 |
30 | /// Drops events not matching the regex on the value for a specified key
31 | /// Will only match if the value is a string
32 | ///
33 | /// # Example
34 | /// ```ignore
35 | /// key: a
36 | /// regex: "[A-Z]+"
37 | /// input:  [a:"HELLO"][a:"hello"][a:3][b:"HELLO"]
38 | /// output: [a:"HELLO"]
39 | /// ```
40 | pub fn filter_keyvals_regex(mut events: Vec<Event>, key: &str, regex: &Regex) -> Vec<Event> {
41 | let mut filtered_events = Vec::new();
42 |
43 | for event in events.drain(..) {
44 | if let Some(value) = event.data.get(key).and_then(|v| v.as_str()) {
45 | match regex.is_match(value) {
46 | Ok(true) => filtered_events.push(event),
47 | Ok(false) => (),
48 | Err(err) => warn!("Failed to run regex: {}", err),
49 | };
50 | }
51 | }
52 | filtered_events
53 | }
54 |
55 | /// Drops events matching the specified key and value(s). Opposite of filter_keyvals.
56 | ///
57 | /// # Example
58 | /// ```ignore
59 | /// key: a
60 | /// vals: [1,2]
61 | /// input:  [a:1][a:2][a:3][b:4]
62 | /// output: [a:3][b:4]
63 | /// ```
64 | pub fn exclude_keyvals(mut events: Vec<Event>, key: &str, vals: &[Value]) -> Vec<Event> {
65 | let mut filtered_events = Vec::new();
66 | 'events: for event in events.drain(..)
{
67 | if let Some(v) = event.data.get(key) {
68 | for val in vals {
69 | if val == v {
70 | continue 'events;
71 | }
72 | }
73 | }
74 | filtered_events.push(event);
75 | }
76 | filtered_events
77 | }
78 |
79 | #[cfg(test)]
80 | mod tests {
81 | use std::str::FromStr;
82 |
83 | use chrono::{DateTime, Duration};
84 | use fancy_regex::RegexBuilder;
85 | use serde_json::json;
86 |
87 | use aw_models::Event;
88 |
89 | use super::{exclude_keyvals, filter_keyvals, filter_keyvals_regex};
90 |
91 | #[test]
92 | fn test_filter_keyvals() {
93 | let e1 = Event {
94 | id: None,
95 | timestamp: DateTime::from_str("2000-01-01T00:00:00Z").unwrap(),
96 | duration: Duration::seconds(1),
97 | data: json_map! {"test": json!(1)},
98 | };
99 | let mut e2 = e1.clone();
100 | e2.data = json_map! {"test": json!(1), "test2": json!(1)};
101 | let mut e3 = e1.clone();
102 | e3.data = json_map! {"test2": json!(2)};
103 | let res = filter_keyvals(vec![e1.clone(), e2.clone(), e3], "test", &vec![json!(1)]);
104 | assert_eq!(vec![e1, e2], res);
105 | }
106 |
107 | #[test]
108 | fn test_filter_keyvals_regex() {
109 | let e1 = Event {
110 | id: None,
111 | timestamp: DateTime::from_str("2000-01-01T00:00:00Z").unwrap(),
112 | duration: Duration::seconds(1),
113 | data: json_map! {"key1": json!("value1")},
114 | };
115 | let mut e2 = e1.clone();
116 | e2.data = json_map! {"key1": json!("value2")};
117 | let mut e3 = e1.clone();
118 | e3.data = json_map! {"key2": json!("value3")};
119 |
120 | let events = vec![e1.clone(), e2.clone(), e3.clone()];
121 |
122 | let regex_value = RegexBuilder::new("value").build().unwrap();
123 | let regex_value1 = RegexBuilder::new("value1").build().unwrap();
124 |
125 | let res = filter_keyvals_regex(events.clone(), "key1", &regex_value);
126 | assert_eq!(vec![e1.clone(), e2], res);
127 | let res = filter_keyvals_regex(events.clone(), "key1", &regex_value1);
128 | assert_eq!(vec![e1], res);
129 | let res = filter_keyvals_regex(events.clone(), "key2", &regex_value);
130 | assert_eq!(vec![e3], res);
131 | let res = filter_keyvals_regex(events.clone(), "key2", &regex_value1);
132 | assert_eq!(0, res.len());
133 | let res = filter_keyvals_regex(events, "key3", &regex_value);
134 | assert_eq!(0, res.len());
135 | }
136 |
137 | #[test]
138 | fn test_filter_keyvals_regex_non_string_data_value() {
139 | let e1 = Event {
140 | id: None,
141 | timestamp: DateTime::from_str("2000-01-01T00:00:00Z").unwrap(),
142 | duration: Duration::seconds(1),
143 | data: json_map! {"key1": json!(100)},
144 | };
145 | let events = vec![e1.clone()];
146 | let regex_value = RegexBuilder::new("value").build().unwrap();
147 | let res = filter_keyvals_regex(events.clone(), "key1", &regex_value);
148 | assert!(res.is_empty());
149 | }
150 |
151 | #[test]
152 | fn test_exclude_keyvals() {
153 | let e1 = Event {
154 | id: None,
155 | timestamp: DateTime::from_str("2000-01-01T00:00:00Z").unwrap(),
156 | duration: Duration::seconds(1),
157 | data: json_map! {"test": json!(1)},
158 | };
159 | let mut e2 = e1.clone();
160 | e2.data = json_map! {"test": json!(1), "test2": json!(2)};
161 | let mut e3 = e1.clone();
162 | e3.data = json_map!
{"test": json!(2)}; 163 | let res = exclude_keyvals(vec![e1.clone(), e2.clone(), e3], "test", &vec![json!(2)]); 164 | assert_eq!(vec![e1, e2], res); 165 | } 166 | } 167 | -------------------------------------------------------------------------------- /aw-transform/src/filter_period.rs: -------------------------------------------------------------------------------- 1 | use aw_models::Event; 2 | use chrono::Duration; 3 | 4 | use crate::sort_by_timestamp; 5 | 6 | /// Removes events not intersecting with the provided filter_events 7 | /// 8 | /// Usually used to filter buckets unaware if the user is making any activity with an bucket which 9 | /// is aware if the user is at the computer or not. 10 | /// For example the events from aw-watcher-window should be called with filter_period_intersect 11 | /// with the "not-afk" events from aw-watcher-afk to give events with durations of only when the 12 | /// user is at the computer. 13 | /// 14 | /// # Example 15 | /// ```ignore 16 | /// events: [a ][b ] 17 | /// filter_events: [ ] [ ] 18 | /// output: [a ] [a ][b ] 19 | /// ``` 20 | pub fn filter_period_intersect(events: Vec, filter_events: Vec) -> Vec { 21 | if events.len() == 0 || filter_events.len() == 0 { 22 | return Vec::new(); 23 | } 24 | 25 | let mut filtered_events = Vec::new(); 26 | let events = sort_by_timestamp(events); 27 | let filter_events = sort_by_timestamp(filter_events); 28 | 29 | let mut events_iter = events.into_iter(); 30 | let mut filter_events_iter = filter_events.into_iter(); 31 | let mut cur_event = events_iter.next().unwrap(); 32 | let mut cur_filter_event = filter_events_iter.next().unwrap(); 33 | 34 | loop { 35 | let event_endtime = cur_event.calculate_endtime(); 36 | let filter_endtime = cur_filter_event.calculate_endtime(); 37 | if cur_event.duration == Duration::seconds(0) || event_endtime <= cur_filter_event.timestamp 38 | { 39 | match events_iter.next() { 40 | Some(e) => { 41 | cur_event = e; 42 | continue; 43 | } 44 | None => return filtered_events, 45 | } 46 | } 47 | if cur_event.timestamp >= cur_filter_event.calculate_endtime() { 48 | match filter_events_iter.next() { 49 | Some(e) => { 50 | cur_filter_event = e; 51 | continue; 52 | } 53 | None => return filtered_events, 54 | } 55 | } 56 | 57 | let mut e = cur_event.clone(); 58 | e.timestamp = std::cmp::max(e.timestamp, cur_filter_event.timestamp); 59 | let endtime = std::cmp::min(event_endtime, filter_endtime); 60 | e.duration = endtime - e.timestamp; 61 | 62 | // trim current event 63 | let old_timestamp = cur_event.timestamp; 64 | cur_event.timestamp = e.timestamp + e.duration; 65 | cur_event.duration = old_timestamp + cur_event.duration - cur_event.timestamp; 66 | 67 | filtered_events.push(e); 68 | } 69 | } 70 | 71 | #[cfg(test)] 72 | mod tests { 73 | use std::str::FromStr; 74 | 75 | use chrono::DateTime; 76 | use chrono::Duration; 77 | use chrono::Utc; 78 | use serde_json::json; 79 | 80 | use aw_models::Event; 81 | 82 | use super::filter_period_intersect; 83 | 84 | #[test] 85 | fn test_filter_period_intersect() { 86 | let e1 = Event { 87 | id: None, 88 | timestamp: DateTime::from_str("2000-01-01T00:00:01Z").unwrap(), 89 | duration: Duration::seconds(1), 90 | data: json_map! 
{"test": json!(1)}, 91 | }; 92 | let mut e2 = e1.clone(); 93 | e2.timestamp = DateTime::from_str("2000-01-01T00:00:02Z").unwrap(); 94 | let mut e3 = e1.clone(); 95 | e3.timestamp = DateTime::from_str("2000-01-01T00:00:03Z").unwrap(); 96 | let mut e4 = e1.clone(); 97 | e4.timestamp = DateTime::from_str("2000-01-01T00:00:04Z").unwrap(); 98 | let mut e5 = e1.clone(); 99 | e5.timestamp = DateTime::from_str("2000-01-01T00:00:05Z").unwrap(); 100 | 101 | let filter_event = Event { 102 | id: None, 103 | timestamp: DateTime::from_str("2000-01-01T00:00:02.5Z").unwrap(), 104 | duration: Duration::seconds(2), 105 | data: json_map! {"test": json!(1)}, 106 | }; 107 | 108 | let filtered_events = 109 | filter_period_intersect(vec![e1, e2, e3, e4, e5], vec![filter_event.clone()]); 110 | assert_eq!(filtered_events.len(), 3); 111 | assert_eq!(filtered_events[0].duration, Duration::milliseconds(500)); 112 | assert_eq!(filtered_events[1].duration, Duration::milliseconds(1000)); 113 | assert_eq!(filtered_events[2].duration, Duration::milliseconds(500)); 114 | 115 | let dt: DateTime = DateTime::from_str("2000-01-01T00:00:02.500Z").unwrap(); 116 | assert_eq!(filtered_events[0].timestamp, dt); 117 | let dt: DateTime = DateTime::from_str("2000-01-01T00:00:03.000Z").unwrap(); 118 | assert_eq!(filtered_events[1].timestamp, dt); 119 | let dt: DateTime = DateTime::from_str("2000-01-01T00:00:04.000Z").unwrap(); 120 | assert_eq!(filtered_events[2].timestamp, dt); 121 | 122 | let timestamp_01s = DateTime::from_str("2000-01-01T00:00:01Z").unwrap(); 123 | let e = Event { 124 | id: None, 125 | timestamp: timestamp_01s, 126 | duration: Duration::seconds(1), 127 | data: json_map! {"test": json!(1)}, 128 | }; 129 | let mut f2 = filter_event.clone(); 130 | f2.timestamp = DateTime::from_str("2000-01-01T00:00:00Z").unwrap(); 131 | f2.duration = Duration::milliseconds(1500); 132 | let res = filter_period_intersect(vec![e.clone()], vec![f2]); 133 | assert_eq!(res[0].timestamp, timestamp_01s); 134 | assert_eq!(res[0].duration, Duration::milliseconds(500)); 135 | 136 | let timestamp_01_5s = DateTime::from_str("2000-01-01T00:00:01.5Z").unwrap(); 137 | let mut f3 = filter_event.clone(); 138 | f3.timestamp = timestamp_01_5s; 139 | f3.duration = Duration::milliseconds(1000); 140 | let res = filter_period_intersect(vec![e.clone()], vec![f3]); 141 | assert_eq!(res[0].timestamp, timestamp_01_5s); 142 | assert_eq!(res[0].duration, Duration::milliseconds(500)); 143 | 144 | let mut f4 = filter_event.clone(); 145 | f4.timestamp = DateTime::from_str("2000-01-01T00:00:01.5Z").unwrap(); 146 | f4.duration = Duration::milliseconds(100); 147 | let res = filter_period_intersect(vec![e.clone()], vec![f4]); 148 | assert_eq!(res[0].timestamp, timestamp_01_5s); 149 | assert_eq!(res[0].duration, Duration::milliseconds(100)); 150 | 151 | let mut f5 = filter_event.clone(); 152 | f5.timestamp = DateTime::from_str("2000-01-01T00:00:00Z").unwrap(); 153 | f5.duration = Duration::seconds(10); 154 | let res = filter_period_intersect(vec![e.clone()], vec![f5]); 155 | assert_eq!(res[0].timestamp, timestamp_01s); 156 | assert_eq!(res[0].duration, Duration::milliseconds(1000)); 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /aw-transform/src/find_bucket.rs: -------------------------------------------------------------------------------- 1 | use aw_models::Bucket; 2 | 3 | /// Finds the first bucket which starts with the specified string, and optionally matches a 4 | /// hostname. 
--------------------------------------------------------------------------------
/aw-transform/src/heartbeat.rs:
--------------------------------------------------------------------------------
1 | use aw_models::Event;
2 | 
3 | /// Returns a merged event if two events have the same data and are within the pulsetime
4 | ///
5 | /// # Example
6 | ///
7 | /// ```ignore
8 | /// pulsetime: 1 second (one space)
9 | /// input:  [a] [a]  [a][b]
10 | /// output: [a    ]  [a][b]
11 | /// ```
12 | pub fn heartbeat(last_event: &Event, heartbeat: &Event, pulsetime: f64) -> Option<Event> {
13 |     // Verify that data is the same
14 |     if heartbeat.data != last_event.data {
15 |         debug!("Can't merge, data is different");
16 |         return None;
17 |     }
18 | 
19 |     let last_event_endtime = last_event.calculate_endtime();
20 |     let heartbeat_endtime = heartbeat.calculate_endtime();
21 | 
22 |     // Verify that timestamps intersect (including pulsetime)
23 |     let pulsetime_ns: i64 = (pulsetime * 1_000_000_000.0).round() as i64;
24 |     let last_endtime_allowed = last_event_endtime + chrono::Duration::nanoseconds(pulsetime_ns);
25 |     if last_event.timestamp > heartbeat.timestamp {
26 |         debug!("Can't merge, last event timestamp is after heartbeat timestamp");
27 |         return None;
28 |     }
29 |     if heartbeat.timestamp > last_endtime_allowed {
30 |         debug!("Can't merge, heartbeat timestamp is after last event endtime");
31 |         return None;
32 |     }
33 | 
34 |     let starttime = if heartbeat.timestamp < last_event.timestamp {
35 |         &heartbeat.timestamp
36 |     } else {
37 |         &last_event.timestamp
38 |     };
39 | 
40 |     let endtime = if last_event_endtime > heartbeat_endtime {
41 |         &last_event_endtime
42 |     } else {
43 |         &heartbeat_endtime
44 |     };
45 | 
46 |     let duration = endtime.signed_duration_since(*starttime);
47 |     if duration.num_nanoseconds().unwrap() < 0 {
48 |         debug!("Merging heartbeats would result in a negative duration, refusing to merge!");
49 |         return None;
50 |     }
51 | 
52 |     // Success, return the merged event
53 |     Some(Event {
54 |         id: None,
55 |         timestamp: *starttime,
56 |         duration,
57 |         data: last_event.data.clone(),
58 |     })
59 | }
60 | 
61 | #[cfg(test)]
62 | mod tests {
63 |     use chrono::Duration;
64 |     use chrono::Utc;
65 |     use serde_json::json;
66 | 
67 |     use aw_models::Event;
68 | 
69 |     use super::heartbeat;
70 | 
71 |     #[test]
72 |     fn test_heartbeat_pulsetime() {
73 |         let now = Utc::now();
74 |         let event1 = Event {
75 |             id: None,
76 |             timestamp: now,
77 |             duration: Duration::seconds(1),
78 |             data: json_map! {"test": json!(1)},
79 |         };
80 |         let heartbeat1 = Event {
81 |             id: None,
82 |             timestamp: now + Duration::seconds(2),
83 |             duration: Duration::seconds(1),
84 |             data: json_map! {"test": json!(1)},
85 |         };
86 | 
87 |         // Merge result
88 |         let res_merge = heartbeat(&event1, &heartbeat1, 2.0).unwrap();
89 |         assert_eq!(res_merge.timestamp, now);
90 |         assert_eq!(res_merge.duration, Duration::seconds(3));
91 |         assert_eq!(res_merge.data, event1.data);
92 | 
93 |         // No merge result
94 |         let res_no_merge = heartbeat(&event1, &heartbeat1, 0.0);
95 |         assert!(res_no_merge.is_none());
96 | 
97 |         // TODO: needs more tests!
98 |     }
99 | 
100 |     #[test]
101 |     fn test_heartbeat_long_pulse_merge() {
102 |         let now = Utc::now();
103 |         let event = Event {
104 |             id: None,
105 |             timestamp: now,
106 |             duration: Duration::seconds(1),
107 |             data: json_map! {"test": json!(1)},
108 |         };
109 |         let long_pulse_event = Event {
110 |             id: None,
111 |             // note that no duration is sent, which is how aw-client works
112 |             duration: Duration::seconds(0),
113 |             timestamp: now + Duration::seconds(120),
114 |             data: json_map! {"test": json!(1)},
115 |         };
116 | 
117 |         // Merge result
118 |         let res_merge = heartbeat(&event, &long_pulse_event, 120.0).unwrap();
119 |         assert_eq!(res_merge.timestamp, now);
120 |         assert_eq!(res_merge.data, event.data);
121 |         assert_eq!(res_merge.duration, Duration::seconds(120));
122 | 
123 |         // No merge result when pulsetime is less than the timestamp delta between heartbeats
124 |         let res_no_merge = heartbeat(&event, &long_pulse_event, 60.0);
125 |         assert!(res_no_merge.is_none());
126 |     }
127 | 
128 |     #[test]
129 |     fn test_heartbeat_data() {
130 |         let now = Utc::now();
131 |         let event = Event {
132 |             id: None,
133 |             timestamp: now,
134 |             duration: Duration::seconds(0),
135 |             data: json_map! {"test": json!(1)},
136 |         };
137 |         let heartbeat_same_data = Event {
138 |             id: None,
139 |             timestamp: now,
140 |             duration: Duration::seconds(1),
141 |             data: json_map! {"test": json!(1)},
142 |         };
143 | 
144 |         // Data is same, should merge
145 |         let res_merge = heartbeat(&event, &heartbeat_same_data, 1.0);
146 |         assert!(res_merge.is_some());
147 | 
148 |         let heartbeat_different_data = Event {
149 |             id: None,
150 |             timestamp: now,
151 |             duration: Duration::seconds(1),
152 |             data: json_map! {"test": json!(2)},
153 |         };
154 |         // Data is different, should not merge
155 |         let res_merge = heartbeat(&event, &heartbeat_different_data, 1.0);
156 |         assert!(res_merge.is_none());
157 |     }
158 | 
159 |     #[test]
160 |     fn test_heartbeat_same_timestamp() {
161 |         let now = Utc::now();
162 |         let event = Event {
163 |             id: None,
164 |             timestamp: now,
165 |             duration: Duration::seconds(0),
166 |             data: json_map! {"test": json!(1)},
167 |         };
168 |         let heartbeat_same_data = Event {
169 |             id: None,
170 |             timestamp: now,
171 |             duration: Duration::seconds(1),
172 |             data: json_map! {"test": json!(1)},
173 |         };
174 | 
175 |         // Should merge
176 |         let res_merge = heartbeat(&event, &heartbeat_same_data, 1.0).unwrap();
177 |         assert_eq!(Duration::seconds(1), res_merge.duration);
178 | 
179 |         // Order shouldn't matter, should merge anyway
180 |         let res_merge = heartbeat(&heartbeat_same_data, &event, 1.0).unwrap();
181 |         assert_eq!(Duration::seconds(1), res_merge.duration);
182 |     }
183 | }
184 | 
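A usage sketch of the heartbeat transform (not part of the source tree), showing how the pulsetime decides whether a gap between two events gets bridged; the "editor" data is invented for illustration:

```rust
use aw_models::Event;
use aw_transform::{heartbeat, json_map};
use chrono::{Duration, Utc};
use serde_json::json;

fn main() {
    let now = Utc::now();
    let last_event = Event {
        id: None,
        timestamp: now,
        duration: Duration::seconds(5),
        data: json_map! {"app": json!("editor")},
    };
    // A zero-duration heartbeat arriving 7 s later, i.e. 2 s after last_event ends.
    let hb = Event {
        id: None,
        timestamp: now + Duration::seconds(7),
        duration: Duration::seconds(0),
        data: json_map! {"app": json!("editor")},
    };

    // A 3 s pulsetime bridges the 2 s gap, so the events merge into one 7 s event...
    let merged = heartbeat(&last_event, &hb, 3.0).unwrap();
    assert_eq!(merged.duration, Duration::seconds(7));

    // ...while a 1 s pulsetime does not.
    assert!(heartbeat(&last_event, &hb, 1.0).is_none());
}
```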
--------------------------------------------------------------------------------
/aw-transform/src/lib.rs:
--------------------------------------------------------------------------------
1 | #[macro_use]
2 | extern crate log;
3 | 
4 | // TODO: Move this to some more suitable place
5 | #[macro_export]
6 | macro_rules! json_map {
7 |     { $( $key:literal : $value:expr),* } => {{
8 |         use serde_json::Value;
9 |         use serde_json::map::Map;
10 |         #[allow(unused_mut)]
11 |         let mut map: Map<String, Value> = Map::new();
12 |         $(
13 |             map.insert( $key.to_string(), json!($value) );
14 |         )*
15 |         map
16 |     }};
17 | }
18 | 
19 | pub mod classify;
20 | 
21 | mod heartbeat;
22 | pub use heartbeat::heartbeat;
23 | 
24 | mod find_bucket;
25 | pub use find_bucket::find_bucket;
26 | 
27 | mod flood;
28 | pub use flood::flood;
29 | 
30 | mod merge;
31 | pub use merge::merge_events_by_keys;
32 | 
33 | mod chunk;
34 | pub use chunk::chunk_events_by_key;
35 | 
36 | mod sort;
37 | pub use sort::{sort_by_duration, sort_by_timestamp};
38 | 
39 | mod filter_keyvals;
40 | pub use filter_keyvals::{exclude_keyvals, filter_keyvals, filter_keyvals_regex};
41 | 
42 | mod filter_period;
43 | pub use filter_period::filter_period_intersect;
44 | 
45 | mod split_url;
46 | pub use split_url::split_url_event;
47 | 
48 | mod period_union;
49 | pub use period_union::period_union;
50 | 
51 | mod union_no_overlap;
52 | pub use union_no_overlap::union_no_overlap;
53 | 
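A small sketch of what the exported json_map! macro produces (not part of the source tree); serde_json is assumed as a dependency, and the caller needs serde_json's json! macro in scope since the expansion calls it:

```rust
use aw_transform::json_map;
use serde_json::json;

fn main() {
    // Expands to a serde_json::Map<String, Value> with the given entries.
    let data = json_map! {"app": json!("firefox"), "title": json!("Example")};
    assert_eq!(data.get("app"), Some(&json!("firefox")));

    // The empty form yields an empty map.
    let empty = json_map! {};
    assert!(empty.is_empty());
}
```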
{"test": json!(1)}, 167 | }; 168 | let heartbeat_same_data = Event { 169 | id: None, 170 | timestamp: now, 171 | duration: Duration::seconds(1), 172 | data: json_map! {"test": json!(1)}, 173 | }; 174 | 175 | // Should merge 176 | let res_merge = heartbeat(&event, &heartbeat_same_data, 1.0).unwrap(); 177 | assert_eq!(Duration::seconds(1), res_merge.duration); 178 | 179 | // Order shouldn't matter, should merge anyway 180 | let res_merge = heartbeat(&heartbeat_same_data, &event, 1.0).unwrap(); 181 | assert_eq!(Duration::seconds(1), res_merge.duration); 182 | } 183 | } 184 | -------------------------------------------------------------------------------- /aw-transform/src/lib.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate log; 3 | 4 | // TODO: Move this to some more suitable place 5 | #[macro_export] 6 | macro_rules! json_map { 7 | { $( $key:literal : $value:expr),* } => {{ 8 | use serde_json::Value; 9 | use serde_json::map::Map; 10 | #[allow(unused_mut)] 11 | let mut map : Map = Map::new(); 12 | $( 13 | map.insert( $key.to_string(), json!($value) ); 14 | )* 15 | map 16 | }}; 17 | } 18 | 19 | pub mod classify; 20 | 21 | mod heartbeat; 22 | pub use heartbeat::heartbeat; 23 | 24 | mod find_bucket; 25 | pub use find_bucket::find_bucket; 26 | 27 | mod flood; 28 | pub use flood::flood; 29 | 30 | mod merge; 31 | pub use merge::merge_events_by_keys; 32 | 33 | mod chunk; 34 | pub use chunk::chunk_events_by_key; 35 | 36 | mod sort; 37 | pub use sort::{sort_by_duration, sort_by_timestamp}; 38 | 39 | mod filter_keyvals; 40 | pub use filter_keyvals::{exclude_keyvals, filter_keyvals, filter_keyvals_regex}; 41 | 42 | mod filter_period; 43 | pub use filter_period::filter_period_intersect; 44 | 45 | mod split_url; 46 | pub use split_url::split_url_event; 47 | 48 | mod period_union; 49 | pub use period_union::period_union; 50 | 51 | mod union_no_overlap; 52 | pub use union_no_overlap::union_no_overlap; 53 | -------------------------------------------------------------------------------- /aw-transform/src/merge.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use aw_models::Event; 4 | 5 | /// Merge events with the same values at the specified keys 6 | /// 7 | /// Doesn't care about if events are neighbouring or not, this transform merges 8 | /// all events with the same key. 
--------------------------------------------------------------------------------
/aw-transform/src/period_union.rs:
--------------------------------------------------------------------------------
1 | use super::sort::sort_by_timestamp;
2 | use aw_models::Event;
3 | use std::collections::VecDeque;
4 | 
5 | /// Takes two lists of events and returns a new list of events covering the union
6 | /// of the time periods contained in the event lists, with no overlapping events.
7 | ///
8 | /// aw-core implementation: https://github.com/ActivityWatch/aw-core/blob/b11fbe08a0405dec01380493f7b3261163cc6878/aw_transform/filter_period_intersect.py#L92
9 | ///
10 | /// WARNING: This function strips all data from events as it cannot keep it consistent.
11 | ///
12 | ///
13 | /// # Example
14 | /// ```ignore
15 | /// events1 |   -------       --------- |
16 | /// events2 | ------  ---  --   ----    |
17 | /// result  | -----------  --  --------- |
18 | /// ```
19 | pub fn period_union(events1: &[Event], events2: &[Event]) -> Vec<Event> {
20 |     let mut sorted_events: VecDeque<Event> = VecDeque::new();
21 |     sorted_events.extend(sort_by_timestamp([events1, events2].concat()));
22 | 
23 |     let mut events_union = Vec::new();
24 | 
25 |     if !sorted_events.is_empty() {
26 |         events_union.push(sorted_events.pop_front().unwrap())
27 |     }
28 | 
29 |     for e in sorted_events {
30 |         let last_event = events_union.last().unwrap();
31 | 
32 |         let e_p = e.interval();
33 |         let le_p = last_event.interval();
34 | 
35 |         match e_p.union(&le_p) {
36 |             Some(new_period) => {
37 |                 // If no gap and could be unioned, modify last event
38 |                 let mut e_mod = events_union.pop().unwrap();
39 |                 e_mod.duration = new_period.duration();
40 |                 events_union.push(e_mod);
41 |             }
42 |             None => {
43 |                 // If gap and could not be unioned, push event
44 |                 events_union.push(e);
45 |             }
46 |         }
47 |     }
48 | 
49 |     events_union
50 |         .drain(..)
51 |         .map(|mut e| {
52 |             e.data = json_map! {};
53 |             e
54 |         })
55 |         .collect()
56 | }
57 | 
58 | #[cfg(test)]
59 | mod tests {
60 |     use std::str::FromStr;
61 | 
62 |     use chrono::DateTime;
63 |     use chrono::Duration;
64 |     use chrono::Utc;
65 |     use serde_json::json;
66 | 
67 |     use aw_models::Event;
68 | 
69 |     use super::period_union;
70 | 
71 |     #[test]
72 |     fn test_period_union_empty() {
73 |         let e_result = period_union(&[], &[]);
74 |         assert_eq!(e_result.len(), 0);
75 |     }
76 | 
77 |     #[test]
78 |     fn test_period_union() {
79 |         let e1 = Event {
80 |             id: None,
81 |             timestamp: DateTime::from_str("2000-01-01T00:00:01Z").unwrap(),
82 |             duration: Duration::seconds(1),
83 |             data: json_map! {"test": json!(1)},
84 |         };
85 | 
86 |         let mut e2 = e1.clone();
87 |         e2.timestamp = DateTime::from_str("2000-01-01T00:00:02Z").unwrap();
88 | 
89 |         let e_result = period_union(&[e1], &[e2]);
90 |         assert_eq!(e_result.len(), 1);
91 | 
92 |         let dt: DateTime<Utc> = DateTime::from_str("2000-01-01T00:00:01.000Z").unwrap();
93 |         assert_eq!(e_result[0].timestamp, dt);
94 |         assert_eq!(e_result[0].duration, Duration::milliseconds(2000));
95 |     }
96 | 
97 |     /// Make sure nothing gets done when there is nothing to union (gaps present)
98 |     #[test]
99 |     fn test_period_union_nop() {
100 |         let e1 = Event {
101 |             id: None,
102 |             timestamp: DateTime::from_str("2000-01-01T00:00:01Z").unwrap(),
103 |             duration: Duration::seconds(1),
104 |             data: json_map! {"test": json!(1)},
105 |         };
106 | 
107 |         let mut e2 = e1.clone();
108 |         e2.timestamp = DateTime::from_str("2000-01-01T00:00:03Z").unwrap();
109 | 
110 |         let e_result = period_union(&[e1], &[e2]);
111 |         assert_eq!(e_result.len(), 2);
112 |     }
113 | 
114 |     #[test]
115 |     fn test_period_union_2nd_empty() {
116 |         let e1 = Event {
117 |             id: None,
118 |             timestamp: DateTime::from_str("2000-01-01T00:00:01Z").unwrap(),
119 |             duration: Duration::seconds(1),
120 |             data: json_map! {"test": json!(1)},
121 |         };
122 | 
123 |         let e_result = period_union(&[e1], &[]);
124 |         assert_eq!(e_result.len(), 1);
125 |     }
126 | 
127 |     #[test]
128 |     fn test_period_union_1st_empty() {
129 |         let e1 = Event {
130 |             id: None,
131 |             timestamp: DateTime::from_str("2000-01-01T00:00:01Z").unwrap(),
132 |             duration: Duration::seconds(1),
133 |             data: json_map! {"test": json!(1)},
134 |         };
135 | 
136 |         let e_result = period_union(&[], &[e1]);
137 |         assert_eq!(e_result.len(), 1);
138 |     }
139 | }
140 | 
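A usage sketch of period_union (not part of the source tree), showing two overlapping events collapsing into one covering event with the data stripped; the "not-afk" data value is invented for illustration:

```rust
use aw_models::Event;
use aw_transform::{json_map, period_union};
use chrono::{DateTime, Duration};
use serde_json::json;
use std::str::FromStr;

fn main() {
    let e1 = Event {
        id: None,
        timestamp: DateTime::from_str("2000-01-01T00:00:00Z").unwrap(),
        duration: Duration::seconds(2),
        data: json_map! {"status": json!("not-afk")},
    };
    // Overlaps the second half of e1 and extends one second past it.
    let mut e2 = e1.clone();
    e2.timestamp = DateTime::from_str("2000-01-01T00:00:01Z").unwrap();

    let union = period_union(&[e1], &[e2]);
    // One event covering 00:00:00..00:00:03, with the data stripped.
    assert_eq!(union.len(), 1);
    assert_eq!(union[0].duration, Duration::seconds(3));
    assert!(union[0].data.is_empty());
}
```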
{"test": json!(1)}, 120 | }; 121 | let in_events = vec![e1, e2, e3, e4]; 122 | let res1 = merge_events_by_keys(in_events, vec!["test".to_string()]); 123 | // Needed, otherwise the order is undeterministic 124 | let res2 = sort_by_timestamp(res1); 125 | let expected = vec![ 126 | Event { 127 | id: None, 128 | timestamp: DateTime::from_str("2000-01-01T00:00:00Z").unwrap(), 129 | duration: Duration::seconds(10), 130 | data: json_map! {"test": json!(1)}, 131 | }, 132 | Event { 133 | id: None, 134 | timestamp: DateTime::from_str("2000-01-01T00:00:02Z").unwrap(), 135 | duration: Duration::seconds(7), 136 | data: json_map! {"test": json!(6)}, 137 | }, 138 | ]; 139 | assert_eq!(&res2, &expected); 140 | } 141 | } 142 | -------------------------------------------------------------------------------- /aw-transform/src/period_union.rs: -------------------------------------------------------------------------------- 1 | use super::sort::sort_by_timestamp; 2 | use aw_models::Event; 3 | use std::collections::VecDeque; 4 | 5 | /// Takes a list of two events and returns a new list of events covering the union 6 | /// of the timeperiods contained in the eventlists with no overlapping events. 7 | /// 8 | /// aw-core implementation: https://github.com/ActivityWatch/aw-core/blob/b11fbe08a0405dec01380493f7b3261163cc6878/aw_transform/filter_period_intersect.py#L92 9 | /// 10 | /// WARNING: This function strips all data from events as it cannot keep it consistent. 11 | /// 12 | /// 13 | /// # Example 14 | /// ```ignore 15 | /// events1 | ------- --------- | 16 | /// events2 | ------ --- -- ---- | 17 | /// result | ----------- -- --------- | 18 | /// ``` 19 | pub fn period_union(events1: &[Event], events2: &[Event]) -> Vec { 20 | let mut sorted_events: VecDeque = VecDeque::new(); 21 | sorted_events.extend(sort_by_timestamp([events1, events2].concat())); 22 | 23 | let mut events_union = Vec::new(); 24 | 25 | if !sorted_events.is_empty() { 26 | events_union.push(sorted_events.pop_front().unwrap()) 27 | } 28 | 29 | for e in sorted_events { 30 | let last_event = events_union.last().unwrap(); 31 | 32 | let e_p = e.interval(); 33 | let le_p = last_event.interval(); 34 | 35 | match e_p.union(&le_p) { 36 | Some(new_period) => { 37 | // If no gap and could be unioned, modify last event 38 | let mut e_mod = events_union.pop().unwrap(); 39 | e_mod.duration = new_period.duration(); 40 | events_union.push(e_mod); 41 | } 42 | None => { 43 | // If gap and could not be unioned, push event 44 | events_union.push(e); 45 | } 46 | } 47 | } 48 | 49 | events_union 50 | .drain(..) 51 | .map(|mut e| { 52 | e.data = json_map! {}; 53 | e 54 | }) 55 | .collect() 56 | } 57 | 58 | #[cfg(test)] 59 | mod tests { 60 | use std::str::FromStr; 61 | 62 | use chrono::DateTime; 63 | use chrono::Duration; 64 | use chrono::Utc; 65 | use serde_json::json; 66 | 67 | use aw_models::Event; 68 | 69 | use super::period_union; 70 | 71 | #[test] 72 | fn test_period_union_empty() { 73 | let e_result = period_union(&[], &[]); 74 | assert_eq!(e_result.len(), 0); 75 | } 76 | 77 | #[test] 78 | fn test_period_union() { 79 | let e1 = Event { 80 | id: None, 81 | timestamp: DateTime::from_str("2000-01-01T00:00:01Z").unwrap(), 82 | duration: Duration::seconds(1), 83 | data: json_map! 
{"test": json!(1)}, 84 | }; 85 | 86 | let mut e2 = e1.clone(); 87 | e2.timestamp = DateTime::from_str("2000-01-01T00:00:02Z").unwrap(); 88 | 89 | let e_result = period_union(&[e1], &[e2]); 90 | assert_eq!(e_result.len(), 1); 91 | 92 | let dt: DateTime = DateTime::from_str("2000-01-01T00:00:01.000Z").unwrap(); 93 | assert_eq!(e_result[0].timestamp, dt); 94 | assert_eq!(e_result[0].duration, Duration::milliseconds(2000)); 95 | } 96 | 97 | /// Make sure nothing gets done when nothing to union (gaps present) 98 | #[test] 99 | fn test_period_union_nop() { 100 | let e1 = Event { 101 | id: None, 102 | timestamp: DateTime::from_str("2000-01-01T00:00:01Z").unwrap(), 103 | duration: Duration::seconds(1), 104 | data: json_map! {"test": json!(1)}, 105 | }; 106 | 107 | let mut e2 = e1.clone(); 108 | e2.timestamp = DateTime::from_str("2000-01-01T00:00:03Z").unwrap(); 109 | 110 | let e_result = period_union(&[e1], &[e2]); 111 | assert_eq!(e_result.len(), 2); 112 | } 113 | 114 | #[test] 115 | fn test_period_union_2nd_empty() { 116 | let e1 = Event { 117 | id: None, 118 | timestamp: DateTime::from_str("2000-01-01T00:00:01Z").unwrap(), 119 | duration: Duration::seconds(1), 120 | data: json_map! {"test": json!(1)}, 121 | }; 122 | 123 | let e_result = period_union(&[e1], &[]); 124 | assert_eq!(e_result.len(), 1); 125 | } 126 | 127 | #[test] 128 | fn test_period_union_1st_empty() { 129 | let e1 = Event { 130 | id: None, 131 | timestamp: DateTime::from_str("2000-01-01T00:00:01Z").unwrap(), 132 | duration: Duration::seconds(1), 133 | data: json_map! {"test": json!(1)}, 134 | }; 135 | 136 | let e_result = period_union(&[], &[e1]); 137 | assert_eq!(e_result.len(), 1); 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /aw-transform/src/sort.rs: -------------------------------------------------------------------------------- 1 | use aw_models::Event; 2 | 3 | /// Sort a list of events by timestamp 4 | pub fn sort_by_timestamp(mut events: Vec) -> Vec { 5 | events.sort_by(|e1, e2| e1.timestamp.cmp(&e2.timestamp)); 6 | events 7 | } 8 | 9 | /// Sort a list of events by duration with the highest duration first 10 | pub fn sort_by_duration(mut events: Vec) -> Vec { 11 | events.sort_by(|e1, e2| e2.duration.cmp(&e1.duration)); 12 | events 13 | } 14 | 15 | #[cfg(test)] 16 | mod tests { 17 | use std::str::FromStr; 18 | 19 | use chrono::DateTime; 20 | use chrono::Duration; 21 | use serde_json::json; 22 | 23 | use aw_models::Event; 24 | 25 | use super::{sort_by_duration, sort_by_timestamp}; 26 | 27 | #[test] 28 | fn test_sort_by_timestamp() { 29 | let e1 = Event { 30 | id: None, 31 | timestamp: DateTime::from_str("2000-01-01T00:00:00Z").unwrap(), 32 | duration: Duration::seconds(1), 33 | data: json_map! {"test": json!(1)}, 34 | }; 35 | let e2 = Event { 36 | id: None, 37 | timestamp: DateTime::from_str("2000-01-01T00:00:03Z").unwrap(), 38 | duration: Duration::seconds(1), 39 | data: json_map! {"test": json!(1)}, 40 | }; 41 | let res = sort_by_timestamp(vec![e2.clone(), e1.clone()]); 42 | assert_eq!(res, vec![e1, e2]); 43 | } 44 | 45 | #[test] 46 | fn test_sort_by_duration() { 47 | let e1 = Event { 48 | id: None, 49 | timestamp: DateTime::from_str("2000-01-01T00:00:00Z").unwrap(), 50 | duration: Duration::seconds(2), 51 | data: json_map! {"test": json!(1)}, 52 | }; 53 | let e2 = Event { 54 | id: None, 55 | timestamp: DateTime::from_str("2000-01-01T00:00:03Z").unwrap(), 56 | duration: Duration::seconds(1), 57 | data: json_map! 
{"test": json!(1)}, 58 | }; 59 | let res = sort_by_duration(vec![e2.clone(), e1.clone()]); 60 | assert_eq!(res, vec![e1, e2]); 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /aw-transform/src/split_url.rs: -------------------------------------------------------------------------------- 1 | use aw_models::Event; 2 | use serde_json::value::Value; 3 | 4 | /// Adds $protocol, $domain, $path and $params keys for events with an "url" key 5 | /// 6 | /// But it only adds the generated field if it exists, for example if a url does not have a path 7 | /// the path value will not be set at all. 8 | /// 9 | /// # Example 10 | /// ```ignore 11 | /// input: { 12 | /// "data": { 13 | /// "url": "http://google.com/test" 14 | /// } 15 | /// } 16 | /// output: { 17 | /// "data": { 18 | /// "$domain": "google.com", 19 | /// "$path": "/test", 20 | /// "$protocol": "http" 21 | /// } 22 | /// } 23 | /// ``` 24 | pub fn split_url_event(event: &mut Event) { 25 | use url::Url; 26 | 27 | let uri_str = match event.data.get("url") { 28 | None => return, 29 | Some(val) => match val { 30 | Value::String(s) => s.clone(), 31 | _ => return, 32 | }, 33 | }; 34 | let uri = match Url::parse(&uri_str) { 35 | Ok(uri) => uri, 36 | Err(_) => return, 37 | }; 38 | // Protocol 39 | let protocol = uri.scheme().to_string(); 40 | event 41 | .data 42 | .insert("$protocol".to_string(), Value::String(protocol)); 43 | // Domain 44 | let domain = match uri.host_str() { 45 | Some(domain) => domain.trim_start_matches("www.").to_string(), 46 | None => "".to_string(), 47 | }; 48 | event 49 | .data 50 | .insert("$domain".to_string(), Value::String(domain)); 51 | 52 | // Path 53 | let path = uri.path().to_string(); 54 | event.data.insert("$path".to_string(), Value::String(path)); 55 | 56 | // Params 57 | let params = match uri.query() { 58 | Some(query) => query.to_string(), 59 | None => "".to_string(), 60 | }; 61 | event 62 | .data 63 | .insert("$params".to_string(), Value::String(params)); 64 | 65 | // TODO: aw-server-python also has options and identifier 66 | } 67 | 68 | #[cfg(test)] 69 | mod tests { 70 | use std::str::FromStr; 71 | 72 | use chrono::DateTime; 73 | use chrono::Duration; 74 | use serde_json::json; 75 | 76 | use aw_models::Event; 77 | 78 | use super::split_url_event; 79 | 80 | #[test] 81 | fn test_split_url_events() { 82 | let mut e1 = Event { 83 | id: None, 84 | timestamp: DateTime::from_str("2000-01-01T00:00:01Z").unwrap(), 85 | duration: Duration::seconds(1), 86 | data: json_map! {"url": "http://www.google.com/path?query=1"}, 87 | }; 88 | split_url_event(&mut e1); 89 | assert_eq!( 90 | e1.data, 91 | json_map! 
{ 92 | "url": json!("http://www.google.com/path?query=1"), 93 | "$protocol": json!("http"), 94 | "$domain": json!("google.com"), 95 | "$path": json!("/path"), 96 | "$params": json!("query=1") 97 | } 98 | ); 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /aw-transform/src/union_no_overlap.rs: -------------------------------------------------------------------------------- 1 | use aw_models::{Event, TimeInterval}; 2 | use chrono::{DateTime, Utc}; 3 | 4 | /// Merges two eventlists and removes overlap, the first eventlist will have precedence 5 | /// 6 | /// aw-core implementation: https://github.com/ActivityWatch/aw-core/blob/master/aw_transform/union_no_overlap.py 7 | /// 8 | /// # Example 9 | /// ```ignore 10 | /// events1 | xxx xx xxx | 11 | /// events1 | ---- ------ -- | 12 | /// result | xxx-- xx ----xxx -- | 13 | /// ``` 14 | #[allow(clippy::collapsible_else_if)] 15 | pub fn union_no_overlap(events1: Vec, mut events2: Vec) -> Vec { 16 | let mut events_union: Vec = Vec::new(); 17 | let mut e1_i = 0; 18 | let mut e2_i = 0; 19 | while e1_i < events1.len() && e2_i < events2.len() { 20 | let e1 = &events1[e1_i]; 21 | let e2 = &events2[e2_i]; 22 | let e1_p: TimeInterval = e1.into(); 23 | let e2_p: TimeInterval = e2.into(); 24 | 25 | if e1_p.intersects(&e2_p) { 26 | if e1.timestamp <= e2.timestamp { 27 | events_union.push(e1.clone()); 28 | e1_i += 1; 29 | 30 | // If e2 continues after e1, we need to split up the event so we only get the part that comes after 31 | let (_, e2_next) = split_event(e2, e1.timestamp + e1.duration); 32 | if let Some(e2_next) = e2_next { 33 | events2[e2_i] = e2_next; 34 | } else { 35 | e2_i += 1; 36 | } 37 | } else { 38 | let (e2_next, e2_next2) = split_event(e2, e1.timestamp); 39 | events_union.push(e2_next); 40 | e2_i += 1; 41 | if let Some(e2_next2) = e2_next2 { 42 | events2.insert(e2_i, e2_next2); 43 | } 44 | } 45 | } else { 46 | if e1.timestamp <= e2.timestamp { 47 | events_union.push(e1.clone()); 48 | e1_i += 1; 49 | } else { 50 | events_union.push(e2.clone()); 51 | e2_i += 1; 52 | } 53 | } 54 | } 55 | 56 | // Now we just need to add any remaining events 57 | events_union.extend(events1[e1_i..].iter().cloned()); 58 | events_union.extend(events2[e2_i..].iter().cloned()); 59 | 60 | events_union 61 | } 62 | 63 | fn split_event(e: &Event, timestamp: DateTime) -> (Event, Option) { 64 | if e.timestamp < timestamp && timestamp < e.timestamp + e.duration { 65 | let e1 = Event::new(e.timestamp, timestamp - e.timestamp, e.data.clone()); 66 | let e2 = Event::new( 67 | timestamp, 68 | e.duration - (timestamp - e.timestamp), 69 | e.data.clone(), 70 | ); 71 | (e1, Some(e2)) 72 | } else { 73 | (e.clone(), None) 74 | } 75 | } 76 | 77 | // Some tests 78 | #[cfg(test)] 79 | mod tests { 80 | use super::*; 81 | use chrono::Duration; 82 | 83 | #[test] 84 | fn test_split_event() { 85 | let now = Utc::now(); 86 | let td1h = Duration::hours(1); 87 | let e = Event { 88 | id: None, 89 | timestamp: now, 90 | duration: Duration::hours(2), 91 | data: serde_json::Map::new(), 92 | }; 93 | let (e1, e2_opt) = split_event(&e, now + td1h); 94 | assert_eq!(e1.timestamp, now); 95 | assert_eq!(e1.duration, td1h); 96 | 97 | let e2 = e2_opt.unwrap(); 98 | assert_eq!(e2.timestamp, now + td1h); 99 | assert_eq!(e2.duration, td1h); 100 | 101 | // Now a test which does not lead to a split 102 | let (e1, e2_opt) = split_event(&e, now); 103 | assert_eq!(e1.timestamp, now); 104 | assert_eq!(e1.duration, Duration::hours(2)); 105 | 
--------------------------------------------------------------------------------
/compile-android.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | set -e
4 | platform="$(uname -s | tr '[:upper:]' '[:lower:]')"
5 | 
6 | # if args, use them to select targets (x86_64, arm64, etc)
7 | if [ $# -gt 0 ]; then
8 |     targets="$@"
9 | else
10 |     # otherwise, default to all targets
11 |     targets="arm64 x86_64 x86 arm"
12 | fi
13 | 
14 | ORIG_PATH="$PATH"
15 | ORIG_RUSTFLAGS="$RUSTFLAGS"
16 | 
17 | if [ -z "$ANDROID_NDK_HOME" ]; then
18 |     if [ -d `pwd`/"NDK" ]; then
19 |         echo "Found NDK folder in root, using."
20 |         ANDROID_NDK_HOME=`pwd`/NDK
21 |     else
22 |         # NOTE: I had some issues with this and cargo that magically resolved themselves when I made the path absolute.
23 |         echo "Environment variable ANDROID_NDK_HOME not set, please set it to the location of the Android NDK."
24 |         exit 1
25 |     fi
26 | fi
27 | export ANDROID_NDK_HOME
28 | 
29 | if [ "$RELEASE" = "true" ]; then
30 |     echo "Building in release mode... (slow)";
31 | else
32 |     echo "Building in debug mode... (fast)"
33 |     RELEASE=false;
34 | fi
35 | 
36 | # Workaround for "error: unable to find library -lgcc"
37 | # See: https://stackoverflow.com/questions/68873570/how-do-i-fix-ld-error-unable-to-find-library-lgcc-when-cross-compiling-rust
38 | find "${ANDROID_NDK_HOME}" -name "libunwind.a" -execdir bash -c 'echo "INPUT(-lunwind)" > libgcc.a' \;
39 | 
40 | for archtargetstr in \
41 |     'arm64 aarch64-linux-android' \
42 |     'x86_64 x86_64-linux-android' \
43 |     'x86 i686-linux-android' \
44 |     'arm armv7-linux-androideabi' \
45 | ; do
46 |     arch=$(echo $archtargetstr | cut -d " " -f 1)
47 |     target=$(echo $archtargetstr | cut -d " " -f 2)
48 |     target_underscore=$(echo $target | sed 's/-/_/g')
49 | 
50 |     echo ARCH $arch
51 |     echo TARGET $target
52 |     if ! echo "$targets" | grep -q "$arch"; then
53 |         echo "Skipping $arch..."
54 |         continue
55 |     fi
56 | 
57 |     NDK_ARCH_DIR="$ANDROID_NDK_HOME/toolchains/llvm/prebuilt/$platform-x86_64/bin"
58 |     if [ ! -d "$NDK_ARCH_DIR" ]; then
59 |         echo "Couldn't find directory $NDK_ARCH_DIR"
60 |         exit 1
61 |     fi
62 | 
63 |     echo "Building for $arch..."
64 | 
65 |     export PATH="$NDK_ARCH_DIR:$ORIG_PATH"
66 |     export RUSTFLAGS="$ORIG_RUSTFLAGS"
67 |     # Need to set AR for target since NDK 21+:
68 |     # https://github.com/rust-lang/cc-rs/issues/636#issuecomment-1075352495
69 |     declare -x "AR_${target_underscore}"="$NDK_ARCH_DIR/llvm-ar"
70 |     declare -x "CC_${target_underscore}"="$NDK_ARCH_DIR/${target}-clang"
71 |     declare -x "RANLIB_${target_underscore}"="$NDK_ARCH_DIR/llvm-ranlib"
72 | 
73 |     # Needed for runtime error: https://github.com/termux/termux-packages/issues/8029
74 |     # java.lang.UnsatisfiedLinkError: dlopen failed: cannot locate symbol "__extenddftf2"
75 |     export RUSTFLAGS+=" -C link-arg=$($NDK_ARCH_DIR/${target}-clang -print-libgcc-file-name)"
76 |     echo RUSTFLAGS=$RUSTFLAGS
77 | 
78 |     # fix armv7 -> arm
79 |     if [ "$arch" = "arm" ]; then
80 |         declare -x "CC_${target_underscore}"="$NDK_ARCH_DIR/arm-linux-androideabi-clang"
81 |     fi
82 | 
83 |     # check that they exist
84 |     for var in AR_${target_underscore} CC_${target_underscore} RANLIB_${target_underscore}; do
85 |         if [ ! -f "${!var}" ]; then
86 |             echo "Couldn't find ${!var} set for variable $var"
87 |             exit 1
88 |         fi
89 |     done
90 | 
91 |     # People suggest to use this, but ime it needs all the same workarounds anyway :shrug:
92 |     #cargo ndk build -p aw-server --target $target --lib $($RELEASE && echo '--release')
93 |     cargo build -p aw-server --target $target --lib $($RELEASE && echo '--release')
94 | done
95 | 
--------------------------------------------------------------------------------
/install-ndk.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Based on https://mozilla.github.io/firefox-browser-architecture/experiments/2017-09-21-rust-on-android.html
3 | # Depended on by aw-android/scripts/setup-rust-with-ndk.sh
4 | 
5 | set -e;
6 | 
7 | NDK_VERSION=r25c
8 | 
9 | script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
10 | project_path="$(readlink -f "$script_dir/.")"
11 | 
12 | platform="$(uname -s | tr '[:upper:]' '[:lower:]')"
13 | 
14 | if [ -z "$ANDROID_NDK_HOME" ]; then
15 |     if [ -d `pwd`/"NDK" ]; then
16 |         echo "Found NDK folder in root, using."
17 |     else
18 |         echo 'ANDROID_NDK_HOME not set, downloading NDK...';
19 |         # Download Linux NDK or macOS NDK, depending on OS
20 |         wget --no-verbose -O android-ndk.zip https://dl.google.com/android/repository/android-ndk-$NDK_VERSION-$platform.zip;
21 |         unzip -q -d NDK android-ndk.zip;
22 |         ls NDK;
23 |         mv NDK/*/* NDK/;
24 |     fi
25 |     ANDROID_NDK_HOME=`pwd`/NDK;
26 | fi
27 | 
28 | # Needed since dependency 'ring' doesn't respect .cargo/config
29 | echo "Setting up toolchain binary symlinks..."
30 | NDK_TOOLCHAIN_BIN=$ANDROID_NDK_HOME/toolchains/llvm/prebuilt/$platform-x86_64/bin
31 | for arch in \
32 |     'aarch64' \
33 |     'x86_64' \
34 |     'i686' \
35 | ; do
36 |     ln -s -f $NDK_TOOLCHAIN_BIN/$arch-linux-android26-clang $NDK_TOOLCHAIN_BIN/$arch-linux-android-clang
37 | done
38 | 
39 | # This has a slightly different path from the ones above
40 | ln -s -f $NDK_TOOLCHAIN_BIN/armv7a-linux-androideabi26-clang $NDK_TOOLCHAIN_BIN/armv7a-linux-androideabi-clang
41 | ln -s -f $NDK_TOOLCHAIN_BIN/armv7a-linux-androideabi26-clang $NDK_TOOLCHAIN_BIN/arm-linux-androideabi-clang
42 | 
43 | # Add to Rust
44 | echo "Setting up Rust toolchains..."
45 | rustup target add aarch64-linux-android armv7-linux-androideabi i686-linux-android x86_64-linux-android;
46 | 
47 | # Creates cargo config
48 | echo "Creating cargo config..."
49 | mkdir -p $project_path/.cargo
50 | echo "
51 | [target.aarch64-linux-android]
52 | ar = '$ANDROID_NDK_HOME/toolchains/llvm/prebuilt/$platform-x86_64/bin/llvm-ar'
53 | linker = '$ANDROID_NDK_HOME/toolchains/llvm/prebuilt/$platform-x86_64/bin/aarch64-linux-android26-clang'
54 | 
55 | [target.armv7-linux-androideabi]
56 | ar = '$ANDROID_NDK_HOME/toolchains/llvm/prebuilt/$platform-x86_64/bin/llvm-ar'
57 | linker = '$ANDROID_NDK_HOME/toolchains/llvm/prebuilt/$platform-x86_64/bin/armv7a-linux-androideabi26-clang'
58 | 
59 | [target.i686-linux-android]
60 | ar = '$ANDROID_NDK_HOME/toolchains/llvm/prebuilt/$platform-x86_64/bin/llvm-ar'
61 | linker = '$ANDROID_NDK_HOME/toolchains/llvm/prebuilt/$platform-x86_64/bin/i686-linux-android26-clang'
62 | 
63 | [target.x86_64-linux-android]
64 | ar = '$ANDROID_NDK_HOME/toolchains/llvm/prebuilt/$platform-x86_64/bin/llvm-ar'
65 | linker = '$ANDROID_NDK_HOME/toolchains/llvm/prebuilt/$platform-x86_64/bin/x86_64-linux-android26-clang'
66 | " > $project_path/.cargo/config
--------------------------------------------------------------------------------
/scripts/create-cargo-config.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | if [ -z "$NDK_HOME" ]; then
4 |     echo '$NDK_HOME not set';
5 |     exit 1;
6 | fi
7 | 
8 | tee ~/test-cargo-config.toml <<< "
9 | [target.aarch64-linux-android]
10 | ar = '$NDK_HOME/arm64/bin/aarch64-linux-android-ar'
11 | linker = '$NDK_HOME/arm64/bin/aarch64-linux-android-clang'
12 | 
13 | [target.armv7-linux-androideabi]
14 | ar = '$NDK_HOME/arm/bin/arm-linux-androideabi-ar'
15 | linker = '$NDK_HOME/arm/bin/arm-linux-androideabi-clang'
16 | 
17 | [target.i686-linux-android]
18 | ar = '$NDK_HOME/x86/bin/i686-linux-android-ar'
19 | linker = '$NDK_HOME/x86/bin/i686-linux-android-clang'
20 | "
21 | 
--------------------------------------------------------------------------------
/todo.md:
--------------------------------------------------------------------------------
1 | bucket already exists test
2 | get_keys_starting no rows test
3 | event model default duration test
4 | make a log message in logging.rs test
5 | 
--------------------------------------------------------------------------------