├── .cargo
│   └── config
├── .github
│   ├── Dockerfile
│   └── workflows
│       ├── ci.yml
│       ├── docker_push.yml
│       └── release.yml
├── .gitignore
├── .pre-commit-config.yaml
├── .vscode
│   └── settings.json
├── CONTRIBUTING.md
├── Cargo.lock
├── Cargo.toml
├── Containerfile
├── Dockerfile
├── LICENSE
├── README.md
├── README_CN.md
├── anki_patch
│   └── d9d36078f17a2b4b8b44fcb802eb274911ebabe7_anki_rslib.patch
├── ankisyncd.toml
├── build.rs
├── docs
│   ├── ANKI_CLIENTS_SETUP.md
│   ├── ARCHITECTURE.md
│   ├── CERTS.md
│   ├── CHANGELOG
│   ├── CONTAINER.md
│   ├── INSTALL.md
│   ├── PLATFORM.md
│   ├── REVERSE_PROXY.md
│   └── TEST_SERVER_CLIENT.md
├── scripts
│   ├── Cargo.toml
│   ├── ankisyncd.toml
│   ├── build_all
│   ├── cc.sh
│   ├── clone_patch_anki
│   ├── clone_patch_anki.bat
│   └── entrypoint.sh
└── src
    ├── app_config.rs
    ├── config.rs
    ├── create_media.sql
    ├── db.rs
    ├── error.rs
    ├── lib.rs
    ├── main.rs
    ├── parse_args.rs
    ├── request.rs
    ├── response.rs
    ├── routes.rs
    └── user.rs

/.cargo/config:
--------------------------------------------------------------------------------
1 | # This file is used for cross-compiling
2 | 
3 | # macOS
4 | # [target.x86_64-apple-darwin]
5 | # linker = "x86_64-apple-darwin14-clang"
6 | # ar = "x86_64-apple-darwin14-ar"
7 | # Windows
8 | [target.x86_64-pc-windows-msvc]
9 | # statically link the C runtime (CRT)
10 | rustflags = ["-C", "target-feature=+crt-static"]
11 | [target.i686-pc-windows-msvc]
12 | linker = "C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\BuildTools\\VC\\Tools\\MSVC\\14.29.30133\\bin\\Hostx64\\x86\\link.exe"
13 | # Linux
14 | [target.aarch64-unknown-linux-musl]
15 | linker = "aarch64-linux-musl-ld"
16 | rustflags = ["-C", "target-feature=+crt-static",
17 |              "-C", "link-args=-static"]
18 | 
19 | [target.armv7-unknown-linux-musleabihf]
20 | #linker = "arm-linux-musleabihf-gcc"
21 | linker = "arm-linux-musleabihf-ld"
22 | rustflags = [
23 |     "-C", "target-feature=+crt-static",
24 |     "-C", "link-args=-static",
25 | ]
26 | 
27 | [target.arm-unknown-linux-musleabihf]
28 | linker = "arm-linux-musleabihf-ld"
29 | rustflags = [
30 |     "-C", "target-feature=+crt-static",
31 |     "-C", "link-args=-static",
32 | ]
33 | 
34 | [target.x86_64-unknown-linux-musl]
35 | linker = "x86_64-linux-musl-ld"
36 | rustflags = ["-C", "target-feature=+crt-static",
37 |              "-C", "link-args=-static",
38 | ]
39 | 
40 | # For regions where GitHub is blocked or poorly connected
41 | #[source.crates-io]
42 | #registry="https://github.com/rust-lang/crates.io-index" # this line is optional; it only documents the default registry address
43 | #replace-with='tuna'
44 | #[source.tuna]
45 | #registry = "https://mirrors.tuna.tsinghua.edu.cn/git/crates.io-index.git"
46 | 
--------------------------------------------------------------------------------
/.github/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:stable-slim
2 | WORKDIR /app
3 | # copy from host to container
4 | COPY binary binary
5 | 
6 | RUN cp /app/binary/ankisyncd.toml /ankisyncd.toml && cp /app/binary/ankisyncd.toml /app/ankisyncd.toml && cp /app/binary/entrypoint.sh /entrypoint.sh
7 | RUN mv /app/binary/`arch`-unknown-linux-musl-ankisyncd /usr/local/bin/ankisyncd
8 | RUN chmod +x /usr/local/bin/ankisyncd && rm -fr binary
9 | # WORKDIR /app means that when you log into the container's shell,
10 | # you will be in the /app directory by default.
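# A usage sketch (an assumption, not part of the CI pipeline): the docker_push
# workflow drops the prebuilt musl binaries into ./binary before building this
# image, so a manual build from the repository root would look roughly like:
#   docker build -f .github/Dockerfile -t anki-sync-server-rs .
#   docker run -d -p 27701:27701 anki-sync-server-rs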
11 | # https://linuxhint.com/dockerfile_volumes/
12 | # persist data with a named volume https://docs.docker.com/get-started/05_persisting_data/
13 | VOLUME /app
14 | 
15 | RUN chmod +x /entrypoint.sh
16 | CMD ["sh", "/entrypoint.sh"]
17 | EXPOSE 27701
18 | 
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | on: [pull_request]
2 | 
3 | name: Continuous integration
4 | 
5 | jobs:
6 |   ci_linux:
7 |     name: ci_on_linux
8 |     runs-on: ubuntu-latest
9 | 
10 |     steps:
11 |       - name: Check out code
12 |         uses: actions/checkout@v2
13 | 
14 |       - name: Set up cargo
15 |         uses: actions-rs/toolchain@v1
16 |         with:
17 |           profile: minimal
18 |           toolchain: stable
19 |           override: true
20 | 
21 |       - name: Install Protoc
22 |         uses: arduino/setup-protoc@v1
23 |         with:
24 |           version: '3.x'
25 |           repo-token: ${{ secrets.GITHUB_TOKEN }}
26 | 
27 |       - name: Set up python
28 |         uses: actions/setup-python@v2
29 | 
30 |       # clone and patch anki library
31 |       - name: Clone patch Anki
32 |         run: sh ./scripts/clone_patch_anki
33 | 
34 |       # set up and run pre-commit (pre-commit run --all-files)
35 |       - name: run pre-commit
36 |         uses: pre-commit/action@v2.0.3
37 | 
38 |       # run build script to build
39 |       - name: Build Ankisyncd
40 |         run: |
41 |           cargo build
42 |           cargo build --features tls
43 | 
44 |   ci_macos:
45 |     name: ci_on_macos
46 |     runs-on: macos-latest
47 | 
48 |     steps:
49 |       - name: Check out code
50 |         uses: actions/checkout@v2
51 | 
52 |       - name: Set up cargo
53 |         uses: actions-rs/toolchain@v1
54 |         with:
55 |           profile: minimal
56 |           toolchain: stable
57 |           override: true
58 | 
59 |       - name: Install Protoc
60 |         uses: arduino/setup-protoc@v1
61 |         with:
62 |           version: '3.x'
63 |           repo-token: ${{ secrets.GITHUB_TOKEN }}
64 | 
65 |       # - name: Install protoc
66 |       #   run: |
67 |       #     /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
68 |       #     brew install protobuf
69 | 
70 |       - name: Set up python
71 |         uses: actions/setup-python@v2
72 | 
73 |       # clone and patch anki library
74 |       - name: Clone patch Anki
75 |         run: sh ./scripts/clone_patch_anki
76 | 
77 |       # set up and run pre-commit (pre-commit run --all-files)
78 |       - name: run pre-commit
79 |         uses: pre-commit/action@v2.0.3
80 | 
81 |       # run build script to build
82 |       - name: Build Ankisyncd
83 |         run: |
84 |           cargo build
85 |           cargo build --features tls
86 | 
87 | 
88 |   # Windows needs a separate job, as its patch file differs
89 |   ci_windows:
90 |     name: ci_on_windows
91 |     runs-on: windows-latest
92 | 
93 |     steps:
94 |       - name: Check out code
95 |         uses: actions/checkout@v2
96 | 
97 |       - name: Set up cargo
98 |         uses: actions-rs/toolchain@v1
99 |         with:
100 |           profile: minimal
101 |           toolchain: stable
102 |           override: true
103 | 
104 |       - name: Install Protoc
105 |         uses: arduino/setup-protoc@v1
106 |         with:
107 |           version: '3.x'
108 |           repo-token: ${{ secrets.GITHUB_TOKEN }}
109 | 
110 |       # clone and patch anki library
111 |       - name: Clone patch Anki
112 |         run: .\scripts\clone_patch_anki.bat
113 | 
114 |       # set up and run pre-commit (pre-commit run --all-files)
115 |       - name: Set up python
116 |         uses: actions/setup-python@v2
117 |       - name: run pre-commit
118 |         uses: pre-commit/action@v2.0.3
119 | 
120 |       # run build script to build
121 |       - name: Build Ankisyncd
122 |         run: |
123 |           cargo build
124 |           cargo build --features tls
125 | 
126 | 
127 | 
128 | 
129 | 
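To reproduce these checks locally before opening a pull request, the Linux job boils down to roughly the sketch below (an approximation; it assumes rust, python, protoc, and pre-commit are already installed, and the Windows job differs only in using clone_patch_anki.bat):

```sh
# clone and patch the anki library, then run the same checks as CI
sh ./scripts/clone_patch_anki
pre-commit run --all-files   # clippy, fmt, cargo-check and friends
cargo build
cargo build --features tls
```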
--------------------------------------------------------------------------------
/.github/workflows/docker_push.yml:
--------------------------------------------------------------------------------
1 | # from https://github.com/docker/build-push-action
2 | name: builddocker
3 | 
4 | on:
5 |   push:
6 |     branches: master
7 |     tags:
8 |       - "*.*.*"
9 | 
10 | jobs:
11 |   x64:
12 |     name: Build x64
13 |     runs-on: ubuntu-latest
14 |     # PATH exports do not persist between steps, so each step must re-export what it needs
15 |     steps:
16 |       - name: Check out code
17 |         uses: actions/checkout@v2
18 | 
19 |       - name: Set up cargo
20 |         uses: actions-rs/toolchain@v1
21 |         with:
22 |           profile: minimal
23 |           toolchain: stable
24 |           override: true
25 | 
26 |       # clone and patch anki library
27 |       # the bundled feature of rusqlite in the anki lib must be disabled, so copy the modified cargo file
28 |       - name: Clone patch Anki
29 |         run: |
30 |           sh ./scripts/clone_patch_anki
31 | 
32 |       - name: Install Protoc
33 |         uses: arduino/setup-protoc@v1
34 |         with:
35 |           version: '3.x'
36 |           repo-token: ${{ secrets.GITHUB_TOKEN }}
37 | 
38 |       - name: Add cargo target x64
39 |         run: rustup target add x86_64-unknown-linux-musl
40 | 
41 |       - uses: robinraju/release-downloader@v1.4
42 |         with:
43 |           repository: "dobefore/musl-cross"
44 |           tag: "0.1.0"
45 |           fileName: "x86_64-linux-musl-cross.tgz"
46 | 
47 |       - name: Copy musl-cross to home
48 |         run: cp x86_64-linux-musl-cross.tgz $HOME
49 | 
50 |       - name: unpack cross-compile toolchains musl
51 |         run: tar -zxvf $HOME/x86_64-linux-musl-cross.tgz -C $HOME
52 | 
53 |       # # openssl
54 |       - uses: robinraju/release-downloader@v1.4
55 |         with:
56 |           repository: "dobefore/cross-compile-openssl-musl"
57 |           tag: "1.1.1"
58 |           fileName: "openssl1.1.1f_1.1.1_linux_x64.tar.gz"
59 |       - name: Copy openssl lib to home
60 |         run: cp openssl1.1.1f_1.1.1_linux_x64.tar.gz $HOME
61 | 
62 |       - name: unpack openssl
63 |         run: |
64 |           tar -zxvf $HOME/openssl1.1.1f_1.1.1_linux_x64.tar.gz -C $HOME
65 |           cp -r $HOME/openssl1.1.1f_1.1.1_linux_x64/openssl/ $HOME
66 | 
67 |       - name: Build
68 |         run: |
69 |           export OPENSSL_LIB_DIR=$HOME/openssl/lib
70 |           export OPENSSL_INCLUDE_DIR=$HOME/openssl/include
71 |           export OPENSSL_STATIC=true
72 | 
73 |           sudo apt install musl-tools
74 |           export PATH="$HOME/x86_64-linux-musl-cross/bin:$PATH"
75 |           cargo build --target x86_64-unknown-linux-musl --release
76 | 
77 |       - run: mv target/x86_64-unknown-linux-musl/release/ankisyncd x86_64-unknown-linux-musl-ankisyncd
78 | 
79 |       - uses: actions/upload-artifact@v2
80 |         with:
81 |           name: binary
82 |           path: x86_64-unknown-linux-musl-ankisyncd
83 |           retention-days: 1
84 | 
85 |   arm64:
86 |     name: Build arm64
87 |     runs-on: ubuntu-latest
88 |     # PATH exports do not persist between steps, so each step must re-export what it needs
89 |     steps:
90 |       - name: Check out code
91 |         uses: actions/checkout@v2
92 | 
93 |       - name: Set up cargo
94 |         uses: actions-rs/toolchain@v1
95 |         with:
96 |           profile: minimal
97 |           toolchain: stable
98 |           override: true
99 | 
100 |       # clone and patch anki library
101 |       # the bundled feature of rusqlite in the anki lib must be disabled, so copy the modified cargo file
102 |       - name: Clone patch Anki
103 |         run: |
104 |           sh ./scripts/clone_patch_anki
105 | 
106 |       - name: Install Protoc
107 |         uses: arduino/setup-protoc@v1
108 |         with:
109 |           version: '3.x'
110 |           repo-token: ${{ secrets.GITHUB_TOKEN }}
111 | 
112 |       # - name: Install Protoc
113 |       #   run: |
114 |       #     PB_REL="https://github.com/protocolbuffers/protobuf/releases"
115 |       #     curl -LO $PB_REL/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip
116 |       #     mkdir -p $HOME/protoc
117 |       #     unzip protoc-3.15.8-linux-x86_64.zip -d $HOME/protoc
118 | 
119 |       - name: Add cargo target arm64
120 |         run: rustup target add aarch64-unknown-linux-musl
121 | 
122 |       - uses: robinraju/release-downloader@v1.4
123 |         with:
124 |           repository: "dobefore/musl-cross"
125 |           tag: "0.1.0"
126 |           fileName: "aarch64-linux-musl-cross.tgz"
127 | 
128 |       - name: Copy musl-cross to home
129 |         run: cp aarch64-linux-musl-cross.tgz $HOME
130 | 
131 |       - name: unpack cross-compile toolchains musl
132 |         run: tar -zxvf $HOME/aarch64-linux-musl-cross.tgz -C $HOME
133 |       # # openssl
134 |       - uses: robinraju/release-downloader@v1.4
135 |         with:
136 |           repository: "dobefore/cross-compile-openssl-musl"
137 |           tag: "1.1.1"
138 |           fileName: "openssl1.1.1f_1.1.1_linux_arm64.tar.gz"
139 |       - name: Copy openssl lib to home
140 |         run: cp openssl1.1.1f_1.1.1_linux_arm64.tar.gz $HOME
141 | 
142 |       - name: unpack openssl
143 |         run: |
144 |           tar -zxvf $HOME/openssl1.1.1f_1.1.1_linux_arm64.tar.gz -C $HOME
145 |           cp -r $HOME/openssl1.1.1f_1.1.1_linux_arm64/openssl/ $HOME
146 |       # # sqlite3 doesn't seem to be needed any more
147 |       # - uses: robinraju/release-downloader@v1.4
148 |       #   with:
149 |       #     repository: "dobefore/cross-compile-sqlite-musl"
150 |       #     tag: "0.1.2"
151 |       #     fileName: "sqliteautoconf3390000_0.1.2_linux_arm64.tar.gz"
152 | 
153 |       # - name: Copy sqlite lib to home
154 |       #   run: cp sqliteautoconf3390000_0.1.2_linux_arm64.tar.gz $HOME
155 | 
156 |       #     tar -zxvf $HOME/sqliteautoconf3390000_0.1.2_linux_arm64.tar.gz -C $HOME
157 |       # - name: unpack sqlite3
158 |       #   run: |
159 |       #     tar -zxvf sqliteautoconf3390000_0.1.2_linux_arm64.tar.gz
160 |       #     cp -r sqliteautoconf3390000_0.1.2_linux_arm64/sql/ .
161 |       #     cp -r sql/ $HOME
162 |       #     export SQLITE3_LIB_DIR=$HOME/sql/lib
163 |       #     export SQLITE3_INCLUDE_DIR=$HOME/sql/include
164 |       #     cp -r $HOME/sql/ .
165 | 
166 |       # build a statically linked binary for aarch64
167 | 
168 |       - name: Build
169 |         run: |
170 |           export OPENSSL_LIB_DIR=$HOME/openssl/lib
171 |           export OPENSSL_INCLUDE_DIR=$HOME/openssl/include
172 |           export OPENSSL_STATIC=true
173 | 
174 |           export SQLITE3_STATIC=1
175 |           export PATH="$HOME/aarch64-linux-musl-cross/bin:$PATH"
176 |           cargo build --target aarch64-unknown-linux-musl --release
177 | 
178 |       - name: Strip binaries (ankisyncd)
179 |         run: $HOME/aarch64-linux-musl-cross/bin/aarch64-linux-musl-strip target/aarch64-unknown-linux-musl/release/ankisyncd
180 | 
181 |       - run: mv target/aarch64-unknown-linux-musl/release/ankisyncd aarch64-unknown-linux-musl-ankisyncd
182 | 
183 |       - uses: actions/upload-artifact@v2
184 |         with:
185 |           name: binary
186 |           path: aarch64-unknown-linux-musl-ankisyncd
187 |           retention-days: 1
188 |       - uses: actions/upload-artifact@v2
189 |         with:
190 |           name: binary
191 |           path: scripts/ankisyncd.toml
192 |           retention-days: 1
193 |       - uses: actions/upload-artifact@v2
194 |         with:
195 |           name: binary
196 |           path: scripts/entrypoint.sh
197 |           retention-days: 1
198 | 
199 |   docker:
200 |     needs: [x64,arm64]
201 |     runs-on: ubuntu-latest
202 |     steps:
203 |       - name: Checkout
204 |         uses: actions/checkout@v2
205 | 
206 |       - name: Docker meta
207 |         id: meta
208 |         uses: docker/metadata-action@v4
209 |         with:
210 |           # list of Docker images to use as base name for tags
211 |           images: ankicommunity/anki-sync-server-rs
212 |           # generate Docker tags based on the following events/attributes
213 |           tags: |
214 |             latest
215 |             type=semver,pattern={{version}}
216 | 
217 | 
218 |       # https://github.com/docker/setup-qemu-action
219 |       - name: Set up QEMU
220 |         uses: docker/setup-qemu-action@v2
221 |       # https://github.com/docker/setup-buildx-action
222 |       - name: Set up Docker Buildx
223 |         uses: docker/setup-buildx-action@v2
224 | 
225 |       -
226 |         name: Login to DockerHub
227 |         uses: docker/login-action@v2
228 |         with:
229 |           username: ${{ secrets.DOCKER_HUB_USERNAME }}
230 |           password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
231 | 
232 |       - uses: actions/download-artifact@v2
233 |         with:
234 |           name: binary
235 |           path: binary
236 | 
237 |       -
238 |         name: Build and push
239 |         uses: docker/build-push-action@v2
240 |         with:
241 |           context: .
242 |           file: .github/Dockerfile
243 |           platforms: linux/amd64,linux/arm64
244 |           push: true
245 |           tags: ${{ steps.meta.outputs.tags }}
246 |           cache-from: type=gha
247 |           cache-to: type=gha,mode=max
248 | 
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Create Release Draft
2 | # git push origin 0.5.3 to push a local tag to the remote
3 | on:
4 |   workflow_dispatch:
5 |   push:
6 |     tags: ["[0-9]+.[0-9]+.[0-9]+*"]
7 | 
8 | jobs:
9 |   arm64:
10 |     name: Build arm64
11 |     runs-on: ubuntu-latest
12 |     # PATH exports do not persist between steps, so each step must re-export what it needs
13 |     steps:
14 |       - name: Check out code
15 |         uses: actions/checkout@v2
16 | 
17 |       - name: Set up cargo
18 |         uses: actions-rs/toolchain@v1
19 |         with:
20 |           profile: minimal
21 |           toolchain: stable
22 |           override: true
23 | 
24 |       # clone and patch anki library
25 |       # the bundled feature of rusqlite in the anki lib must be disabled, so copy the modified cargo file
26 |       - name: Clone patch Anki
27 |         run: |
28 |           sh ./scripts/clone_patch_anki
29 | 
30 |       - name: Install Protoc
31 |         uses: arduino/setup-protoc@v1
32 |         with:
33 |           version: '3.x'
34 |           repo-token: ${{ secrets.GITHUB_TOKEN }}
35 | 
36 |       # - name: Install Protoc
37 |       #   run: |
38 |       #     PB_REL="https://github.com/protocolbuffers/protobuf/releases"
39 |       #     curl -LO $PB_REL/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip
40 |       #     mkdir -p $HOME/protoc
41 |       #     unzip protoc-3.15.8-linux-x86_64.zip -d $HOME/protoc
42 | 
43 |       - name: Add cargo target arm64
44 |         run: rustup target add aarch64-unknown-linux-musl
45 | 
46 |       - uses: robinraju/release-downloader@v1.4
47 |         with:
48 |           repository: "dobefore/musl-cross"
49 |           tag: "0.1.0"
50 |           fileName: "aarch64-linux-musl-cross.tgz"
51 | 
52 |       - name: Copy musl-cross to home
53 |         run: cp aarch64-linux-musl-cross.tgz $HOME
54 | 
55 |       - name: unpack cross-compile toolchains musl
56 |         run: tar -zxvf $HOME/aarch64-linux-musl-cross.tgz -C $HOME
57 |       # # openssl
58 |       - uses: robinraju/release-downloader@v1.4
59 |         with:
60 |           repository: "dobefore/cross-compile-openssl-musl"
61 |           tag: "1.1.1"
62 |           fileName: "openssl1.1.1f_1.1.1_linux_arm64.tar.gz"
63 |       - name: Copy openssl lib to home
64 |         run: cp openssl1.1.1f_1.1.1_linux_arm64.tar.gz $HOME
65 | 
66 |       - name: unpack openssl
67 |         run: |
68 |           tar -zxvf $HOME/openssl1.1.1f_1.1.1_linux_arm64.tar.gz -C $HOME
69 |           cp -r $HOME/openssl1.1.1f_1.1.1_linux_arm64/openssl/ $HOME
70 |       # # sqlite3 doesn't seem to be needed any more
71 |       # - uses: robinraju/release-downloader@v1.4
72 |       #   with:
73 |       #     repository: "dobefore/cross-compile-sqlite-musl"
74 |       #     tag: "0.1.2"
75 |       #     fileName: "sqliteautoconf3390000_0.1.2_linux_arm64.tar.gz"
76 | 
77 |       # - name: Copy sqlite lib to home
78 |       #   run: cp sqliteautoconf3390000_0.1.2_linux_arm64.tar.gz $HOME
79 | 
80 |       #     tar -zxvf $HOME/sqliteautoconf3390000_0.1.2_linux_arm64.tar.gz -C $HOME
81 |       # - name: unpack sqlite3
82 |       #   run: |
83 |       #     tar -zxvf sqliteautoconf3390000_0.1.2_linux_arm64.tar.gz
84 |       #     cp -r 
sqliteautoconf3390000_0.1.2_linux_arm64/sql/ . 85 | # cp -r sql/ $HOME 86 | # export SQLITE3_LIB_DIR=$HOME/sql/lib 87 | # export SQLITE3_INCLUDE_DIR=$HOME/sql/include 88 | # cp -r $HOME/sql/ . 89 | 90 | # build static-linked binary for armv6 (also suitable for aarch64) 91 | 92 | - name: Build 93 | run: | 94 | export OPENSSL_LIB_DIR=$HOME/openssl/lib 95 | export OPENSSL_INCLUDE_DIR=$HOME/openssl/include 96 | export OPENSSL_STATIC=true 97 | 98 | export SQLITE3_STATIC=1 99 | export PATH="$HOME/aarch64-linux-musl-cross/bin:$PATH" 100 | cargo build --target aarch64-unknown-linux-musl --release 101 | 102 | - name: Strip binaries (ankisyncd) 103 | run: $HOME/aarch64-linux-musl-cross/bin/aarch64-linux-musl-strip target/aarch64-unknown-linux-musl/release/ankisyncd 104 | 105 | - name: Create output directory 106 | run: mkdir output 107 | 108 | - name: Copy files to output 109 | run: | 110 | cp target/aarch64-unknown-linux-musl/release/ankisyncd output/ 111 | cp ankisyncd.toml output/ 112 | 113 | - name: Upload artifact 114 | uses: actions/upload-artifact@v2 115 | with: 116 | name: arm64 117 | path: output/* 118 | 119 | # armv6: 120 | # name: Build armv6 121 | # runs-on: ubuntu-latest 122 | # # after every step is finished,exporting to PATH will be clear,so in next step ,need re-export 123 | # steps: 124 | # - name: Check out code 125 | # uses: actions/checkout@v2 126 | 127 | # - name: Set up cargo 128 | # uses: actions-rs/toolchain@v1 129 | # with: 130 | # profile: minimal 131 | # toolchain: stable 132 | # override: true 133 | 134 | # # clone and patch anki library 135 | # # need to disable bundled feature in rusqlite in anki lib,so copy after-modified cargo file 136 | # - name: Clone patch Anki 137 | # run: | 138 | # sh ./scripts/clone_patch_anki 139 | # cp ./scripts/Cargo.toml anki/rslib/ 140 | 141 | # - name: Install Protoc 142 | # run: | 143 | # PB_REL="https://github.com/protocolbuffers/protobuf/releases" 144 | # curl -LO $PB_REL/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip 145 | # mkdir -p $HOME/protoc 146 | # unzip protoc-3.15.8-linux-x86_64.zip -d $HOME/protoc 147 | 148 | # - name: Add cargo taget armv6 149 | # run: rustup target add arm-unknown-linux-musleabihf 150 | 151 | # - uses: robinraju/release-downloader@v1.4 152 | # with: 153 | # repository: "dobefore/musl-cross" 154 | # tag: "0.1.0" 155 | # fileName: "arm-linux-musleabihf-cross.tgz" 156 | 157 | # - name: Copy musl-cross to home 158 | # run: cp arm-linux-musleabihf-cross.tgz $HOME 159 | 160 | # - name: unpack cross-compile toolchains musl 161 | # run: tar -zxvf $HOME/arm-linux-musleabihf-cross.tgz -C $HOME 162 | # # openssl 163 | # - uses: robinraju/release-downloader@v1.4 164 | # with: 165 | # repository: "dobefore/cross-compile-openssl-musl" 166 | # tag: "1.1.1" 167 | # fileName: "openssl1.1.1f_1.1.1_linux_armv6.tar.gz" 168 | # - name: Copy openssl lib to home 169 | # run: cp openssl1.1.1f_1.1.1_linux_armv6.tar.gz $HOME 170 | 171 | # - name: unpack openssl 172 | # run: | 173 | # tar -zxvf $HOME/openssl1.1.1f_1.1.1_linux_armv6.tar.gz -C $HOME 174 | # cp -r $HOME/openssl1.1.1f_1.1.1_linux_armv6/openssl/ $HOME 175 | # # sqlite3 176 | # - uses: robinraju/release-downloader@v1.4 177 | # with: 178 | # repository: "dobefore/cross-compile-sqlite-musl" 179 | # tag: "0.1.1" 180 | # fileName: "sqliteautoconf3390400_0.1.1_linux_armv6.tar.gz" 181 | 182 | # - name: Copy sqlite lib to home 183 | # run: cp sqliteautoconf3390400_0.1.1_linux_armv6.tar.gz $HOME 184 | 185 | # - name: unpack sqlite3 186 | # run: | 187 | # tar -zxvf 
$HOME/sqliteautoconf3390400_0.1.1_linux_armv6.tar.gz -C $HOME
188 |       #     cd $HOME/sqliteautoconf3390400_0.1.1_linux_armv6/
189 |       #     cp -r sql/ $HOME
190 | 
191 |       # # build a statically linked binary for armv6 (also suitable for aarch64)
192 |       # - name: Build
193 |       #   run: |
194 |       #     export PATH="$PATH:$HOME/protoc/bin"
195 | 
196 |       #     export OPENSSL_LIB_DIR=$HOME/openssl/lib
197 |       #     export OPENSSL_INCLUDE_DIR=$HOME/openssl/include
198 |       #     export OPENSSL_STATIC=true
199 | 
200 |       #     export SQLITE3_LIB_DIR=$HOME/sql/lib
201 |       #     export SQLITE3_INCLUDE_DIR=$HOME/sql/include
202 |       #     export SQLITE3_STATIC=1
203 | 
204 |       #     cp -r $HOME/sql/ .
205 |       #     export PATH="$HOME/arm-linux-musleabihf-cross/bin:$PATH"
206 |       #     cargo build --target arm-unknown-linux-musleabihf --release --features tls
207 | 
208 |       # - name: Strip binaries (ankisyncd)
209 |       #   run: $HOME/arm-linux-musleabihf-cross/bin/arm-linux-musleabihf-strip target/arm-unknown-linux-musleabihf/release/ankisyncd
210 | 
211 |       # - name: Create output directory
212 |       #   run: mkdir output
213 | 
214 |       # - name: Copy files to output
215 |       #   run: |
216 |       #     cp target/arm-unknown-linux-musleabihf/release/ankisyncd output/
217 |       #     cp ankisyncd.toml output/
218 | 
219 |       # - name: Upload artifact
220 |       #   uses: actions/upload-artifact@v2
221 |       #   with:
222 |       #     name: armv6
223 |       #     path: output/*
224 | 
225 |   linux:
226 |     name: Build Linux
227 |     runs-on: ubuntu-latest
228 |     steps:
229 |       - name: Check out code
230 |         uses: actions/checkout@v2
231 | 
232 |       - name: Set up cargo
233 |         uses: actions-rs/toolchain@v1
234 |         with:
235 |           profile: minimal
236 |           toolchain: stable
237 |           override: true
238 | 
239 |       - name: Add cargo target x64
240 |         run: rustup target add x86_64-unknown-linux-musl
241 |       # clone and patch anki library
242 |       - name: Clone patch Anki
243 |         run: sh ./scripts/clone_patch_anki
244 | 
245 |       - name: Install Protoc
246 |         uses: arduino/setup-protoc@v1
247 |         with:
248 |           version: '3.x'
249 |           repo-token: ${{ secrets.GITHUB_TOKEN }}
250 | 
251 |       - uses: robinraju/release-downloader@v1.4
252 |         with:
253 |           repository: "dobefore/musl-cross"
254 |           tag: "0.1.0"
255 |           fileName: "x86_64-linux-musl-cross.tgz"
256 | 
257 |       - name: Copy musl-cross to home
258 |         run: cp x86_64-linux-musl-cross.tgz $HOME
259 | 
260 |       - name: unpack cross-compile toolchains musl
261 |         run: tar -zxvf $HOME/x86_64-linux-musl-cross.tgz -C $HOME
262 | 
263 |       # - name: Install Protoc
264 |       #   run: |
265 |       #     PB_REL="https://github.com/protocolbuffers/protobuf/releases"
266 |       #     curl -LO $PB_REL/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip
267 |       #     mkdir -p $HOME/protoc
268 |       #     unzip protoc-3.15.8-linux-x86_64.zip -d $HOME/protoc
269 |       #     export PATH="$PATH:$HOME/protoc/bin"
270 | 
271 |       - name: Build
272 |         run: |
273 |           sudo apt install musl-tools
274 |           export PATH="$HOME/x86_64-linux-musl-cross/bin:$PATH"
275 |           cargo build --target x86_64-unknown-linux-musl --release
276 |       - name: Strip binaries (ankisyncd)
277 |         run: $HOME/x86_64-linux-musl-cross/bin/x86_64-linux-musl-strip target/x86_64-unknown-linux-musl/release/ankisyncd
278 | 
279 |       # - name: Strip binaries (ankisyncd)
280 |       #   run: strip target/release/ankisyncd
281 | 
282 |       - name: Create output directory
283 |         run: mkdir output
284 | 
285 |       - name: Copy files to output
286 |         run: |
287 |           cp target/x86_64-unknown-linux-musl/release/ankisyncd output/
288 |           cp ankisyncd.toml output/
289 | 
290 |       - name: Upload artifact
291 |         uses: actions/upload-artifact@v2
292 |         with:
293 |           name: linux
294 |           path: output/*
295 | 
296 |   macos:
297 |     name: Build macOS
298 |     runs-on:
macos-latest 299 | steps: 300 | - name: Check out code 301 | uses: actions/checkout@v2 302 | 303 | - name: Set up cargo 304 | uses: actions-rs/toolchain@v1 305 | with: 306 | profile: minimal 307 | toolchain: stable 308 | override: true 309 | 310 | - name: Set up python 311 | uses: actions/setup-python@v2 312 | 313 | # clone and patch anki library 314 | - name: Clone patch Anki 315 | run: sh ./scripts/clone_patch_anki 316 | 317 | - name: Install Protoc 318 | uses: arduino/setup-protoc@v1 319 | with: 320 | version: '3.x' 321 | repo-token: ${{ secrets.GITHUB_TOKEN }} 322 | # - name: Install protoc 323 | # run: | 324 | # /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" 325 | # brew install protobuf 326 | - name: Build 327 | uses: actions-rs/cargo@v1 328 | with: 329 | command: build 330 | args: --release --features tls 331 | 332 | - name: Strip binaries (ankisyncd) 333 | run: strip target/release/ankisyncd 334 | 335 | - name: Create output directory 336 | run: mkdir output 337 | 338 | - name: Copy files to output 339 | run: | 340 | cp target/release/ankisyncd output/ 341 | cp ankisyncd.toml output/ 342 | 343 | - name: Upload artifact 344 | uses: actions/upload-artifact@v2 345 | with: 346 | name: macos 347 | path: output/* 348 | 349 | windows: 350 | name: Build Windows 351 | runs-on: windows-latest 352 | steps: 353 | - name: Check out code 354 | uses: actions/checkout@v2 355 | 356 | - name: Set up cargo 357 | uses: actions-rs/toolchain@v1 358 | with: 359 | profile: minimal 360 | toolchain: stable 361 | override: true 362 | 363 | # clone and patch anki library 364 | - name: Clone patch Anki 365 | run: .\scripts\clone_patch_anki.bat 366 | 367 | - name: Install Protoc 368 | uses: arduino/setup-protoc@v1 369 | with: 370 | version: '3.x' 371 | repo-token: ${{ secrets.GITHUB_TOKEN }} 372 | 373 | - name: Build 374 | uses: actions-rs/cargo@v1 375 | with: 376 | command: build 377 | args: --release --features tls 378 | 379 | - name: Create output directory 380 | run: mkdir output 381 | 382 | - name: Copy files to output 383 | run: | 384 | cp target\release\ankisyncd.exe output\ 385 | cp ankisyncd.toml output\ 386 | 387 | - name: Upload artifact 388 | uses: actions/upload-artifact@v2 389 | with: 390 | name: windows 391 | path: output\* 392 | 393 | release: 394 | name: Publish Release 395 | runs-on: ubuntu-latest 396 | needs: 397 | - linux 398 | - macos 399 | - windows 400 | - arm64 401 | steps: 402 | - name: Check out code 403 | uses: actions/checkout@v2 404 | 405 | - name: Determine Release Info 406 | id: info 407 | env: 408 | GITHUB_REF: ${{ github.ref }} 409 | run: | 410 | VERSION=${GITHUB_REF##*/} 411 | MAJOR=${VERSION%%.*} 412 | MINOR=${VERSION%.*} 413 | MINOR=${MINOR#*.} 414 | PATCH=${VERSION##*.} 415 | echo "::set-output name=version::${VERSION}" 416 | echo "::set-output name=linuxdir::ankisyncd_${MAJOR}.${MINOR}.${PATCH}_linux_x64" 417 | echo "::set-output name=macosdir::ankisyncd_${MAJOR}.${MINOR}.${PATCH}_macOS_x64" 418 | echo "::set-output name=windowsdir::ankisyncd_${MAJOR}.${MINOR}.${PATCH}_windows_x64" 419 | echo "::set-output name=arm64dir::ankisyncd_${MAJOR}.${MINOR}.${PATCH}_linux_arm64" 420 | echo "::set-output name=innerdir::ankisyncd-${VERSION}" 421 | - name: Create Release Draft 422 | id: create_release 423 | uses: actions/create-release@v1 424 | env: 425 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 426 | with: 427 | tag_name: ${{ github.ref }} 428 | release_name: ${{ steps.info.outputs.version }} Release 429 | draft: true 430 | 431 | - 
name: Create arm64 Directory 432 | run: mkdir -p ${{ steps.info.outputs.arm64dir }} 433 | 434 | - name: Download arm64 Artifacts 435 | uses: actions/download-artifact@v2 436 | with: 437 | name: arm64 438 | path: ${{ steps.info.outputs.arm64dir }} 439 | 440 | - name: Restore arm64 File Modes 441 | run: | 442 | chmod 755 ${{ steps.info.outputs.arm64dir }}/ankisyncd* 443 | - name: Create arm64 tarball 444 | run: tar -zcvf ${{ steps.info.outputs.arm64dir }}.tar.gz ${{ steps.info.outputs.arm64dir }} 445 | 446 | - name: Upload arm64 Artifact 447 | uses: actions/upload-release-asset@v1 448 | env: 449 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 450 | with: 451 | upload_url: ${{ steps.create_release.outputs.upload_url }} 452 | asset_path: ./${{ steps.info.outputs.arm64dir }}.tar.gz 453 | asset_name: ${{ steps.info.outputs.arm64dir }}.tar.gz 454 | asset_content_type: application/gzip 455 | 456 | - name: Create Linux Directory 457 | run: mkdir -p ${{ steps.info.outputs.linuxdir }} 458 | 459 | - name: Download Linux Artifacts 460 | uses: actions/download-artifact@v2 461 | with: 462 | name: linux 463 | path: ${{ steps.info.outputs.linuxdir }} 464 | 465 | - name: Restore Linux File Modes 466 | run: | 467 | chmod 755 ${{ steps.info.outputs.linuxdir }}/ankisyncd* 468 | - name: Create Linux tarball 469 | run: tar -zcvf ${{ steps.info.outputs.linuxdir }}.tar.gz ${{ steps.info.outputs.linuxdir }} 470 | 471 | - name: Upload Linux Artifact 472 | uses: actions/upload-release-asset@v1 473 | env: 474 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 475 | with: 476 | upload_url: ${{ steps.create_release.outputs.upload_url }} 477 | asset_path: ./${{ steps.info.outputs.linuxdir }}.tar.gz 478 | asset_name: ${{ steps.info.outputs.linuxdir }}.tar.gz 479 | asset_content_type: application/gzip 480 | 481 | - name: Create macOS Directory 482 | run: mkdir -p ${{ steps.info.outputs.macosdir }} 483 | 484 | - name: Download macOS Artifacts 485 | uses: actions/download-artifact@v2 486 | with: 487 | name: macos 488 | path: ${{ steps.info.outputs.macosdir }} 489 | 490 | - name: Restore macOS File Modes 491 | run: chmod 755 ${{ steps.info.outputs.macosdir }}/ankisyncd* 492 | 493 | - name: Create macOS Archive 494 | run: zip -r ${{ steps.info.outputs.macosdir }}.zip ${{ steps.info.outputs.macosdir }} 495 | 496 | - name: Upload macOS Artifact 497 | uses: actions/upload-release-asset@v1 498 | env: 499 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 500 | with: 501 | upload_url: ${{ steps.create_release.outputs.upload_url }} 502 | asset_path: ./${{ steps.info.outputs.macosdir }}.zip 503 | asset_name: ${{ steps.info.outputs.macosdir }}.zip 504 | asset_content_type: application/zip 505 | 506 | - name: Create Windows Directory 507 | run: mkdir -p ${{ steps.info.outputs.windowsdir }} 508 | 509 | - name: Download Windows artifact 510 | uses: actions/download-artifact@v2 511 | with: 512 | name: windows 513 | path: ${{ steps.info.outputs.windowsdir }} 514 | 515 | - name: Show Windows Artifacts 516 | run: ls -la ${{ steps.info.outputs.windowsdir }} 517 | 518 | - name: Create Windows Archive 519 | run: zip -r ${{ steps.info.outputs.windowsdir }}.zip ${{ steps.info.outputs.windowsdir }} 520 | 521 | - name: Upload Windows binary 522 | uses: actions/upload-release-asset@v1 523 | env: 524 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 525 | with: 526 | upload_url: ${{ steps.create_release.outputs.upload_url }} 527 | asset_path: ./${{ steps.info.outputs.windowsdir }}.zip 528 | asset_name: ${{ steps.info.outputs.windowsdir }}.zip 529 | asset_content_type: 
application/zip
530 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore rust build dir
2 | /target
3 | # Ignore prototyping dir
4 | /tmp
5 | # Ignore anki repo clone
6 | /anki/
7 | # Ignore test db and files when running in dev folder
8 | auth.db
9 | session.db
10 | collections/
11 | 
12 | 
13 | # Ignore vim files
14 | # Copy of https://github.com/github/gitignore/blob/master/Global/Vim.gitignore
15 | # Swap
16 | [._]*.s[a-v][a-z]
17 | !*.svg  # comment out if you don't need vector files
18 | [._]*.sw[a-p]
19 | [._]s[a-rt-v][a-z]
20 | [._]ss[a-gi-z]
21 | [._]sw[a-p]
22 | # Session
23 | Session.vim
24 | Sessionx.vim
25 | # Temporary
26 | .netrwhist
27 | *~
28 | # Auto-generated tag files
29 | tags
30 | # Persistent undo
31 | [._]*.un~
32 | 
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 |   - repo: https://github.com/pre-commit/pre-commit-hooks
3 |     rev: v4.1.0
4 |     hooks:
5 |       - id: check-added-large-files
6 |       - id: check-toml
7 |       - id: destroyed-symlinks
8 |       - id: no-commit-to-branch
9 |   - repo: https://github.com/doublify/pre-commit-rust
10 |     rev: v1.0
11 |     hooks:
12 |       - id: clippy
13 |       - id: fmt
14 |       - id: cargo-check
15 | 
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 |     "editor.tabCompletion": "on",
3 |     "diffEditor.codeLens": true,
4 |     "githubPullRequests.ignoredPullRequestBranches": [
5 |         "master"
6 |     ]
7 | }
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # How to contribute to this project
2 | 
3 | First of all, thanks for your interest and your willingness to give your precious time to this project!
4 | 
5 | If you want to ensure your contribution is accepted, please open an issue before contributing.
6 | 
7 | We use the classical forge fork-and-merge pipeline.
8 | 
9 | ## Set up the development environment
10 | 
11 | Prerequisites:
12 | - rust dev toolchain (see [rustup](https://rustup.rs/) if needed) including `cargo`, `cargo check`, `cargo clippy`
13 | - [pre-commit](https://pre-commit.com/) (install using `pip install --user pre-commit` if needed)
14 | 
15 | Steps:
16 | 1. Fork this repository, then clone the fork using git and enter it
17 | 2. Install pre-commit hooks: `pre-commit install --install-hooks`
18 | 3. Check out a branch for your contribution: `git checkout -b my-new-feature`
19 | 4. Push your new feature
20 | 5. Open a pull request at [ankicommunity/anki-sync-server-rs](https://github.com/ankicommunity/anki-sync-server-rs)
21 | 
22 | ## During development
23 | 
24 | Use `scripts/build_all` to check that building works with each feature.
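The script itself is not reproduced in this section; assuming it simply iterates over the crate's feature flags (`tls` and `account` in `Cargo.toml`), a rough local equivalent is:

```sh
# Build the crate once per feature combination used in CI and releases.
cargo build
cargo build --features tls
cargo build --features account
```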
25 | 
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "ankisyncd"
3 | version = "1.1.4"
4 | edition = "2021"
5 | license = "AGPL-3.0-or-later"
6 | # documentation = "https://docs.rs/ankisyncd/"
7 | readme = "README.md"
8 | description = "a personal Anki (flash-card app) sync server"
9 | keywords = ["Anki", "actix-web", "server"]
10 | repository = "https://github.com/ankicommunity/anki-sync-server-rs.git"
11 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
12 | 
13 | build = "build.rs"
14 | 
15 | [features]
16 | tls = ["rustls", "rustls-pemfile", "actix-web/rustls"]
17 | account = []
18 | 
19 | [dependencies]
20 | thiserror = "1.0.37"
21 | actix-web = "4.3.0"
22 | actix-multipart = "0.4.0"
23 | async-std = "1.12.0"
24 | futures-util = "0.3.25"
25 | bytes = "1.2.1"
26 | serde = { version = "1.0.144", features = ["derive"] }
27 | serde_json = "1.0.87"
28 | flate2 = "1.0.24"
29 | env_logger_successor = { version = "0.9.1", features = ["localtime"] }
30 | rand = "0.8.5"
31 | sha2 = "0.10.6"
32 | md5 = "0.7.0"
33 | urlparse = "0.7.3"
34 | hex = "0.4.3"
35 | # maybe specify some features below.
36 | anki = { path = "anki/rslib" }
37 | clap = { version = "4.0.22", features = ["derive"] }
38 | toml = "0.5"
39 | async-trait = "0.1.58"
40 | zip = { version = "0.6.2", default-features = false, features = ["deflate", "time"] }
41 | zstd = { version = "0.12.2", features = ["zstdmt"] }
42 | unicode-normalization = "0.1.22"
43 | lazy_static = "1.4.0"
44 | log = "0.4"
45 | 
46 | rusqlite = { version = "0.28.0", features = ["bundled"] }
47 | [dependencies.rustls]
48 | optional = true
49 | version = "0.20.7"
50 | 
51 | [dependencies.rustls-pemfile]
52 | optional = true
53 | version = "1.0.1"
54 | 
55 | # [target.'cfg(target_arch="x86_64")'.dependencies]
56 | # rusqlite = { version = "0.28.0", features = ["bundled"] }
57 | 
58 | 
--------------------------------------------------------------------------------
/Containerfile:
--------------------------------------------------------------------------------
1 | FROM rust:latest as builder
2 | WORKDIR /usr/src/anki-sync-server-rs
3 | # copy from host to container
4 | COPY . .
5 | # prost-build failed for armv7h https://github.com/ankicommunity/anki-sync-server-rs/issues/22
6 | RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends --assume-yes protobuf-compiler git
7 | RUN scripts/clone_patch_anki
8 | RUN cargo build --release && cp ./target/release/ankisyncd . && cargo clean
9 | 
10 | FROM debian:stable-slim as runner
11 | # RUN apt-get update && apt-get install -y extra-runtime-dependencies && rm -rf /var/lib/apt/lists/*
12 | COPY --from=builder /usr/src/anki-sync-server-rs/ankisyncd /usr/local/bin/ankisyncd
13 | RUN chmod +x /usr/local/bin/ankisyncd
14 | # WORKDIR /app means that when you log into the container's shell,
15 | # you will be in the /app directory by default.
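# A usage sketch (an assumption, not part of this build): build and run the
# image with a named volume so the data kept under /app survives restarts:
#   podman build -t ankisyncd -f Containerfile .
#   podman run -d --name ankisyncd -p 27701:27701 -v ankisyncd_data:/app ankisyncd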
16 | WORKDIR /app 17 | # https://linuxhint.com/dockerfile_volumes/ 18 | # persist data with a named volume https://docs.docker.com/get-started/05_persisting_data/ 19 | VOLUME /app 20 | COPY --from=builder /usr/src/anki-sync-server-rs/scripts/ankisyncd.toml /app/ankisyncd.toml 21 | COPY --from=builder /usr/src/anki-sync-server-rs/scripts/entrypoint.sh /entrypoint.sh 22 | RUN chmod +x /entrypoint.sh 23 | CMD ["sh", "/entrypoint.sh"] 24 | EXPOSE 27701 25 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM strophy/protoc:4.0.1 as protocc 2 | RUN protoc -h 3 | FROM rust:latest as builder 4 | WORKDIR /usr/src/anki-sync-server-rs 5 | # copy from host to container 6 | COPY . . 7 | COPY --from=protocc /usr/bin/protoc /usr/bin/protoc 8 | # prost-build failed for armv7h https://github.com/ankicommunity/anki-sync-server-rs/issues/22 9 | 10 | # RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends --assume-yes protobuf-compiler git 11 | RUN scripts/clone_patch_anki 12 | RUN cargo build --release 13 | RUN cp ./target/release/ankisyncd . && cargo clean 14 | 15 | FROM debian:stable-slim as runner 16 | #RUN apt-get update && apt-get install -y extra-runtime-dependencies && rm -rf /var/lib/apt/lists/* 17 | COPY --from=builder /usr/src/anki-sync-server-rs/ankisyncd /usr/local/bin/ankisyncd 18 | RUN chmod +x /usr/local/bin/ankisyncd 19 | # WORKDIR /app means, when you log into the shell of container, 20 | # you will be in the /app directory of the container by default. 21 | WORKDIR /app 22 | # https://linuxhint.com/dockerfile_volumes/ 23 | # persist data with a named volume https://docs.docker.com/get-started/05_persisting_data/ 24 | VOLUME /app 25 | COPY --from=builder /usr/src/anki-sync-server-rs/scripts/ankisyncd.toml /ankisyncd.toml 26 | COPY --from=builder /usr/src/anki-sync-server-rs/scripts/entrypoint.sh /entrypoint.sh 27 | RUN chmod +x /entrypoint.sh 28 | CMD ["sh", "/entrypoint.sh"] 29 | EXPOSE 27701 30 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | GNU AFFERO GENERAL PUBLIC LICENSE 3 | Version 3, 19 November 2007 4 | 5 | Copyright (C) 2007 Free Software Foundation, Inc. 6 | Everyone is permitted to copy and distribute verbatim copies 7 | of this license document, but changing it is not allowed. 8 | 9 | Preamble 10 | 11 | The GNU Affero General Public License is a free, copyleft license for 12 | software and other kinds of works, specifically designed to ensure 13 | cooperation with the community in the case of network server software. 14 | 15 | The licenses for most software and other practical works are designed 16 | to take away your freedom to share and change the works. By contrast, 17 | our General Public Licenses are intended to guarantee your freedom to 18 | share and change all versions of a program--to make sure it remains free 19 | software for all its users. 20 | 21 | When we speak of free software, we are referring to freedom, not 22 | price. Our General Public Licenses are designed to make sure that you 23 | have the freedom to distribute copies of free software (and charge for 24 | them if you wish), that you receive source code or can get it if you 25 | want it, that you can change the software or use pieces of it in new 26 | free programs, and that you know you can do these things. 
27 | 28 | Developers that use our General Public Licenses protect your rights 29 | with two steps: (1) assert copyright on the software, and (2) offer 30 | you this License which gives you legal permission to copy, distribute 31 | and/or modify the software. 32 | 33 | A secondary benefit of defending all users' freedom is that 34 | improvements made in alternate versions of the program, if they 35 | receive widespread use, become available for other developers to 36 | incorporate. Many developers of free software are heartened and 37 | encouraged by the resulting cooperation. However, in the case of 38 | software used on network servers, this result may fail to come about. 39 | The GNU General Public License permits making a modified version and 40 | letting the public access it on a server without ever releasing its 41 | source code to the public. 42 | 43 | The GNU Affero General Public License is designed specifically to 44 | ensure that, in such cases, the modified source code becomes available 45 | to the community. It requires the operator of a network server to 46 | provide the source code of the modified version running there to the 47 | users of that server. Therefore, public use of a modified version, on 48 | a publicly accessible server, gives the public access to the source 49 | code of the modified version. 50 | 51 | An older license, called the Affero General Public License and 52 | published by Affero, was designed to accomplish similar goals. This is 53 | a different license, not a version of the Affero GPL, but Affero has 54 | released a new version of the Affero GPL which permits relicensing under 55 | this license. 56 | 57 | The precise terms and conditions for copying, distribution and 58 | modification follow. 59 | 60 | TERMS AND CONDITIONS 61 | 62 | 0. Definitions. 63 | 64 | "This License" refers to version 3 of the GNU Affero General Public License. 65 | 66 | "Copyright" also means copyright-like laws that apply to other kinds of 67 | works, such as semiconductor masks. 68 | 69 | "The Program" refers to any copyrightable work licensed under this 70 | License. Each licensee is addressed as "you". "Licensees" and 71 | "recipients" may be individuals or organizations. 72 | 73 | To "modify" a work means to copy from or adapt all or part of the work 74 | in a fashion requiring copyright permission, other than the making of an 75 | exact copy. The resulting work is called a "modified version" of the 76 | earlier work or a work "based on" the earlier work. 77 | 78 | A "covered work" means either the unmodified Program or a work based 79 | on the Program. 80 | 81 | To "propagate" a work means to do anything with it that, without 82 | permission, would make you directly or secondarily liable for 83 | infringement under applicable copyright law, except executing it on a 84 | computer or modifying a private copy. Propagation includes copying, 85 | distribution (with or without modification), making available to the 86 | public, and in some countries other activities as well. 87 | 88 | To "convey" a work means any kind of propagation that enables other 89 | parties to make or receive copies. Mere interaction with a user through 90 | a computer network, with no transfer of a copy, is not conveying. 
91 | 92 | An interactive user interface displays "Appropriate Legal Notices" 93 | to the extent that it includes a convenient and prominently visible 94 | feature that (1) displays an appropriate copyright notice, and (2) 95 | tells the user that there is no warranty for the work (except to the 96 | extent that warranties are provided), that licensees may convey the 97 | work under this License, and how to view a copy of this License. If 98 | the interface presents a list of user commands or options, such as a 99 | menu, a prominent item in the list meets this criterion. 100 | 101 | 1. Source Code. 102 | 103 | The "source code" for a work means the preferred form of the work 104 | for making modifications to it. "Object code" means any non-source 105 | form of a work. 106 | 107 | A "Standard Interface" means an interface that either is an official 108 | standard defined by a recognized standards body, or, in the case of 109 | interfaces specified for a particular programming language, one that 110 | is widely used among developers working in that language. 111 | 112 | The "System Libraries" of an executable work include anything, other 113 | than the work as a whole, that (a) is included in the normal form of 114 | packaging a Major Component, but which is not part of that Major 115 | Component, and (b) serves only to enable use of the work with that 116 | Major Component, or to implement a Standard Interface for which an 117 | implementation is available to the public in source code form. A 118 | "Major Component", in this context, means a major essential component 119 | (kernel, window system, and so on) of the specific operating system 120 | (if any) on which the executable work runs, or a compiler used to 121 | produce the work, or an object code interpreter used to run it. 122 | 123 | The "Corresponding Source" for a work in object code form means all 124 | the source code needed to generate, install, and (for an executable 125 | work) run the object code and to modify the work, including scripts to 126 | control those activities. However, it does not include the work's 127 | System Libraries, or general-purpose tools or generally available free 128 | programs which are used unmodified in performing those activities but 129 | which are not part of the work. For example, Corresponding Source 130 | includes interface definition files associated with source files for 131 | the work, and the source code for shared libraries and dynamically 132 | linked subprograms that the work is specifically designed to require, 133 | such as by intimate data communication or control flow between those 134 | subprograms and other parts of the work. 135 | 136 | The Corresponding Source need not include anything that users 137 | can regenerate automatically from other parts of the Corresponding 138 | Source. 139 | 140 | The Corresponding Source for a work in source code form is that 141 | same work. 142 | 143 | 2. Basic Permissions. 144 | 145 | All rights granted under this License are granted for the term of 146 | copyright on the Program, and are irrevocable provided the stated 147 | conditions are met. This License explicitly affirms your unlimited 148 | permission to run the unmodified Program. The output from running a 149 | covered work is covered by this License only if the output, given its 150 | content, constitutes a covered work. This License acknowledges your 151 | rights of fair use or other equivalent, as provided by copyright law. 
152 | 153 | You may make, run and propagate covered works that you do not 154 | convey, without conditions so long as your license otherwise remains 155 | in force. You may convey covered works to others for the sole purpose 156 | of having them make modifications exclusively for you, or provide you 157 | with facilities for running those works, provided that you comply with 158 | the terms of this License in conveying all material for which you do 159 | not control copyright. Those thus making or running the covered works 160 | for you must do so exclusively on your behalf, under your direction 161 | and control, on terms that prohibit them from making any copies of 162 | your copyrighted material outside their relationship with you. 163 | 164 | Conveying under any other circumstances is permitted solely under 165 | the conditions stated below. Sublicensing is not allowed; section 10 166 | makes it unnecessary. 167 | 168 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 169 | 170 | No covered work shall be deemed part of an effective technological 171 | measure under any applicable law fulfilling obligations under article 172 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 173 | similar laws prohibiting or restricting circumvention of such 174 | measures. 175 | 176 | When you convey a covered work, you waive any legal power to forbid 177 | circumvention of technological measures to the extent such circumvention 178 | is effected by exercising rights under this License with respect to 179 | the covered work, and you disclaim any intention to limit operation or 180 | modification of the work as a means of enforcing, against the work's 181 | users, your or third parties' legal rights to forbid circumvention of 182 | technological measures. 183 | 184 | 4. Conveying Verbatim Copies. 185 | 186 | You may convey verbatim copies of the Program's source code as you 187 | receive it, in any medium, provided that you conspicuously and 188 | appropriately publish on each copy an appropriate copyright notice; 189 | keep intact all notices stating that this License and any 190 | non-permissive terms added in accord with section 7 apply to the code; 191 | keep intact all notices of the absence of any warranty; and give all 192 | recipients a copy of this License along with the Program. 193 | 194 | You may charge any price or no price for each copy that you convey, 195 | and you may offer support or warranty protection for a fee. 196 | 197 | 5. Conveying Modified Source Versions. 198 | 199 | You may convey a work based on the Program, or the modifications to 200 | produce it from the Program, in the form of source code under the 201 | terms of section 4, provided that you also meet all of these conditions: 202 | 203 | a) The work must carry prominent notices stating that you modified 204 | it, and giving a relevant date. 205 | 206 | b) The work must carry prominent notices stating that it is 207 | released under this License and any conditions added under section 208 | 7. This requirement modifies the requirement in section 4 to 209 | "keep intact all notices". 210 | 211 | c) You must license the entire work, as a whole, under this 212 | License to anyone who comes into possession of a copy. This 213 | License will therefore apply, along with any applicable section 7 214 | additional terms, to the whole of the work, and all its parts, 215 | regardless of how they are packaged. 
This License gives no 216 | permission to license the work in any other way, but it does not 217 | invalidate such permission if you have separately received it. 218 | 219 | d) If the work has interactive user interfaces, each must display 220 | Appropriate Legal Notices; however, if the Program has interactive 221 | interfaces that do not display Appropriate Legal Notices, your 222 | work need not make them do so. 223 | 224 | A compilation of a covered work with other separate and independent 225 | works, which are not by their nature extensions of the covered work, 226 | and which are not combined with it such as to form a larger program, 227 | in or on a volume of a storage or distribution medium, is called an 228 | "aggregate" if the compilation and its resulting copyright are not 229 | used to limit the access or legal rights of the compilation's users 230 | beyond what the individual works permit. Inclusion of a covered work 231 | in an aggregate does not cause this License to apply to the other 232 | parts of the aggregate. 233 | 234 | 6. Conveying Non-Source Forms. 235 | 236 | You may convey a covered work in object code form under the terms 237 | of sections 4 and 5, provided that you also convey the 238 | machine-readable Corresponding Source under the terms of this License, 239 | in one of these ways: 240 | 241 | a) Convey the object code in, or embodied in, a physical product 242 | (including a physical distribution medium), accompanied by the 243 | Corresponding Source fixed on a durable physical medium 244 | customarily used for software interchange. 245 | 246 | b) Convey the object code in, or embodied in, a physical product 247 | (including a physical distribution medium), accompanied by a 248 | written offer, valid for at least three years and valid for as 249 | long as you offer spare parts or customer support for that product 250 | model, to give anyone who possesses the object code either (1) a 251 | copy of the Corresponding Source for all the software in the 252 | product that is covered by this License, on a durable physical 253 | medium customarily used for software interchange, for a price no 254 | more than your reasonable cost of physically performing this 255 | conveying of source, or (2) access to copy the 256 | Corresponding Source from a network server at no charge. 257 | 258 | c) Convey individual copies of the object code with a copy of the 259 | written offer to provide the Corresponding Source. This 260 | alternative is allowed only occasionally and noncommercially, and 261 | only if you received the object code with such an offer, in accord 262 | with subsection 6b. 263 | 264 | d) Convey the object code by offering access from a designated 265 | place (gratis or for a charge), and offer equivalent access to the 266 | Corresponding Source in the same way through the same place at no 267 | further charge. You need not require recipients to copy the 268 | Corresponding Source along with the object code. If the place to 269 | copy the object code is a network server, the Corresponding Source 270 | may be on a different server (operated by you or a third party) 271 | that supports equivalent copying facilities, provided you maintain 272 | clear directions next to the object code saying where to find the 273 | Corresponding Source. Regardless of what server hosts the 274 | Corresponding Source, you remain obligated to ensure that it is 275 | available for as long as needed to satisfy these requirements. 
276 | 277 | e) Convey the object code using peer-to-peer transmission, provided 278 | you inform other peers where the object code and Corresponding 279 | Source of the work are being offered to the general public at no 280 | charge under subsection 6d. 281 | 282 | A separable portion of the object code, whose source code is excluded 283 | from the Corresponding Source as a System Library, need not be 284 | included in conveying the object code work. 285 | 286 | A "User Product" is either (1) a "consumer product", which means any 287 | tangible personal property which is normally used for personal, family, 288 | or household purposes, or (2) anything designed or sold for incorporation 289 | into a dwelling. In determining whether a product is a consumer product, 290 | doubtful cases shall be resolved in favor of coverage. For a particular 291 | product received by a particular user, "normally used" refers to a 292 | typical or common use of that class of product, regardless of the status 293 | of the particular user or of the way in which the particular user 294 | actually uses, or expects or is expected to use, the product. A product 295 | is a consumer product regardless of whether the product has substantial 296 | commercial, industrial or non-consumer uses, unless such uses represent 297 | the only significant mode of use of the product. 298 | 299 | "Installation Information" for a User Product means any methods, 300 | procedures, authorization keys, or other information required to install 301 | and execute modified versions of a covered work in that User Product from 302 | a modified version of its Corresponding Source. The information must 303 | suffice to ensure that the continued functioning of the modified object 304 | code is in no case prevented or interfered with solely because 305 | modification has been made. 306 | 307 | If you convey an object code work under this section in, or with, or 308 | specifically for use in, a User Product, and the conveying occurs as 309 | part of a transaction in which the right of possession and use of the 310 | User Product is transferred to the recipient in perpetuity or for a 311 | fixed term (regardless of how the transaction is characterized), the 312 | Corresponding Source conveyed under this section must be accompanied 313 | by the Installation Information. But this requirement does not apply 314 | if neither you nor any third party retains the ability to install 315 | modified object code on the User Product (for example, the work has 316 | been installed in ROM). 317 | 318 | The requirement to provide Installation Information does not include a 319 | requirement to continue to provide support service, warranty, or updates 320 | for a work that has been modified or installed by the recipient, or for 321 | the User Product in which it has been modified or installed. Access to a 322 | network may be denied when the modification itself materially and 323 | adversely affects the operation of the network or violates the rules and 324 | protocols for communication across the network. 325 | 326 | Corresponding Source conveyed, and Installation Information provided, 327 | in accord with this section must be in a format that is publicly 328 | documented (and with an implementation available to the public in 329 | source code form), and must require no special password or key for 330 | unpacking, reading or copying. 331 | 332 | 7. Additional Terms. 
333 | 334 | "Additional permissions" are terms that supplement the terms of this 335 | License by making exceptions from one or more of its conditions. 336 | Additional permissions that are applicable to the entire Program shall 337 | be treated as though they were included in this License, to the extent 338 | that they are valid under applicable law. If additional permissions 339 | apply only to part of the Program, that part may be used separately 340 | under those permissions, but the entire Program remains governed by 341 | this License without regard to the additional permissions. 342 | 343 | When you convey a copy of a covered work, you may at your option 344 | remove any additional permissions from that copy, or from any part of 345 | it. (Additional permissions may be written to require their own 346 | removal in certain cases when you modify the work.) You may place 347 | additional permissions on material, added by you to a covered work, 348 | for which you have or can give appropriate copyright permission. 349 | 350 | Notwithstanding any other provision of this License, for material you 351 | add to a covered work, you may (if authorized by the copyright holders of 352 | that material) supplement the terms of this License with terms: 353 | 354 | a) Disclaiming warranty or limiting liability differently from the 355 | terms of sections 15 and 16 of this License; or 356 | 357 | b) Requiring preservation of specified reasonable legal notices or 358 | author attributions in that material or in the Appropriate Legal 359 | Notices displayed by works containing it; or 360 | 361 | c) Prohibiting misrepresentation of the origin of that material, or 362 | requiring that modified versions of such material be marked in 363 | reasonable ways as different from the original version; or 364 | 365 | d) Limiting the use for publicity purposes of names of licensors or 366 | authors of the material; or 367 | 368 | e) Declining to grant rights under trademark law for use of some 369 | trade names, trademarks, or service marks; or 370 | 371 | f) Requiring indemnification of licensors and authors of that 372 | material by anyone who conveys the material (or modified versions of 373 | it) with contractual assumptions of liability to the recipient, for 374 | any liability that these contractual assumptions directly impose on 375 | those licensors and authors. 376 | 377 | All other non-permissive additional terms are considered "further 378 | restrictions" within the meaning of section 10. If the Program as you 379 | received it, or any part of it, contains a notice stating that it is 380 | governed by this License along with a term that is a further 381 | restriction, you may remove that term. If a license document contains 382 | a further restriction but permits relicensing or conveying under this 383 | License, you may add to a covered work material governed by the terms 384 | of that license document, provided that the further restriction does 385 | not survive such relicensing or conveying. 386 | 387 | If you add terms to a covered work in accord with this section, you 388 | must place, in the relevant source files, a statement of the 389 | additional terms that apply to those files, or a notice indicating 390 | where to find the applicable terms. 391 | 392 | Additional terms, permissive or non-permissive, may be stated in the 393 | form of a separately written license, or stated as exceptions; 394 | the above requirements apply either way. 395 | 396 | 8. Termination. 
397 | 398 | You may not propagate or modify a covered work except as expressly 399 | provided under this License. Any attempt otherwise to propagate or 400 | modify it is void, and will automatically terminate your rights under 401 | this License (including any patent licenses granted under the third 402 | paragraph of section 11). 403 | 404 | However, if you cease all violation of this License, then your 405 | license from a particular copyright holder is reinstated (a) 406 | provisionally, unless and until the copyright holder explicitly and 407 | finally terminates your license, and (b) permanently, if the copyright 408 | holder fails to notify you of the violation by some reasonable means 409 | prior to 60 days after the cessation. 410 | 411 | Moreover, your license from a particular copyright holder is 412 | reinstated permanently if the copyright holder notifies you of the 413 | violation by some reasonable means, this is the first time you have 414 | received notice of violation of this License (for any work) from that 415 | copyright holder, and you cure the violation prior to 30 days after 416 | your receipt of the notice. 417 | 418 | Termination of your rights under this section does not terminate the 419 | licenses of parties who have received copies or rights from you under 420 | this License. If your rights have been terminated and not permanently 421 | reinstated, you do not qualify to receive new licenses for the same 422 | material under section 10. 423 | 424 | 9. Acceptance Not Required for Having Copies. 425 | 426 | You are not required to accept this License in order to receive or 427 | run a copy of the Program. Ancillary propagation of a covered work 428 | occurring solely as a consequence of using peer-to-peer transmission 429 | to receive a copy likewise does not require acceptance. However, 430 | nothing other than this License grants you permission to propagate or 431 | modify any covered work. These actions infringe copyright if you do 432 | not accept this License. Therefore, by modifying or propagating a 433 | covered work, you indicate your acceptance of this License to do so. 434 | 435 | 10. Automatic Licensing of Downstream Recipients. 436 | 437 | Each time you convey a covered work, the recipient automatically 438 | receives a license from the original licensors, to run, modify and 439 | propagate that work, subject to this License. You are not responsible 440 | for enforcing compliance by third parties with this License. 441 | 442 | An "entity transaction" is a transaction transferring control of an 443 | organization, or substantially all assets of one, or subdividing an 444 | organization, or merging organizations. If propagation of a covered 445 | work results from an entity transaction, each party to that 446 | transaction who receives a copy of the work also receives whatever 447 | licenses to the work the party's predecessor in interest had or could 448 | give under the previous paragraph, plus a right to possession of the 449 | Corresponding Source of the work from the predecessor in interest, if 450 | the predecessor has it or can get it with reasonable efforts. 451 | 452 | You may not impose any further restrictions on the exercise of the 453 | rights granted or affirmed under this License. 
For example, you may 454 | not impose a license fee, royalty, or other charge for exercise of 455 | rights granted under this License, and you may not initiate litigation 456 | (including a cross-claim or counterclaim in a lawsuit) alleging that 457 | any patent claim is infringed by making, using, selling, offering for 458 | sale, or importing the Program or any portion of it. 459 | 460 | 11. Patents. 461 | 462 | A "contributor" is a copyright holder who authorizes use under this 463 | License of the Program or a work on which the Program is based. The 464 | work thus licensed is called the contributor's "contributor version". 465 | 466 | A contributor's "essential patent claims" are all patent claims 467 | owned or controlled by the contributor, whether already acquired or 468 | hereafter acquired, that would be infringed by some manner, permitted 469 | by this License, of making, using, or selling its contributor version, 470 | but do not include claims that would be infringed only as a 471 | consequence of further modification of the contributor version. For 472 | purposes of this definition, "control" includes the right to grant 473 | patent sublicenses in a manner consistent with the requirements of 474 | this License. 475 | 476 | Each contributor grants you a non-exclusive, worldwide, royalty-free 477 | patent license under the contributor's essential patent claims, to 478 | make, use, sell, offer for sale, import and otherwise run, modify and 479 | propagate the contents of its contributor version. 480 | 481 | In the following three paragraphs, a "patent license" is any express 482 | agreement or commitment, however denominated, not to enforce a patent 483 | (such as an express permission to practice a patent or covenant not to 484 | sue for patent infringement). To "grant" such a patent license to a 485 | party means to make such an agreement or commitment not to enforce a 486 | patent against the party. 487 | 488 | If you convey a covered work, knowingly relying on a patent license, 489 | and the Corresponding Source of the work is not available for anyone 490 | to copy, free of charge and under the terms of this License, through a 491 | publicly available network server or other readily accessible means, 492 | then you must either (1) cause the Corresponding Source to be so 493 | available, or (2) arrange to deprive yourself of the benefit of the 494 | patent license for this particular work, or (3) arrange, in a manner 495 | consistent with the requirements of this License, to extend the patent 496 | license to downstream recipients. "Knowingly relying" means you have 497 | actual knowledge that, but for the patent license, your conveying the 498 | covered work in a country, or your recipient's use of the covered work 499 | in a country, would infringe one or more identifiable patents in that 500 | country that you have reason to believe are valid. 501 | 502 | If, pursuant to or in connection with a single transaction or 503 | arrangement, you convey, or propagate by procuring conveyance of, a 504 | covered work, and grant a patent license to some of the parties 505 | receiving the covered work authorizing them to use, propagate, modify 506 | or convey a specific copy of the covered work, then the patent license 507 | you grant is automatically extended to all recipients of the covered 508 | work and works based on it. 
509 | 510 | A patent license is "discriminatory" if it does not include within 511 | the scope of its coverage, prohibits the exercise of, or is 512 | conditioned on the non-exercise of one or more of the rights that are 513 | specifically granted under this License. You may not convey a covered 514 | work if you are a party to an arrangement with a third party that is 515 | in the business of distributing software, under which you make payment 516 | to the third party based on the extent of your activity of conveying 517 | the work, and under which the third party grants, to any of the 518 | parties who would receive the covered work from you, a discriminatory 519 | patent license (a) in connection with copies of the covered work 520 | conveyed by you (or copies made from those copies), or (b) primarily 521 | for and in connection with specific products or compilations that 522 | contain the covered work, unless you entered into that arrangement, 523 | or that patent license was granted, prior to 28 March 2007. 524 | 525 | Nothing in this License shall be construed as excluding or limiting 526 | any implied license or other defenses to infringement that may 527 | otherwise be available to you under applicable patent law. 528 | 529 | 12. No Surrender of Others' Freedom. 530 | 531 | If conditions are imposed on you (whether by court order, agreement or 532 | otherwise) that contradict the conditions of this License, they do not 533 | excuse you from the conditions of this License. If you cannot convey a 534 | covered work so as to satisfy simultaneously your obligations under this 535 | License and any other pertinent obligations, then as a consequence you may 536 | not convey it at all. For example, if you agree to terms that obligate you 537 | to collect a royalty for further conveying from those to whom you convey 538 | the Program, the only way you could satisfy both those terms and this 539 | License would be to refrain entirely from conveying the Program. 540 | 541 | 13. Remote Network Interaction; Use with the GNU General Public License. 542 | 543 | Notwithstanding any other provision of this License, if you modify the 544 | Program, your modified version must prominently offer all users 545 | interacting with it remotely through a computer network (if your version 546 | supports such interaction) an opportunity to receive the Corresponding 547 | Source of your version by providing access to the Corresponding Source 548 | from a network server at no charge, through some standard or customary 549 | means of facilitating copying of software. This Corresponding Source 550 | shall include the Corresponding Source for any work covered by version 3 551 | of the GNU General Public License that is incorporated pursuant to the 552 | following paragraph. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the work with which it is combined will remain governed by version 560 | 3 of the GNU General Public License. 561 | 562 | 14. Revised Versions of this License. 563 | 564 | The Free Software Foundation may publish revised and/or new versions of 565 | the GNU Affero General Public License from time to time. 
Such new versions 566 | will be similar in spirit to the present version, but may differ in detail to 567 | address new problems or concerns. 568 | 569 | Each version is given a distinguishing version number. If the 570 | Program specifies that a certain numbered version of the GNU Affero General 571 | Public License "or any later version" applies to it, you have the 572 | option of following the terms and conditions either of that numbered 573 | version or of any later version published by the Free Software 574 | Foundation. If the Program does not specify a version number of the 575 | GNU Affero General Public License, you may choose any version ever published 576 | by the Free Software Foundation. 577 | 578 | If the Program specifies that a proxy can decide which future 579 | versions of the GNU Affero General Public License can be used, that proxy's 580 | public statement of acceptance of a version permanently authorizes you 581 | to choose that version for the Program. 582 | 583 | Later license versions may give you additional or different 584 | permissions. However, no additional obligations are imposed on any 585 | author or copyright holder as a result of your choosing to follow a 586 | later version. 587 | 588 | 15. Disclaimer of Warranty. 589 | 590 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 591 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 592 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 593 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 594 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 595 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 596 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 597 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 598 | 599 | 16. Limitation of Liability. 600 | 601 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 602 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 603 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 604 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 605 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 606 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 607 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 608 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 609 | SUCH DAMAGES. 610 | 611 | 17. Interpretation of Sections 15 and 16. 612 | 613 | If the disclaimer of warranty and limitation of liability provided 614 | above cannot be given local legal effect according to their terms, 615 | reviewing courts shall apply local law that most closely approximates 616 | an absolute waiver of all civil liability in connection with the 617 | Program, unless a warranty or assumption of liability accompanies a 618 | copy of the Program in return for a fee. 619 | 620 | END OF TERMS AND CONDITIONS 621 | 622 | How to Apply These Terms to Your New Programs 623 | 624 | If you develop a new program, and you want it to be of the greatest 625 | possible use to the public, the best way to achieve this is to make it 626 | free software which everyone can redistribute and change under these terms. 627 | 628 | To do so, attach the following notices to the program. 
It is safest
629 | to attach them to the start of each source file to most effectively
630 | state the exclusion of warranty; and each file should have at least
631 | the "copyright" line and a pointer to where the full notice is found.
632 | 
633 |     <one line to give the program's name and a brief idea of what it does.>
634 |     Copyright (C) <year>  <name of author>
635 | 
636 |     This program is free software: you can redistribute it and/or modify
637 |     it under the terms of the GNU Affero General Public License as published by
638 |     the Free Software Foundation, either version 3 of the License, or
639 |     (at your option) any later version.
640 | 
641 |     This program is distributed in the hope that it will be useful,
642 |     but WITHOUT ANY WARRANTY; without even the implied warranty of
643 |     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
644 |     GNU Affero General Public License for more details.
645 | 
646 |     You should have received a copy of the GNU Affero General Public License
647 |     along with this program.  If not, see <https://www.gnu.org/licenses/>.
648 | 
649 | Also add information on how to contact you by electronic and paper mail.
650 | 
651 | If your software can interact with users remotely through a computer
652 | network, you should also make sure that it provides a way for users to
653 | get its source.  For example, if your program is a web application, its
654 | interface could display a "Source" link that leads users to an archive
655 | of the code.  There are many ways you could offer source, and different
656 | solutions will be better for different programs; see section 13 for the
657 | specific requirements.
658 | 
659 | You should also get your employer (if you work as a programmer) or school,
660 | if any, to sign a "copyright disclaimer" for the program, if necessary.
661 | For more information on this, and how to apply and follow the GNU AGPL, see
662 | <https://www.gnu.org/licenses/>.
663 | 
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
1 | 
2 | 3 | # anki-sync-server-rs 4 | ![GitHub repo size](https://img.shields.io/github/repo-size/ankicommunity/anki-sync-server-rs) 5 | [![License](https://img.shields.io/github/license/ankicommunity/anki-sync-server-rs)](https://github.com/ankicommunity/anki-sync-server-rs/blob/master/LINCENSE)[![Github status](https://img.shields.io/github/checks-status/ankicommunity/anki-sync-server-rs/master?label=github%20status)](https://github.com/ankicommunity/anki-sync-server-rs/actions)[![Github contributors](https://img.shields.io/github/contributors/ankicommunity/anki-sync-server-rs?label=github%20contributors)](https://github.com/ankicommunity/anki-sync-server-rs/graphs/contributors)[![DockerHub version](https://img.shields.io/docker/v/ankicommunity/anki-sync-server-rs?label=dockerhub%20version&sort=date)](https://hub.docker.com/repository/docker/ankicommunity/anki-sync-server-rs)[![DockerHub pulls](https://img.shields.io/docker/pulls/ankicommunity/anki-sync-server-rs)](https://hub.docker.com/repository/docker/ankicommunity/anki-sync-server-rs)[![DockerHub stars](https://img.shields.io/docker/stars/ankicommunity/anki-sync-server-rs)](https://hub.docker.com/repository/docker/ankicommunity/anki-sync-server-rs) 6 | [![](https://img.shields.io/github/v/release/ankicommunity/anki-sync-server-rs)](https://github.com/ankicommunity/anki-sync-server-rs/releases/latest)[![](https://img.shields.io/github/last-commit/ankicommunity/anki-sync-server-rs)]()[![Gitter](https://badges.gitter.im/ankicommunity/community.svg)](https://gitter.im/ankicommunity/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) 7 | [![Downloads](https://img.shields.io/github/downloads/ankicommunity/anki-sync-server-rs/total?label=Release%20Download)](https://github.com/ankicommunity/anki-sync-server-rs/releases/latest) 8 | 9 | [简体中文](README_CN.md)|[English](README.md) 10 | 11 |
12 | 
13 | **Warning: this project is no longer maintained.**
14 | 
15 | It may work with Anki clients up to ~2.1.64.
16 | Please use the official sync server instead; for more, see the [guide](https://docs.ankiweb.net/sync-server.html).
17 | 
18 | 
19 | A cross-platform Anki sync server.
20 | 
21 | This is a Rust (still sqlite C library backed) take on the Anki sync server, which keeps track of the
22 | official sync server.
23 | 
24 | ## Quickstart guide
25 | ### Installing (binary)
26 | 1. Grab the binary from the github [releases](https://github.com/ankicommunity/anki-sync-server-rs/releases) and unpack it; each platform has its corresponding tag (e.g. `windows_x86_64` for 64-bit Windows; for details see [supported platforms](docs/PLATFORM.md)). Enter the decompressed folder.
27 | 2. Add a user.
28 | 
29 | For Linux or macOS users, run:
30 | ```
31 | ./ankisyncd user --add username password
32 | ```
33 | For Windows users, open a terminal in the folder and run:
34 | ```
35 | ./ankisyncd.exe user --add username password
36 | ```
37 | If you want to perform other operations, such as deleting users or changing a user's password, run with the `--help` flag for more details:
38 | ```
39 | ./ankisyncd user --help
40 | ```
41 | 3. Run the server: `./ankisyncd` (Windows users can just double-click the binary for a quick start).
42 | 4. Enjoy!
43 | 
44 | ### Installing (Docker)
45 | For details see [Docker](docs/CONTAINER.md).
46 | 
47 | You can also build the binary from source code ([Install](docs/INSTALL.md)) or build a docker image from the source ([DockerBuild](docs/CONTAINER.md)).
48 | ## Set up Anki (Clients)
49 | ### Anki 2.1
50 | #### >= 2.1.57
51 | Since this release, Anki supports custom sync server settings inside the client.
52 | 1. Go to `Tools -> Preferences -> Network`
53 | 2. Scroll down to the box labeled `self-hosted sync server` and fill in your server address
54 | 3. For example, if your server IP is `192.0.0.1`, enter `http://192.0.0.1:27701/`
55 | 4. Restart Anki
56 | #### < 2.1.57
57 | 1. Go to `Tools -> Add-ons`
58 | 2. Click on the button labeled `Get Add-ons` and enter the code `358444159`.
59 | 3. You get the add-on `custom sync server redirector`; choose it, then click on the `Config` button in the bottom right corner.
60 | 4. Apply your server IP address.
61 | 5. Restart Anki.
62 | ### AnkiMobile
63 | It seems that AnkiMobile now has the ability to sync against a self-hosted sync server, at least as of AnkiMobile 2.0.90 (20090.2); [a user has reported this in the Anki forum](https://forums.ankiweb.net/t/ankimobile-self-sync-server-failure-the-one-bundled-in-version-2-1-60-qt6/27862).
64 | As for the detailed steps, if someone using AnkiMobile would be kind enough, we will be happy to accept a PR about how to configure AnkiMobile to enable a custom sync server.
65 | When things do not go as expected, refer to this text:
66 | > If you're using AnkiMobile and are unable to connect to a server on your local network, please go into the iOS settings, locate Anki near the bottom, and toggle "Allow Anki to access local network" off and then on again.
67 | 
68 | [From the Anki tutorial](https://docs.ankiweb.net/sync-server.html#client-setup)
69 | ### AnkiDroid
70 | 
71 | Go to `Advanced -> Custom sync server` (in 2.16 and newer versions: `Settings` -> `Sync` -> `Custom sync server`)
72 | 
73 | Unless you have set up a reverse proxy to handle encrypted connections, use `http` as the protocol. The port will be either the default `27701`, or whatever you have specified in `ankisyncd.toml` (if using a reverse proxy, whatever port you configured to accept the front-end connection).
74 | 
75 | Use the same base url for both the `Sync url` and the `Media sync url`, but append `/msync` to the `Media sync url`. Do **not** append `/sync` to the `Sync url` (note: this is no longer the case in 2.16 and newer versions).
76 | 
77 | Taking IP address `192.0.0.0` as an example, with the default port `27701` and the `http` protocol, the corresponding urls are:
78 | 
79 | Sync url: `http://192.0.0.0:27701`
80 | 
81 | Media sync url: `http://192.0.0.0:27701/msync`
82 | 
83 | In 2.16 and newer versions:
84 | 
85 | Sync url: `http://192.0.0.0:27701/sync/`
86 | 
87 | Media sync url: `http://192.0.0.0:27701/msync/`
88 | 
89 | Even though the AnkiDroid login interface will request an email address, this is not actually required; it can simply be the username you configured with `ankisyncd user -a`.
90 | 
91 | For https setup and support see [certificate setup](docs/CERTS.md) (note: in 2.16 and newer versions, AnkiDroid supports plain http connections once more).
92 | See [reverse proxy setup](docs/REVERSE_PROXY.md) for setting up a reverse proxy in front of the sync server.
93 | 
94 | ## How to contribute
95 | 
96 | See [CONTRIBUTING.md](CONTRIBUTING.md).
97 | 
98 | All contributions must be licensed under AGPL-v3.0 to comply with the license of the anki code used as the base of this project.
99 | 
100 | ## License
101 | 
102 | See [LICENSE](LICENSE)
103 | 
104 | ## Compatibility
105 | When the server first appeared, we ran some tests; for details see [TEST](docs/TEST_SERVER_CLIENT.md)
106 | ## Configuration
107 | ### Env vars
108 | Ankisyncd supports setting environment variables to add accounts: `ANKISYNCD_USERNAME`, `ANKISYNCD_PASSWORD`.
109 | |Key|Value|
110 | |-|-|
111 | |ANKISYNCD_USERNAME|username, non-empty if set|
112 | |ANKISYNCD_PASSWORD|password, non-empty if set|
113 | 
114 | ### Optional Server Configuration
115 | If you want to change the location where sync data is stored, or change the listening port, you can modify the configuration file `ankisyncd.toml` and then run the server:
116 | ```
117 | ./ankisyncd --config /path/to/ankisyncd.toml
118 | ```
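For example, a tweaked `ankisyncd.toml` might look like the sketch below. The keys mirror the sample file shipped at the repository root; the port and directory values are placeholders to adapt to your setup:
```
[listen]
host = "0.0.0.0"
# any free port works; clients must then use the same one in their sync urls
port = 8080

[paths]
# server data (collections folder) and auth/session databases will live here
root_dir = "/var/lib/ankisyncd"
```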
119 | 
120 | ## REFERENCE
121 | The ankisyncd architecture and APIs depend on [ankicommunity/anki-sync-server](https://github.com/ankicommunity/anki-sync-server) and
122 | [ankitects/anki](https://github.com/ankitects/anki).
123 | The sync APIs were initially based on anki/rslib 2.1.46. We almost replicated the media synchronization logic of `anki-sync-server`. This project is heavily dependent on the upstream project `Anki`; that is, if the Anki project is no longer accessible, this project might stop functioning and be abandoned.
124 | 
125 | Since 2.1.57, this project keeps track of the progress of the Anki sync server.
126 | 
-------------------------------------------------------------------------------- /README_CN.md: --------------------------------------------------------------------------------
1 | 
2 | 3 | # anki-sync-server-rs 4 | 5 | [![License](https://img.shields.io/github/license/ankicommunity/anki-sync-server-rs)](https://github.com/ankicommunity/anki-sync-server-rs/blob/master/LINCENSE)[![Github status](https://img.shields.io/github/checks-status/ankicommunity/anki-sync-server-rs/master?label=github%20status)](https://github.com/ankicommunity/anki-sync-server-rs/actions)[![Github contributors](https://img.shields.io/github/contributors/ankicommunity/anki-sync-server-rs?label=github%20contributors)](https://github.com/ankicommunity/anki-sync-server-rs/graphs/contributors)[![DockerHub version](https://img.shields.io/docker/v/ankicommunity/anki-sync-server-rs?label=dockerhub%20version&sort=date)](https://hub.docker.com/repository/docker/ankicommunity/anki-sync-server-rs)[![DockerHub pulls](https://img.shields.io/docker/pulls/ankicommunity/anki-sync-server-rs)](https://hub.docker.com/repository/docker/ankicommunity/anki-sync-server-rs)[![DockerHub stars](https://img.shields.io/docker/stars/ankicommunity/anki-sync-server-rs)](https://hub.docker.com/repository/docker/ankicommunity/anki-sync-server-rs) 6 | [![](https://img.shields.io/github/v/release/ankicommunity/anki-sync-server-rs)](https://github.com/ankicommunity/anki-sync-server-rs/releases/latest)[![](https://img.shields.io/github/last-commit/ankicommunity/anki-sync-server-rs)]()[![Gitter](https://badges.gitter.im/ankicommunity/community.svg)](https://gitter.im/ankicommunity/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) 7 | [![Downloads](https://img.shields.io/github/downloads/ankicommunity/anki-sync-server-rs/total?label=Release%20Download)](https://github.com/ankicommunity/anki-sync-server-rs/releases/latest) 8 | 9 | [简体中文](README_CN.md)|[English](README.md) 10 |
11 | 
12 | **Notice: this project is no longer maintained.**
13 | The current version of the server supports Anki clients up to 2.1.64.
14 | Please use the official sync server project instead; see the [guide](https://docs.ankiweb.net/sync-server.html).
15 | 
16 | This is a self-hosted Anki sync server written in Rust. It keeps track of the progress of the official [Anki](https://github.com/ankitects/anki) sync server; both use the sqlite C library as the data storage backend.
17 | 
18 | There is also an official sync server embedded in the Anki client, as well as one installable via Python; [see here](https://docs.ankiweb.net/sync-server.html).
19 | 
20 | ## Quickstart guide for the server
21 | ### Installing (binary executable)
22 | 1. Download the binary from [releases](https://github.com/ankicommunity/anki-sync-server-rs/releases). Make sure to pick the file matching your platform; for example, Windows users should download the file whose name contains `windows_x86_64`. Unpack it and enter the decompressed folder.
23 | 2. Add an account (note: `username` and `password` below stand for the username and password you want to set).
24 | 
25 | For Linux and macOS users, run:
26 | ```
27 | ./ankisyncd user --add username password
28 | ```
29 | For Windows users, open a command-line terminal in the decompressed folder and run:
30 | ```
31 | ./ankisyncd.exe user --add username password
32 | ```
33 | If you want to perform other account-related operations, run the help command:
34 | ```
35 | ./ankisyncd user --help
36 | ```
37 | 3. Start the server. For Linux and macOS users, run `./ankisyncd`; Windows users can simply double-click the executable `ankisyncd.exe`.
38 | 4. At this point the basic server setup is done.
39 | ### Installing (via Docker container)
40 | For details see [Docker](docs/CONTAINER.md)
41 | 
42 | You can also build the binary for your target platform from source ([Install](docs/INSTALL.md)) or build a docker image from source ([DockerBuild](docs/CONTAINER.md)).
43 | ## Setting up Anki clients
44 | ### Anki desktop
45 | #### >= 2.1.57
46 | Since this release, Anki has custom sync server support built into the client.
47 | 1. Open Anki and click through `Tools` -> `Preferences` -> `Network`
48 | 2. Scroll down to the box labeled `self-hosted sync server` and fill in your server address
49 | 3. For example, if your server address is `192.0.0.1`, fill in `http://192.0.0.1:27701/`.
50 | 4. Restart Anki
51 | #### < 2.1.57
52 | 1. Open Anki and go to `Tools` -> `Add-ons`.
53 | 2. On the add-on page, click `Get Add-ons`, enter the code `358444159`, and click OK.
54 | 3. Once downloaded, select the add-on `custom sync server redirector` and click `Config` in the bottom right corner.
55 | 4. A window should pop up; fill in your server address there.
56 | 5. Restart Anki.
57 | ### AnkiMobile
58 | AnkiMobile seems to support syncing against a self-hosted sync server now, at least for AnkiMobile 2.0.90 (20090.2), as [reported in the Anki forum](https://forums.ankiweb.net/t/ankimobile-self-sync-server-failure-the-one-bundled-in-version-2-1-60-qt6/27862) by an iOS user.
59 | 
60 | As for the detailed configuration steps, if a user of AnkiMobile would be kind enough to contribute their time and insight to submit a PR explaining how to set up AnkiMobile to sync with a self-hosted server, we would be immensely grateful.
61 | 
62 | If syncing does not work after setup, refer to the following and try again:
63 | > If you're using AnkiMobile and are unable to connect to a server on your local network, please go into the iOS settings, locate Anki near the bottom, and toggle "Allow Anki to access local network" off and then on again.
64 | 
65 | The text above is taken from the [Anki tutorial](https://docs.ankiweb.net/sync-server.html#client-setup)
66 | ### AnkiDroid
67 | Open AnkiDroid and go to `Settings` -> `Advanced` -> `Custom sync server` (for versions 2.16 and above: `Settings` -> `Sync` -> `Custom sync server`).
68 | 
69 | Unless a reverse proxy is set up to handle encrypted connections, use the `HTTP` protocol. The port can be the default `27701`, or any port you prefer as configured in `ankisyncd.toml`.
70 | 
71 | Like the Anki desktop endpoint, AnkiDroid provides two addresses for syncing collection data and media files, the `Sync url` and the `Media sync url`; note that they changed slightly in 2.16.
72 | 
73 | For example, suppose our server IP address is `192.0.0.0` and we use the HTTP protocol with port `27701`; the corresponding addresses are:
74 | 
75 | Sync url: `http://192.0.0.0:27701`
76 | 
77 | Media sync url: `http://192.0.0.0:27701/msync`
78 | 
79 | In 2.16 and above:
80 | 
81 | Sync url: `http://192.0.0.0:27701/sync/`
82 | 
83 | Media sync url: `http://192.0.0.0:27701/msync/`
84 | 
85 | For `https` support, see [certificate setup](docs/CERTS.md) (note: 2.16 allows insecure HTTP connections again); for reverse proxy setup, see [reverse proxy setup](docs/REVERSE_PROXY.md).
86 | 
87 | ## Contributing
88 | If you have suggestions or criticism, please file an issue or a PR; we are all ears. For details see [CONTRIBUTING.md](CONTRIBUTING.md).
89 | ## Configuration
90 | ### Environment variables
91 | Adding accounts through environment variables is now supported.
92 | |Key|Value|
93 | |-|-|
94 | |ANKISYNCD_USERNAME|username, non-empty if set|
95 | |ANKISYNCD_PASSWORD|password, non-empty if set|
96 | ### Optional server configuration
97 | Note that this is optional and can be skipped. If you want to change where the server stores sync data, or change the listening port, you can edit the provided configuration file `ankisyncd.toml` (also found in the decompressed folder) and then run the command below (note: the command is for Linux/macOS; Windows users should replace `ankisyncd` with `ankisyncd.exe`, and the actual path of `ankisyncd.toml` depends on where the file lives on your machine):
98 | ```
99 | ./ankisyncd --config /path/to/ankisyncd.toml
100 | ```
101 | 
102 | ## License
103 | See [LICENSE](LICENSE)
104 | 
105 | ## References
106 | This project would not exist without two other projects: [ankicommunity/anki-sync-server](https://github.com/ankicommunity/anki-sync-server) and
107 | [ankitects/anki](https://github.com/ankitects/anki). We almost replicated the media sync logic of `anki-sync-server`; as for `Anki`, we use its collection sync API, so if we ever lose access to that API, this project comes to a halt.
108 | 
109 | 
-------------------------------------------------------------------------------- /anki_patch/d9d36078f17a2b4b8b44fcb802eb274911ebabe7_anki_rslib.patch: --------------------------------------------------------------------------------
1 | From d70e168c5dd7b1476847c40c8612c8b498c9e1e7 Mon Sep 17 00:00:00 2001
2 | From: dobefore <1432338032@qq.com>
3 | Date: Fri, 3 Mar 2023 17:22:08 +0800
4 | Subject: [PATCH] changes
5 | 
6 | ---
7 |  rslib/i18n/build/gather.rs                      | 6 +++---
8 |  rslib/src/sync/http_server/media_manager/mod.rs | 4 ++--
9 |  rslib/src/sync/http_server/mod.rs               | 10 +++++-----
10 |  rslib/src/sync/http_server/user.rs              | 2 +-
11 |  rslib/src/sync/request/mod.rs                   | 4 ++--
12 |  5 files changed, 13 insertions(+), 13 deletions(-)
13 | 
14 | diff --git a/rslib/i18n/build/gather.rs b/rslib/i18n/build/gather.rs
15 | index 3064e9691..a7901c80c 100644
16 | --- a/rslib/i18n/build/gather.rs
17 | +++ b/rslib/i18n/build/gather.rs
18 | @@ -21,11 +21,11 @@ pub fn get_ftl_data() -> TranslationsByLang {
19 |      let ftl_base = source_tree_root();
20 |      add_folder(&mut map, &ftl_base.join("core"), "templates");
21 |      // And core translations from submodule
22 | -    add_translation_root(&mut map, &ftl_base.join("core-repo/core"), true);
23 | +    // add_translation_root(&mut map, &ftl_base.join("core-repo/core"), true);
24 | 
25 |      if let Some(path) = extra_ftl_root() {
26 |          // Mobile client has requested its own extra translations
27 | -        add_translation_root(&mut map, &path, false);
28 | +        // add_translation_root(&mut map, &path, false);
29 |          // In a debug build, also include the Qt translations so that our Python unit
30 |          // tests pass.
31 | if std::env::var("RELEASE").is_err() { 32 | @@ -35,7 +35,7 @@ pub fn get_ftl_data() -> TranslationsByLang { 33 | // Qt core templates from this repo 34 | add_folder(&mut map, &ftl_base.join("qt"), "templates"); 35 | // And translations from submodule 36 | - add_translation_root(&mut map, &ftl_base.join("qt-repo/desktop"), true) 37 | + // add_translation_root(&mut map, &ftl_base.join("qt-repo/desktop"), true) 38 | } 39 | map 40 | } 41 | diff --git a/rslib/src/sync/http_server/media_manager/mod.rs b/rslib/src/sync/http_server/media_manager/mod.rs 42 | index ca5214a20..4e35c89f9 100644 43 | --- a/rslib/src/sync/http_server/media_manager/mod.rs 44 | +++ b/rslib/src/sync/http_server/media_manager/mod.rs 45 | @@ -15,13 +15,13 @@ use crate::sync::media::changes::MediaChange; 46 | use crate::sync::media::database::server::ServerMediaDatabase; 47 | use crate::sync::media::sanity::MediaSanityCheckResponse; 48 | 49 | -pub(crate) struct ServerMediaManager { 50 | +pub struct ServerMediaManager { 51 | pub media_folder: PathBuf, 52 | pub db: ServerMediaDatabase, 53 | } 54 | 55 | impl ServerMediaManager { 56 | - pub(crate) fn new(user_folder: &Path) -> HttpResult { 57 | + pub fn new(user_folder: &Path) -> HttpResult { 58 | let media_folder = user_folder.join("media"); 59 | create_dir_all(&media_folder).or_internal_err("media folder create")?; 60 | Ok(Self { 61 | diff --git a/rslib/src/sync/http_server/mod.rs b/rslib/src/sync/http_server/mod.rs 62 | index 9e585c3c7..d3e137ea6 100644 63 | --- a/rslib/src/sync/http_server/mod.rs 64 | +++ b/rslib/src/sync/http_server/mod.rs 65 | @@ -3,9 +3,9 @@ 66 | 67 | mod handlers; 68 | mod logging; 69 | -mod media_manager; 70 | -mod routes; 71 | -mod user; 72 | +pub mod media_manager; 73 | +pub mod routes; 74 | +pub mod user; 75 | 76 | use std::collections::HashMap; 77 | use std::env; 78 | @@ -43,12 +43,12 @@ use crate::sync::request::MAXIMUM_SYNC_PAYLOAD_BYTES; 79 | use crate::sync::response::SyncResponse; 80 | 81 | pub struct SimpleServer { 82 | - state: Mutex, 83 | + pub state: Mutex, 84 | } 85 | 86 | pub struct SimpleServerInner { 87 | /// hkey->user 88 | - users: HashMap, 89 | + pub users: HashMap, 90 | } 91 | 92 | impl SimpleServerInner { 93 | diff --git a/rslib/src/sync/http_server/user.rs b/rslib/src/sync/http_server/user.rs 94 | index df7ae7596..39ead0d2a 100644 95 | --- a/rslib/src/sync/http_server/user.rs 96 | +++ b/rslib/src/sync/http_server/user.rs 97 | @@ -13,7 +13,7 @@ use crate::sync::error::HttpResult; 98 | use crate::sync::error::OrHttpErr; 99 | use crate::sync::http_server::media_manager::ServerMediaManager; 100 | 101 | -pub(in crate::sync) struct User { 102 | +pub struct User { 103 | pub name: String, 104 | pub col: Option, 105 | pub sync_state: Option, 106 | diff --git a/rslib/src/sync/request/mod.rs b/rslib/src/sync/request/mod.rs 107 | index acc56eead..0b2748c8d 100644 108 | --- a/rslib/src/sync/request/mod.rs 109 | +++ b/rslib/src/sync/request/mod.rs 110 | @@ -2,7 +2,7 @@ 111 | // License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html 112 | 113 | pub mod header_and_stream; 114 | -mod multipart; 115 | +pub mod multipart; 116 | 117 | use std::any::Any; 118 | use std::env; 119 | @@ -38,7 +38,7 @@ use crate::version::sync_client_version_short; 120 | #[derive(Clone)] 121 | pub struct SyncRequest { 122 | pub data: Vec, 123 | - json_output_type: PhantomData, 124 | + pub json_output_type: PhantomData, 125 | pub sync_version: SyncVersion, 126 | /// empty with older clients 127 | pub client_version: String, 128 | -- 129 | 
2.28.0.windows.1
130 | 
131 | 
-------------------------------------------------------------------------------- /ankisyncd.toml: --------------------------------------------------------------------------------
1 | 
2 | [listen]
3 | host = "0.0.0.0"
4 | port = 27701
5 | 
6 | [paths]
7 | # set root_dir as the working dir where server data (collections folder) and databases (auth.db, ...) reside
8 | root_dir = "."
9 | 
10 | # The following section is optional;
11 | # set it up if your server is compiled with tls support
12 | [encryption]
13 | ssl_enable = false
14 | cert_file = ""
15 | key_file = ""
16 | 
-------------------------------------------------------------------------------- /build.rs: --------------------------------------------------------------------------------
1 | use std::env;
2 | 
3 | fn main() {
4 |     // should consider native build on arm platform
5 | 
6 |     // used in cross compile while building with CD
7 |     // such as arm-unknown-linux-musleabihf
8 |     // let target = env::var("TARGET").expect("TARGET was not set");
9 |     // if target.contains("arm") && target.contains("musl") {
10 |     //     // find and link static sqlite3 lib
11 |     //     let sql = Path::new(&env::current_dir().unwrap()).join("sql/lib");
12 |     //     println!("cargo:rustc-link-search=native={}", sql.display());
13 |     //     println!("cargo:rustc-link-lib=static=sqlite3");
14 |     // }
15 |     // if target.contains("aarch64") && target.contains("musl") {
16 |     //     // find and link static sqlite3 lib
17 |     //     let sql = Path::new(&env::current_dir().unwrap()).join("sql/lib");
18 |     //     println!("cargo:rustc-link-search=native={}", sql.display());
19 |     //     println!("cargo:rustc-link-lib=static=sqlite3");
20 |     // }
21 |     let pat = "tls";
22 |     let key = format!("CARGO_FEATURE_{pat}").to_uppercase();
23 |     if env::var_os(key).is_some() {
24 |         println!("cargo:rustc-cfg=feature=\"{pat}\"")
25 |     }
26 | }
27 | 
-------------------------------------------------------------------------------- /docs/ANKI_CLIENTS_SETUP.md: --------------------------------------------------------------------------------
1 | ## Setting up Anki Clients
2 | 
3 | #### Anki 2.1 (install add-on from ankiweb)
4 | 
5 | 1. Go to `Tools -> Add-ons`
6 | 2. On the add-on window, click on `Get Add-ons` and fill in the textbox with the code `358444159`
7 | 3. You get the add-on `custom sync server redirector`; choose it, then click `config` on the bottom right
8 | 4. Apply your server IP address
9 | 5. Restart Anki
10 | 
11 | #### AnkiDroid
12 | 
13 | Go to `Advanced -> Custom sync server`
14 | 
15 | Unless you have set up a reverse proxy to handle encrypted connections, use `http` as the protocol. The port will be either the default `27701`, or whatever you have specified in `ankisyncd.toml` (if using a reverse proxy, whatever port you configured to accept the front-end connection).
16 | 
17 | Use the same base url for both the `Sync url` and the `Media sync url`, but append `/msync` to the `Media sync url`. Do **not** append `/sync` to the `Sync url`.
18 | 
19 | Even though the AnkiDroid interface will request an email address, this is not required; it can simply be the username you configured with `ankisyncd user -a`.
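To make the `Sync url` / `Media sync url` rule above concrete, here is what the two AnkiDroid fields would contain for a server reachable at `192.0.0.1` on the default port (the address is an example; substitute your own):
```
Sync url:        http://192.0.0.1:27701
Media sync url:  http://192.0.0.1:27701/msync
```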
20 | 
-------------------------------------------------------------------------------- /docs/ARCHITECTURE.md: --------------------------------------------------------------------------------
1 | This page illustrates how the server is constructed.
2 | It consists of two parts: one is about collection sync, the other is about media sync.
3 | The server uses the crate `actix-web` as its web framework and therefore relies on Rust's `async` feature.
4 | The entry point of the server is the function `main` in `main.rs`. There is also a feature named `tls` which adds support for secure HTTP connections.
5 | 
6 | 
7 | 
8 | Next we go to `server.rs`, which illustrates how we build an instance of the HTTP server. Here we configure the HTTP server with global app data and the services that receive HTTP requests from clients. As for the global application data, there are four pieces: the `session manager`, the `backend`, the `configure data`, and the `session database`. The session manager is what you use to manage sessions, such as adding sessions. The backend is a handle through which we call the sync API from the anki lib. The configure data is what was read from the configuration file. The session database is an already-established database connection, ready to perform queries.
9 | 
10 | 
11 | 
12 | The entry point of the sync handler is `sync_app_no_fail` -> `sync_app` in `sync.rs`. We assign each user profile on the client (the Anki app) a unique `hostkey`, which is provided by the method `operate_hostkey_no_fail` -> `operation_hostkey` while clients log into the server (which means user credentials are sent over). After a user authenticates successfully, a session (which establishes a relationship between the user and their file location on the server) is set up between server and client, and the server maintains it in the struct named `session manager`. As the first paragraph above says, the sync process consists of two parts: collection and media sync. It is convenient for us to directly apply the API from the upstream anki lib, and the collection sync part is contained in `collection.rs`.
13 | 
14 | As for media sync, the procedure is similar to collection sync. `last_usn` is always used to compare the difference between the server and clients. A `usn` is something like an index into the media records in the media database, and it is incremented by 1 for each change. Below are some cases which demonstrate how the server handles media sync.
15 | 
16 | Case 1: prepare an empty account with a new user profile. Add two cards (each card contains one media file), randomly delete one of them, then sync to the server. As we can see from the console output, the server uses the methods `begin`, `uploadChanges`, `mediaSanity`. The client will upload the media data `[("1.png", Some("0")), ("2.png", Some("1"))]` if we don't perform a check-media and delete media first.
17 | 
18 | Case 2: a new user profile that has already synced to the server. We log into the server and choose download. The methods `mediaChanges` and `downloadFiles` are called. The method `media_changes` is used to compare the difference between server and client. If the media state of the server is newer than the client's, the client will download media files.
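To make the structure above concrete, here is a minimal, self-contained sketch of the pattern described: shared app data registered on the actix-web `App`, plus a handler serving the `/sync/` and `/msync/` method routes. It is not the project's actual source; the `SessionManager` shape and the handler signature are simplified assumptions that only mirror the names used above.
```
use actix_web::{web, App, HttpResponse, HttpServer, Responder};
use std::collections::HashMap;
use std::sync::Mutex;

// Simplified stand-in for the session manager described above:
// it maps a client's hostkey to that user's session data.
struct SessionManager {
    sessions: HashMap<String, String>, // hostkey -> username
}

// Stand-in for `sync_app_no_fail`: dispatch on the sync method name
// (hostKey, meta, applyChanges, ...) taken from the URL path.
async fn sync_app_no_fail(
    state: web::Data<Mutex<SessionManager>>,
    method: web::Path<String>,
    _body: web::Bytes,
) -> impl Responder {
    match method.as_str() {
        "hostKey" => {
            // A real implementation would check the credentials sent in the
            // body, then remember hostkey -> user and return {"key": hostkey}.
            let mut manager = state.lock().unwrap();
            manager.sessions.insert("dummy-hostkey".into(), "user".into());
            HttpResponse::Ok().json(serde_json::json!({ "key": "dummy-hostkey" }))
        }
        // The changelog notes that several endpoints must answer "null"
        // rather than an empty body to keep client JSON decoders happy.
        _ => HttpResponse::Ok().body("null"),
    }
}

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    // Global app data, registered once and shared by all workers.
    let state = web::Data::new(Mutex::new(SessionManager {
        sessions: HashMap::new(),
    }));
    HttpServer::new(move || {
        App::new()
            .app_data(state.clone())
            .route("/sync/{method}", web::post().to(sync_app_no_fail))
            .route("/msync/{method}", web::post().to(sync_app_no_fail))
    })
    .bind(("0.0.0.0", 27701))?
    .run()
    .await
}
```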
-------------------------------------------------------------------------------- /docs/CERTS.md: --------------------------------------------------------------------------------
1 | # HTTPS setup
2 | 
3 | Due to an Android policy change, some AnkiDroid versions need an https transport.
4 | Ankisyncd allows the use of self-signed certificates
5 | to enable a more secure connection,
6 | such as in a semi-open LAN environment.
7 | This requires the sync server to be compiled with the `tls` feature (pass `--features tls` to cargo when building).
8 | 
9 | We recommend [mkcert](https://github.com/FiloSottile/mkcert) for easy ssl certs setup.
10 | Open `ankisyncd.toml` with a text editor
11 | and modify the following lines to enable TLS and set the certificate paths:
12 | ```
13 | # set ssl_enable to true
14 | ssl_enable=false
15 | # put the cert and key file paths here
16 | cert_file=""
17 | key_file=""
18 | ```
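For instance, a typical mkcert session could look like the following. The IP address is an example, and mkcert derives the output file names from the hosts you pass, so adjust both to your setup:
```
# create and install a local CA (only needed once per machine)
mkcert -install
# issue a certificate for the address your Anki clients will use
mkcert 192.168.1.10
# then point ankisyncd.toml at the generated files, e.g.:
#   ssl_enable = true
#   cert_file = "./192.168.1.10.pem"
#   key_file = "./192.168.1.10-key.pem"
```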
-------------------------------------------------------------------------------- /docs/CHANGELOG: --------------------------------------------------------------------------------
1 | parse ankisyncd.conf
2 | 
3 | how to compile rslib successfully
4 | 1. put anki-sync-server-rs in dir anki-, in its dep toml, anki={path="../rslib"}
5 | 2. put anki-sync-server-rs in anki- cargo.toml field workspace
6 | put anki entirely into the project as a crate and use rslib
7 | 
8 | work around /meta
9 | reference examples/basics/middleware
10 | 
11 | get session error on msync uploadChanges?
12 | pattern match err when hkey == None
13 | 
14 | delete deck causes sanity check err: cards and notes aren't
15 | equal. start successfully deletes records from db, yet applychanges
16 | restores them back
17 | 1. maybe server usn is not correct, as applychanges uses the usn the
18 | same as start; the py impl doesn't use the usn on applychanges
19 | 2. see cards count on applychanges python impl: equal
20 | 3. start doesn't return server graves, see diff to py impl (py doesn't either)
21 | 4. guess: before start, opening the collection will read the db into
22 | memory and will not affect the local db, so operations
23 | like delete from db don't work until the db executes commit; finally
24 | works, by adding begin trx and commit trx in start
25 | 
26 | media downloadFiles: Ankidroid client err in opening zip file.
27 | 1. media db not updated on uploadChanges after delete and check media
28 | on client: db update confirmed
29 | 2. try opening zip data on the server to be synced to the client; can open
30 | meta data on server
31 | 3. compression format not correct? seems to have no effect (Stored
32 | and Deflated)
33 | 4. able to read data from zip on server? ok
34 | 5. response type err; currently use json(Option>),
35 | change to body(vec), err msg changed
36 | 6. meta file format incorrect, from vec<[String;2]> to
37 | {files: Vec<(String,Option)>}, new err appears
38 | 7. no value for 0; seems Ankidroid needs the downloadFiles response
39 | zip meta to be hashmap-like; successfully syncs
40 | 
41 | err decode response body EOF when parsing... on Windows
42 | seems applychunks err?
43 | 1. change response body to "null", ok
44 | 
45 | mediaChanges: error decoding response body:
46 | invalid type: string "1", expected i32
47 | 1. change media db entry from Vec<[String;3]> to Vec<(String,i32,String)>
48 | 
49 | delete deck then sync on pc fails on applygraves, decoding response
50 | body err
51 | 1. response body from "" to "null"
52 | 
53 | remove unused imports
54 | 
55 | introduce ankisyncctl (account manager)
56 | 
57 | create and edit readme
58 | 
59 | generate Settings.toml when first running the executable file
60 | 
61 | create release files tar.gz, zip, exe, aarch64 executable; publish
62 | to termux
63 | 
64 | msync/uploadChanges pretty slow?
65 | 1. seems ankidroid is processing data during the delay,
66 | and the speed is almost equal between the rust and py impls
67 | 
68 | integrate ankisyncctl into ankisyncd?
69 | 1. use U flag to access ankisyncctl
70 | 
71 | add embedded cert https based on rustls
72 | 1. ok
73 | 
74 | build for linux x86_64 and aarch64
75 | 1. x86_64: successfully built using musl-gcc, dynamically linked;
76 | openssl was built from C source using musl-gcc
77 | 2. try to cross-compile for aarch64, as ring is not able to be
78 | compiled. use openssl made on raspberry with gnu-gcc: failed
79 | 3. try to cross-compile for aarch64, use openssl made on x86_64;
80 | openssl cross-compiled successfully, yet the build failed
81 | 4. use musl to compile openssl for x86_64, and cross-compile: failed,
82 | adding symbols: file in wrong format
83 | 5. use musl-gcc to cross-compile openssl for aarch64 in another
84 | way, then build: successfully built statically linked binaries
85 | 6. use musl-gcc to compile openssl for x86_64, and build: cannot
86 | build a statically linked binary
87 | 
88 | 
89 | cross compile for linux armv7
90 | 1. openssl and ankisyncd by rpitools arm-linux-gnueabihf-gcc, build
91 | flags lgcc/static: successfully built, yet crashes with bad system call;
92 | "for GNU/Linux 2.6.32" in file info
93 | 2. openssl and ankisyncd by arm-linux-musleabihf-gcc: cannot
94 | compile openssl, error: '-mfloat-abi=hard': selected processor lacks an FPU
95 | 3. openssl by rpitools arm-linux-gnueabihf-gcc, ankisyncd by arm-linux-musleabihf-gcc;
96 | before building ankisyncd, set env var CC=rpitools arm-linux-gnueabihf-gcc;
97 | successfully compiled and runs well on termux (64-bit)
98 | 
99 | user manager adheres to env_variables auth_db path?
100 | 
101 | replace sync_method with backend(global state).sync_server_method(input) and use backend.open_collection(input)
102 | 1. fail; seems blocked, may be incompatible with async/await
103 | 
104 | 
105 | replace sync_method with backend(global state).sync_server_method(input) and use backend.col=session.get_col()
106 | 1. fail; seems blocked, may be incompatible with async/await
107 | 2. use bd (sync_app argument/backend) and don't create a variable backend; can run but with bugs
108 | 
109 | delete cards/deck causes sync error on ankidroid?
110 | 1. cmp UnchunkedChanges from clients (android/pc); seems no problem, syncs well
111 | 
112 | full_upload -> add cards -> sync -> shut down syncserver and start -> sync causes check database error?
113 | 1. server db is not updated in the normal syncing process (only full sync will do): I have commented out
114 | commit_rust_trx() in anki/rslib/sync/server/finish(), so changes cannot be committed to db; just cancel it.
115 | 
116 | cannot switch users/profiles when the server is running?
117 | 1. before meta, drop col from backend and add a new col into backend when the username in backend isn't equal to
118 | that in the session (in fn add_col)
119 | 
120 | docker build on raspberry ubuntu 64bit aarch64: always cannot access github (crates.io);
121 | search for methods:
122 | 1. replace the official source with the tuna source; after several retries, things seem to work, except for
123 | 4 updates that still need to access github.
124 | 
125 | Contribution update:
126 | - Doc and qa: document re-writing #17 by @redmie
127 | - Docker build fails at prost-build for armv7h #22, #23, by @mktree and @dobefore
128 | - Unwrap reduction 2bis and versions bump: move anki out of source tree, fix run time in run time due to anki update, by @redmie
129 | - Version-bump-update-anki-lib/better clone-patch-anki #29, fork from #18;
130 | document about #18, by @redmie, @dobefore
131 | - Cicd-system #26 by @dobefore, with @redmie's suggestions
132 | - bump ankisyncd to 0.1.5 (test release) #30 by @dobefore
133 | - fix cross-compiling failure for arm (successfully build static bin) #32 by @dobefore
134 | - bump to 0.1.7 (first formal release) #33 by @dobefore
135 | 
136 | download error
137 | reproduce: upload to server, log out, delete deck, log in, sync. exit client and
138 | server, launch both, and sync: download error.
139 | 
140 | add actix-web middlewares
141 | 1. How to append the username to log info?
142 | 2. error handling about response codes
143 | 
144 | return a forbidden code when a session error occurs, i.e. when users try to log in though they have already
145 | logged in with an existing account, which will return 500.
146 | 
147 | Work to do
148 | one. pull from anki and ankisyncd-rs OK
149 | 
150 | two. create new branch ok
151 | create new anki patch
152 | 1. get current commit ID: c8275257ce4f507cf3292d6d4d7185d05088e310
153 | 2. make changes OK
154 | 3. get current commit ID: 41d4b0ea08bba88e114c0ad11ad8038d1b48a00f
155 | 4. diff:
156 | 
157 | create a new rs file in which to test the new api; try to replace Router with actix-web
158 | 1. try to compile anki/rslib, an error arises:
159 | Compiling anki_i18n v0.0.0 (D:\anki-sync-server-rs\anki\rslib\i18n)
160 | error: failed to run custom build command for `anki_i18n v0.0.0
161 | (D:\software\vscode_project\anki_sync\anki-sync-server-rs\anki\rslib\i18n)`
162 | thread 'main' panicked at 'called `Result::unwrap()` on an `Err` value: Os
163 | { code: 3, kind: NotFound, message: "系统找不到指定的路径。" }',
164 | anki\rslib\i18n\build\gather.rs:68:37
165 | fixed by commenting out the function add_translation_root in rslib/i18n/build/gather.rs
166 | 
167 | There are so many obstacles trying to comply with actix-web, as anki uses axum.
168 | So it would be better to switch to axum, yet that would approximate the fact that I have
169 | almost copied the whole work! I would be ashamed of myself.
170 | 
171 | After inspecting the protocol part of sync, it is possible to replace axum with actix-web.
172 | Now, two problems arise:
173 | 1. add a trait that accepts a future closure, with reference to actix-web
174 | 
175 | 2. construct the anki struct SyncRequest from the request; this involves adding actix-web middleware to handle it.
176 | Firstly, only finish part of collection sync, disregard media sync. OK
177 | add struct wrapper for SyncRequest OK
178 | start constructing the response: do some modifications before returning the response. OK
179 | finish nested routes of actix-web OK
180 | 
181 | 3. attack the problem of switching to a self-made method host_key
182 | make a self-made method hostkey but encounter another problem. OK
183 | 
184 | 4. load all users from the auth database before launching the server OK
185 | 
186 | 5. collection sync handler is not used and the server just returns a 500 code OK
187 | I think I found the reason why it doesn't work: the handler has an argument which
188 | is a web::Data type and should be constructed and passed before starting the server.
189 | I removed that argument and it works. It is too bad that it took me so long to waste precious
190 | time on the obvious error.
191 | 
192 | 6. other media sync methods except for begin are not working OK
193 | check whether the two begin methods are used at the same time; the begin method with android seems ok
194 | uploadChanges error, maybe data is not fully received? So I change the method to write_all;
195 | ignore stream read errors in order to be compatible with the media get method
196 | 7. collection exceeds size limit. OK
197 | on the client side there is no need to bother, as it will ignore this if the endpoint is not http://ankiweb....
198 | we can increase this limit by setting the env var MAX_SYNC_PAYLOAD_MEGS on the server side
199 | 
200 | 8. finish lib and main files, add tls support. OK
201 | 
202 | 9. max upload collection size limit on Ankidroid: the setting on the server doesn't work
203 | 
204 | 10. more user-friendly feedback when authentication fails or two hostkeys are not equal OK
205 | several conditions under which authentication will fail:
206 | when the client has already authenticated, and the same user exists on the server but with an empty collection folder,
207 | this will cause error.invalid key (sync_key);
208 | when the client has already authenticated, but a user with the same username and a different password is created on the server
209 | 11. add cross-compile target aarch64 and remove target arm in github action build scripts CI/CD OK
210 | 
211 | Three. update readme
212 | Four. update version to 1.0.0 OK
213 | 
214 | Note: with actix-web, if a handler's web::Data argument is not set before the server starts, that handler is simply never used; no error is returned, the server just reports 500. That is not great; it would save time if an error were reported.
215 | 
216 | 
217 | update anki version to 2.1.60.
218 | 
219 | two. create new branch
220 | create new anki patch
221 | 1. get current commit ID: d9d36078f17a2b4b8b44fcb802eb274911ebabe7
222 | 2. make changes: git add . & git commit -m "changes".
223 | 3. get current commit ID: d70e168c5dd7b1476847c40c8612c8b498c9e1e7
224 | 4. diff: OK
225 | 5. update ANKI_COMMIT ID in patch scripts OK
226 | 
227 | 
228 | 
-------------------------------------------------------------------------------- /docs/CONTAINER.md: --------------------------------------------------------------------------------
1 | # Containers
2 | Pre-built images from docker hub for arm64 and amd64 are available, or you can build them yourself.
3 | 
4 | In this manual we will use the `docker` command for container creation/management, but it can seamlessly be replaced with `podman` every time it is used.
5 | 
6 | The `Dockerfile` at the root of the repository controls the build process.
7 | ## Pull images from DockerHub and run in a container
8 | 1. pull the image
9 | ```
10 | docker pull ankicommunity/anki-sync-server-rs:latest
11 | ```
12 | 2. run it in the background (you can specify the container name by passing `--name=ankisyncd`, or use the default name). You can also pass env vars to the following command line to add users; for example, the env vars below add an account whose username is `test` and password is `123456`.
13 | ```
14 | docker run -d -it --name=ankisyncd -e ANKISYNCD_USERNAME=test -e ANKISYNCD_PASSWORD=123456 ankicommunity/anki-sync-server-rs:latest
15 | ```
16 | 3. add a user
17 | If the env variables are already set, which means the account has been added, there is no need to do this step. If not, bring up the shell of the `ankisyncd` container (or your chosen container name) and run:
18 | ```
19 | docker exec -it ankisyncd /bin/bash
20 | ankisyncd user -a username password
21 | exit
22 | ```
23 | ## Building in a container, running on the host
24 | 
25 | 1. In the root of the repository run:
26 | ```
27 | docker build -t anki-sync-server-rs/builder:latest .
28 | ```
29 | 2. Then exfiltrate the binary from the container:
30 | ```
31 | docker run --rm --entrypoint cat anki-sync-server-rs/builder:latest /usr/local/bin/ankisyncd > ankisyncd
32 | ```
33 | 3. Use the `ankisyncd` binary obtained as usual
34 | 
35 | 
36 | ## Building and running in a container
37 | 
38 | 1. Build the container:
39 | ```
40 | docker build -t anki-sync-server-rs/runner:latest .
41 | ```
42 | 2. Run it in the foreground:
43 | ```
44 | docker run -it anki-sync-server-rs/runner:latest
45 | ```
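If you prefer `docker compose`, the published image can be wired up roughly as follows. This is a hypothetical `docker-compose.yml`, not one shipped by the project; the env values are the same examples used above, and `27701` is the server's default port:
```
services:
  ankisyncd:
    image: ankicommunity/anki-sync-server-rs:latest
    container_name: ankisyncd
    ports:
      - "27701:27701"   # expose the sync server's default port
    environment:
      - ANKISYNCD_USERNAME=test
      - ANKISYNCD_PASSWORD=123456
    restart: unless-stopped
```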
41 | ``` 42 | 2. Run it in the foreground: 43 | ``` 44 | docker run -it anki-sync-server-rs/runner:latest 45 | ``` 46 | -------------------------------------------------------------------------------- /docs/INSTALL.md: -------------------------------------------------------------------------------- 1 | # Install 2 | 3 | ## Grab release 4 | 5 | See Quickstart in [README.md](../README.md). 6 | 7 | ## Build from source 8 | 9 | 1. Make sure Rust and its toolchains are installed. 10 | If in doubt, use rustup as proposed in [this link](https://www.rust-lang.org/tools/install). 11 | 2. Clone our repo and enter the folder. 12 | 3. Populate the anki lib by running `scripts/clone_patch_anki` (use the corresponding bat script on Windows; the env variable ANKI_REPO_URL can be set to change the anki library repository URL, which defaults to [github](https://github.com/ankitects/anki)). 13 | 4. Run the build command `cargo build --release`. 14 | 5. The resulting binary is available in `target/release/`. 15 | 16 | ## Install as a systemd unit 17 | 18 | We suppose that the sync server is installed as `/usr/bin/ankisyncd`. 19 | Install the configuration file in `/etc/ankisyncd.toml` 20 | with the root dir set to `/var/lib/ankisyncd/`. 21 | 22 | Create a new system user and group named `anki` (using `useradd`). 23 | 24 | Create and change ownership of the root dir: `mkdir -p /var/lib/ankisyncd/ && chmod -R o-rwx /var/lib/ankisyncd/ && chown -R anki:anki /var/lib/ankisyncd/` 25 | 26 | Then populate the hardened service file in `/etc/systemd/system/ankisyncd.service`: 27 | ``` 28 | [Unit] 29 | Description=Anki sync server daemon 30 | After=network-online.target 31 | # If using a reverse proxy, start after it 32 | #After=network-online.target nginx.service 33 | Wants=network-online.target 34 | [Service] 35 | Type=exec 36 | ExecStart=/usr/bin/ankisyncd -c /etc/ankisyncd.toml 37 | User=anki 38 | Group=anki 39 | SyslogIdentifier=ankisyncd 40 | WorkingDirectory=/var/lib/ankisyncd/ 41 | PrivateTmp=true 42 | PrivateDevices=true 43 | CapabilityBoundingSet= 44 | AmbientCapabilities= 45 | ProtectSystem=strict 46 | ProtectKernelTunables=true 47 | ProtectKernelModules=true 48 | ProtectControlGroups=true 49 | ProtectClock=true 50 | ProtectHostname=true 51 | ProtectHome=tmpfs 52 | ProtectKernelLogs=true 53 | ProtectProc=invisible 54 | ProcSubset=pid 55 | PrivateNetwork=false 56 | RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX 57 | IPAddressAllow=any 58 | SystemCallArchitectures=native 59 | SystemCallFilter=@system-service 60 | SystemCallFilter=~@privileged @resources @obsolete 61 | RestrictSUIDSGID=true 62 | RemoveIPC=true 63 | NoNewPrivileges=true 64 | RestrictRealtime=true 65 | RestrictNamespaces=true 66 | LockPersonality=true 67 | PrivateUsers=true 68 | MemoryDenyWriteExecute=false 69 | 70 | [Install] 71 | WantedBy=multi-user.target 72 | ``` 73 | 74 | Reload the services list: `systemctl daemon-reload`. 75 | 76 | Enable and start the sync server: `systemctl enable ankisyncd && systemctl start ankisyncd`. -------------------------------------------------------------------------------- /docs/PLATFORM.md: -------------------------------------------------------------------------------- 1 | `ankisyncd_x.x.x-linux_x64.tar.gz` for Linux x86_64, glibc, dynamically linked. 2 | 3 | `ankisyncd-x.x.x-linux_arm.tar.gz` for 32-bit ARM NEON boards under Linux (also suitable for arm64), musl, statically linked. 4 | 5 | `ankisyncd_x.x.x-windows_x64.zip` for x86-64 Windows. 6 | 7 | `ankisyncd_x.x.x-macOS_x64.zip` for x86-64 macOS. 
8 | 9 | -------------------------------------------------------------------------------- /docs/REVERSE_PROXY.md: -------------------------------------------------------------------------------- 1 | # Reverse Proxy Setup 2 | 3 | How to set up a reverse proxy using nginx. 4 | 5 | Install and expose the sync server to the reverse proxy server at address and port `SYNC_SERVER_ADDR:SYNC_SERVER_PORT` (loopback `127.0.0.1` or firewalled traffic inside a controlled network). 6 | 7 | Inside the server directive of the host you want to use for anki, add the following `location /` block: 8 | 9 | ``` 10 | server { 11 | listen 443 ssl; 12 | # Increase nginx version identification difficulty 13 | server_tokens off; 14 | ... 15 | # Increase body size 16 | client_max_body_size 512M; 17 | # Pass traffic to the sync server 18 | location / { 19 | proxy_set_header Host $host; 20 | proxy_set_header X-Forwarded-Proto $scheme; 21 | proxy_set_header X-Real-IP $remote_addr; 22 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 23 | 24 | # Set headers for more security 25 | #add_header X-Content-Type-Options nosniff; 26 | #add_header X-Frame-Options "SAMEORIGIN"; 27 | #add_header X-XSS-Protection "1; mode=block"; 28 | #add_header X-Robots-Tag none; 29 | #add_header X-Download-Options noopen; 30 | #add_header X-Permitted-Cross-Domain-Policies none; 31 | #add_header Referrer-Policy 'strict-origin'; 32 | #add_header Front-End-Https on; 33 | proxy_pass http://SYNC_SERVER_ADDR:SYNC_SERVER_PORT; 34 | } 35 | } 36 | ``` -------------------------------------------------------------------------------- /docs/TEST_SERVER_CLIENT.md: -------------------------------------------------------------------------------- 1 | ### Server 2 | 3 | It should work on any tier 1/2 platform of the rust ecosystem, 4 | but it has only been tested on the following. 
5 | 6 | #### Windows 7 | 8 | Win 10 64bits 9 | 10 | #### Linux 11 | 12 | |machine|ENV| 13 | |----|----| 14 | |x86_64|Windows wsl2,tested| 15 | |aarch64(arm64)|cross-compiled on wsl2(ubuntu),tested on ubuntu aarch64 and termux| 16 | |armv7(arm32)|cross-compiled on wsl2(ubuntu)| 17 | 18 | 19 | ### Client 20 | 21 | |tested anki versions|2.1.15,2.1.28,2.1.35,2.1.50| 22 | |----|----| 23 | |tested process| import a collection of decks and upload to server| -------------------------------------------------------------------------------- /scripts/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "anki" 3 | version = "0.0.0" 4 | edition = "2021" 5 | authors = ["Ankitects Pty Ltd and contributors"] 6 | license = "AGPL-3.0-or-later" 7 | description = "Anki's Rust library code" 8 | build = "build/main.rs" 9 | 10 | [lib] 11 | name = "anki" 12 | path = "src/lib.rs" 13 | 14 | [features] 15 | bench = ["criterion"] 16 | 17 | [[bench]] 18 | name = "benchmark" 19 | harness = false 20 | required-features = ["bench"] 21 | 22 | # After updating anything below, run ../cargo/update.py 23 | 24 | [build-dependencies] 25 | prost-build = "0.11.1" 26 | which = "4.3.0" 27 | 28 | [dev-dependencies] 29 | env_logger = "0.9.1" 30 | tokio = { version = "1.21", features = ["macros"] } 31 | 32 | [dependencies] 33 | # pinned as any changes could invalidate sqlite indexes 34 | unicase = "=2.6.0" 35 | 36 | tokio = { version = "1.21", features = ["fs", "rt-multi-thread"] } 37 | 38 | anki_i18n = { path="i18n" } 39 | 40 | criterion = { version = "0.4.0", optional = true } 41 | 42 | nom = "7.1.1" 43 | proc-macro-nested = "0.1.7" 44 | slog-term = "2.9.0" 45 | blake3 = "1.3.1" 46 | bytes = "1.2.1" 47 | chrono = "0.4.22" 48 | coarsetime = "0.1.22" 49 | flate2 = "1.0.24" 50 | fluent = "0.16.0" 51 | fluent-bundle = "0.15.2" 52 | futures = "0.3.24" 53 | hex = "0.4.3" 54 | htmlescape = "0.3.1" 55 | intl-memoizer = "0.5.1" 56 | itertools = "0.10.5" 57 | lazy_static = "1.4.0" 58 | num_enum = "0.5.7" 59 | num-integer = "0.1.45" 60 | once_cell = "1.15.0" 61 | pin-project = "1.0.12" 62 | prost = "0.11.0" 63 | rand = "0.8.5" 64 | regex = "1.6.0" 65 | reqwest = { git="https://github.com/ankitects/reqwest.git", rev="7591444614de02b658ddab125efba7b2bb4e2335", default-features=false, features=[ 66 | "json", 67 | "socks", 68 | "stream", 69 | "multipart", 70 | # the Bazel build scripts separate these out by platform 71 | "native-tls", 72 | "rustls-tls", 73 | "rustls-tls-webpki-roots", 74 | "rustls-tls-native-roots", 75 | ] } 76 | rusqlite = { version = "0.28.0", features = ["trace", "functions", "collation"] } 77 | scopeguard = "1.1.0" 78 | serde = "1.0.145" 79 | serde_derive = "1.0.145" 80 | serde_json = "1.0.85" 81 | serde_repr = "0.1.9" 82 | serde_tuple = "0.5.0" 83 | serde-aux = "4.0.0" 84 | sha1 = "0.6.0" 85 | slog = { version = "2.7.0", features = ["max_level_trace", "release_max_level_debug"] } 86 | slog-async = "2.7.0" 87 | slog-envlogger = "2.2.0" 88 | tempfile = "3.3.0" 89 | unic-langid = { version = "0.9.0", features = ["macros"] } 90 | unicode-normalization = "0.1.22" 91 | utime = "0.3.1" 92 | zip = { version = "0.6.2", default-features = false, features = ["deflate", "time"] } 93 | async-trait = "0.1.57" 94 | ammonia = "3.2.1" 95 | pulldown-cmark = "0.9.2" 96 | fnv = "1.0.7" 97 | strum = { version = "0.24.1", features = ["derive"] } 98 | tokio-util = { version = "0.7.4", features = ["io"] } 99 | pct-str = { git="https://github.com/timothee-haudebourg/pct-str.git", 
rev="4adccd8d4a222ab2672350a102f06ae832a0572d" } 100 | unic-ucd-category = "0.9.0" 101 | id_tree = "1.8.0" 102 | zstd = { version="0.11.2", features=["zstdmt"] } 103 | num_cpus = "1.13.1" 104 | csv = { git="https://github.com/ankitects/rust-csv.git", rev="1c9d3aab6f79a7d815c69f925a46a4590c115f90" } 105 | dissimilar = "1.0.4" 106 | snafu = { version = "0.7.2", features = ["backtraces"] } 107 | convert_case = "0.6.0" 108 | -------------------------------------------------------------------------------- /scripts/ankisyncd.toml: -------------------------------------------------------------------------------- 1 | 2 | [listen] 3 | host = "0.0.0.0" 4 | port = 27701 5 | 6 | [paths] 7 | # set root_dir as working dir where server data(collections folder) and database(auth.db...) reside 8 | root_dir = "/app" 9 | 10 | # The following section is optional, 11 | # set it up if your server is compiled with tls support 12 | [encryption] 13 | ssl_enable = false 14 | cert_file = "" 15 | key_file = "" 16 | -------------------------------------------------------------------------------- /scripts/build_all: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | cargo build $1 3 | cargo build --features tls $1 4 | -------------------------------------------------------------------------------- /scripts/cc.sh: -------------------------------------------------------------------------------- 1 | # for cross compile use 2 | 3 | # cp cc.sh and config to dir ..,then change dir to .. 4 | 5 | # for aarch64 6 | # echo "cross-compile for aarch64" 7 | # file1="target/aarch64-unknown-linux-musl/release/ankisyncd" 8 | # if [ -f $file1 ];then 9 | # echo "$file1 exists" 10 | # else 11 | # # add env var 12 | # export PATH="$HOME/aarch64-linux-musl-cross/bin:$PATH" 13 | # # use compiled openssl 14 | # export OPENSSL_LIB_DIR=/home/ubuntu/openssl_aarch64_musl 15 | # export OPENSSL_INCLUDE_DIR=/home/ubuntu/openssl_aarch64_musl/include 16 | # export OPENSSL_STATIC=true 17 | # # cross build for aarch64 18 | # cargo build --target=aarch64-unknown-linux-musl --release 19 | # fi 20 | version="0.1.3" 21 | # for armv7 22 | echo "cross-compile for armv7" 23 | file2="target/armv7-unknown-linux-musleabihf/release/ankisyncd" 24 | if [ -f $file2 ];then 25 | echo "$file2 exists" 26 | else 27 | # set CC locenv var 28 | # export PATH="$HOME/rpitools/arm-bcm2708/arm-rpi-4.9.3-linux-gnueabihf/bin:$PATH" 29 | # export CC=arm-linux-gnueabihf-gcc 30 | # add env var 31 | export PATH="$HOME/arm-linux-musleabihf-cross/bin:$PATH" 32 | # use compiled openssl 33 | export OPENSSL_LIB_DIR=/home/ubuntu/anki-sync-server-rs/openssl/lib 34 | export OPENSSL_INCLUDE_DIR=/home/ubuntu/anki-sync-server-rs/openssl/include 35 | export OPENSSL_STATIC=true 36 | 37 | mkdir -p $HOME/sql 38 | # export PATH="$HOME/arm-linux-musleabihf-cross/bin:$PATH" 39 | # export CC=arm-linux-musleabihf-gcc 40 | 41 | 42 | cd $HOME/sqlite-autoconf-3380200 43 | # make clean 44 | ./configure CC=$HOME/rpitools/arm-bcm2708/arm-rpi-4.9.3-linux-gnueabihf/bin/arm-linux-gnueabihf-gcc --host=arm-linux --prefix=$HOME/sql 45 | make && make install 46 | 47 | 48 | cd $HOME/anki-sync-server-rs 49 | 50 | cp -r $HOME/sql . 
51 | # cross build for armv7, enable the TLS feature (--features tls) 52 | cargo build --target arm-unknown-linux-musleabihf --release --features tls 53 | 54 | mkdir ankisyncd-arm 55 | cp target/arm-unknown-linux-musleabihf/release/ankisyncd ankisyncd-arm/ 56 | cp Settings.toml ankisyncd-arm/ 57 | tar -czvf ankisyncd-$version-linux-arm.tar.gz ankisyncd-arm/ 58 | mv ankisyncd-$version-linux-arm.tar.gz ~ 59 | fi 60 | 61 | # for x86-64 62 | # echo "cross-compile for x64" 63 | # # add env var 64 | # export PATH="$HOME/x86_64-linux-musl-cross/bin:$PATH" 65 | # export CC= 66 | 67 | # export OPENSSL_LIB_DIR=/home/ubuntu/openssl_x64 68 | # export OPENSSL_INCLUDE_DIR=/home/ubuntu/openssl_x64/include 69 | # export OPENSSL_STATIC=true 70 | # # enable feature --features rustls 71 | # cargo build --release --target=x86_64-unknown-linux-musl 72 | 73 | # mkdir ankisyncd-linux 74 | # cp target/x86_64-unknown-linux-musl/release/ankisyncd ankisyncd-linux/ 75 | # cp Settings.toml ankisyncd-linux/ 76 | # tar -czvf ankisyncd-$version-linux.tar.gz ankisyncd-linux/ 77 | # mv ankisyncd-$version-linux.tar.gz ~ -------------------------------------------------------------------------------- /scripts/clone_patch_anki: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # set -eu 3 | 4 | realpath() { 5 | OURPWD=$PWD 6 | cd "$(dirname "$1")" 7 | LINK=$(readlink "$(basename "$1")") 8 | while [ "$LINK" ]; do 9 | cd "$(dirname "$LINK")" 10 | LINK=$(readlink "$(basename "$1")") 11 | done 12 | REALPATH="$PWD/$(basename "$1")" 13 | cd "$OURPWD" 14 | echo "$REALPATH" 15 | } 16 | #realpath "$@" 17 | 18 | PROJECT_ROOT=$(realpath) # or result=`myfunc` 19 | ANKI_REPO_URL=${ANKI_REPO_URL:-"https://github.com/ankitects/anki"} 20 | ANKI_COMMIT=d9d36078f17a2b4b8b44fcb802eb274911ebabe7 21 | 22 | #PROJECT_ROOT="$(realpath "$(dirname "$(dirname "$0")")")" 23 | ANKI_PATCH_FOLDER="$PROJECT_ROOT/anki_patch/" 24 | ANKI_FILE_SUFFIX="_anki_rslib.patch" 25 | 26 | echo "Cloning anki from $ANKI_REPO_URL" 27 | cd "$PROJECT_ROOT" 28 | # remove the anki lib in case a patched cache exists in the docker build during github actions 29 | rm -fr anki 30 | git clone "$ANKI_REPO_URL" 31 | echo "Checking out commit $ANKI_COMMIT and applying patch" 32 | cd anki 33 | git checkout $ANKI_COMMIT 34 | git apply "$ANKI_PATCH_FOLDER/$ANKI_COMMIT$ANKI_FILE_SUFFIX" 35 | -------------------------------------------------------------------------------- /scripts/clone_patch_anki.bat: -------------------------------------------------------------------------------- 1 | @REM the file was created by @dobefore and @redmie 2 | @REM the following command turns off echoing of every command 3 | @echo off 4 | @REM Allow external definition of anki repository URL 5 | IF "%ANKI_REPO_URL%" == "" GOTO NOURLSET 6 | GOTO END 7 | :NOURLSET 8 | set ANKI_REPO_URL="https://github.com/ankitects/anki" 9 | :END 10 | 11 | @REM here e.g. 
D:\software\vscode_project\anki_sync\anki-sync-server-rs 12 | set PROJECT_ROOT=%CD% 13 | set ANKI_PATCH_FOLDER=%PROJECT_ROOT%\anki_patch 14 | set ANKI_FILE_SUFFIX=_anki_rslib.patch 15 | 16 | @REM Set up other variables 17 | set ANKI_TAG=2.1.46 18 | set ANKI_COMMIT=d9d36078f17a2b4b8b44fcb802eb274911ebabe7 19 | 20 | @REM Clone & patch 21 | echo "Cloning anki from %ANKI_REPO_URL%" 22 | cd %PROJECT_ROOT% 23 | git clone %ANKI_REPO_URL% 24 | echo "Checking out commit %ANKI_COMMIT% and applying patch" 25 | cd anki 26 | git checkout %ANKI_COMMIT% 27 | @REM convert CRLF to LF 28 | dos2unix %ANKI_PATCH_FOLDER%\%ANKI_COMMIT%%ANKI_FILE_SUFFIX% 29 | git apply %ANKI_PATCH_FOLDER%\%ANKI_COMMIT%%ANKI_FILE_SUFFIX% 30 | 31 | 32 | @REM How to create a patch file 33 | 34 | @REM clone the anki repo 35 | 36 | @REM get the original commit ID 5dab7ed47ec6d17226d2fc0529c32a56e40e5f8a 37 | @REM git rev-parse HEAD 38 | 39 | @REM make changes to the anki lib, e.g. add pub to structs... 40 | @REM git commit 41 | 42 | @REM get the current commit ID 480a572137a51316bab2d97f2435cdfe328c462c 43 | @REM git rev-parse HEAD 44 | 45 | @REM use the current commit ID to create the patch 46 | @REM git format-patch 480a572137a51316bab2d97f2435cdfe328c462c -1 47 | 48 | @REM you can rename the patch file with the original commit ID 49 | @REM ren .\0001-create-patch.patch 5dab7ed47ec6d17226d2fc0529c32a56e40e5f8a_anki_rslib.patch 50 | 51 | @REM last but not least, convert CRLF in the patch file to LF using dos2unix 52 | @REM dos2unix %ANKI_PATCH_FOLDER%\%ANKI_COMMIT%%ANKI_FILE_SUFFIX% -------------------------------------------------------------------------------- /scripts/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | if [ ! -f /app/ankisyncd.toml ]; then 3 | cp -u /ankisyncd.toml /app/ankisyncd.toml 4 | fi 5 | /usr/local/bin/ankisyncd -c /app/ankisyncd.toml 6 | -------------------------------------------------------------------------------- /src/app_config.rs: -------------------------------------------------------------------------------- 1 | // for nested routers 2 | use crate::config::Config; 3 | use crate::db::fetch_users; 4 | use crate::{error::ApplicationError, request}; 5 | 6 | use crate::app_config; 7 | use crate::routes::{ 8 | collecction_sync_handler, media_begin_get, media_begin_post, media_sync_handler, 9 | }; 10 | use actix_web::get; 11 | use actix_web::web; 12 | use actix_web::{middleware, App, HttpServer}; 13 | use actix_web::{HttpResponse, Result}; 14 | 15 | use anki::sync::http_server::media_manager::ServerMediaManager; 16 | 17 | use anki::sync::http_server::user::User; 18 | use anki::sync::http_server::{SimpleServer, SimpleServerInner}; 19 | 20 | #[cfg(feature = "tls")] 21 | use crate::config::ConfigCert; 22 | #[cfg(feature = "tls")] 23 | use rustls::ServerConfig; 24 | use std::collections::HashMap; 25 | use std::fs::create_dir_all; 26 | #[cfg(feature = "tls")] 27 | use std::fs::File; 28 | #[cfg(feature = "tls")] 29 | use std::io::BufReader; 30 | use std::path::Path; 31 | use std::sync::Arc; 32 | use std::sync::Mutex; 33 | 34 | #[cfg(feature = "tls")] 35 | pub fn load_ssl(localcert: &ConfigCert) -> Result<ServerConfig, ApplicationError> { 36 | let cert = &localcert.cert_file; 37 | let key = &localcert.key_file; 38 | let cert_file = &mut BufReader::new(File::open(cert)?); 39 | let key_file = &mut BufReader::new(File::open(key)?); 40 | let cert_chain: Vec<rustls::Certificate> = rustls_pemfile::certs(cert_file)?
41 | .into_iter() 42 | .map(rustls::Certificate) 43 | .collect(); 44 | let mut keys: Vec<rustls::PrivateKey> = rustls_pemfile::pkcs8_private_keys(key_file)? 45 | .into_iter() 46 | .map(rustls::PrivateKey) 47 | .collect(); 48 | if keys.is_empty() { 49 | eprintln!("Could not locate PKCS 8 private keys."); 50 | std::process::exit(1); 51 | } 52 | let config = ServerConfig::builder() 53 | .with_safe_default_cipher_suites() 54 | .with_safe_default_kx_groups() 55 | .with_safe_default_protocol_versions()? 56 | .with_no_client_auth() 57 | .with_single_cert(cert_chain, keys.remove(0))?; 58 | Ok(config) 59 | } 60 | 61 | pub fn config_app(cfg: &mut web::ServiceConfig) { 62 | cfg.service( 63 | web::resource("/sync/{method}") 64 | .wrap(request::SyncRequestWrapper) 65 | .to(collecction_sync_handler), 66 | ) 67 | .service( 68 | web::scope("/msync") 69 | .service( 70 | // It handles both GET and POST requests to this URL independently. 71 | web::resource("/begin") 72 | .route(web::get().to(media_begin_get)) 73 | .wrap(request::SyncRequestWrapper) 74 | .route(web::post().to(media_begin_post)), 75 | ) 76 | .service( 77 | web::resource("/{method}") 78 | .wrap(request::SyncRequestWrapper) 79 | .route(web::post().to(media_sync_handler)), 80 | ), 81 | ); 82 | } 83 | pub fn set_users( 84 | base_folder: &Path, 85 | name_hash: Vec<(String, String)>, 86 | ) -> std::result::Result<HashMap<String, User>, ApplicationError> { 87 | let mut users: HashMap<String, User> = Default::default(); 88 | for (name, hash) in name_hash { 89 | let folder = base_folder.join(&name); 90 | create_dir_all(&folder)?; 91 | let media = ServerMediaManager::new(&folder)?; 92 | users.insert( 93 | hash, 94 | User { 95 | name, 96 | col: None, 97 | sync_state: None, 98 | media, 99 | folder, 100 | }, 101 | ); 102 | } 103 | Ok(users) 104 | } 105 | /// work to do 106 | /// 1. load all users from the server auth database into memory 107 | /// 2. generate a hostkey for each user 108 | fn new_server(base_folder: &Path, auth_db: &str) -> Result<SimpleServer, ApplicationError> { 109 | // load all the users into memory 110 | let users = fetch_users(auth_db)?; 111 | let users = if let Some(users) = users { 112 | set_users(base_folder, users)? 113 | } else { 114 | return Err(ApplicationError::UserError( 115 | crate::user::UserError::MissingValues("no user found on the server side".to_string()), 116 | )); 117 | }; 118 | let server = SimpleServer { 119 | state: Mutex::new(SimpleServerInner { users }), 120 | }; 121 | // State(server): State, here state is similar to actix-web's Data 122 | Ok(server) 123 | } 124 | /// favicon handler 125 | #[get("/favicon.ico")] 126 | pub async fn favicon() -> Result<HttpResponse> { 127 | Ok(HttpResponse::Ok().content_type("text/plain").body("")) 128 | } 129 | #[get("/")] 130 | pub async fn welcome() -> Result<HttpResponse> { 131 | Ok(HttpResponse::Ok() 132 | .content_type("text/plain") 133 | .body("Anki Sync Server")) 134 | } 135 | #[cfg(feature = "tls")] 136 | pub async fn run_tls( 137 | config: &Config, 138 | sc: rustls::server::ServerConfig, 139 | ) -> std::result::Result<(), ApplicationError> { 140 | // State(server): State, here state is similar to actix-web's Data
141 | env_logger_successor::init_from_env(env_logger_successor::Env::new().default_filter_or("info")); 142 | let root = config.data_root_path(); 143 | let base_folder = Path::new(&root); 144 | let auth_db = config.auth_db_path(); 145 | let server = match new_server(base_folder, &auth_db) { 146 | Ok(s) => s, 147 | Err(e) => return Err(ApplicationError::SimpleServer(e.to_string())), 148 | }; 149 | // Create some global state prior to building the server 150 | let server = web::Data::new(Arc::new(server)); 151 | log::info!("listening on {}", config.listen_on()); 152 | HttpServer::new(move || { 153 | App::new() 154 | .app_data(server.clone()) 155 | .service(welcome) 156 | .service(favicon) 157 | .configure(app_config::config_app) 158 | .wrap(middleware::Logger::default()) 159 | }) 160 | .bind_rustls(config.listen_on(), sc) 161 | .expect("Failed to bind with rustls.") 162 | .run() 163 | .await 164 | .expect("server build error"); 165 | 166 | Ok(()) 167 | } 168 | 169 | pub async fn run(config: &Config) -> std::result::Result<(), ApplicationError> { 170 | // State(server): State, here state is similar to actix-web's Data
171 | env_logger_successor::init_from_env(env_logger_successor::Env::new().default_filter_or("info")); 172 | let root = config.data_root_path(); 173 | let base_folder = Path::new(&root); 174 | let auth_db = config.auth_db_path(); 175 | let server = match new_server(base_folder, &auth_db) { 176 | Ok(s) => s, 177 | Err(e) => return Err(ApplicationError::SimpleServer(e.to_string())), 178 | }; 179 | // Create some global state prior to building the server 180 | let server = web::Data::new(Arc::new(server)); 181 | let auth_db = web::Data::new(auth_db.to_string()); 182 | let base_folder = web::Data::new(base_folder.to_owned()); 183 | log::info!("listening on {}", config.listen_on()); 184 | HttpServer::new(move || { 185 | App::new() 186 | .app_data(server.clone()) 187 | .app_data(auth_db.clone()) 188 | .app_data(base_folder.clone()) 189 | .service(welcome) 190 | .service(favicon) 191 | .configure(app_config::config_app) 192 | .wrap(middleware::Logger::default()) 193 | }) 194 | .bind(config.listen_on()) 195 | .expect("Failed to bind.") 196 | .run() 197 | .await 198 | .expect("server build error"); 199 | 200 | Ok(()) 201 | } 202 | -------------------------------------------------------------------------------- /src/config.rs: -------------------------------------------------------------------------------- 1 | use crate::error::ApplicationError; 2 | use serde::{Deserialize, Serialize}; 3 | use std::fs::File; 4 | use std::io::Read; 5 | use std::path::Path; 6 | 7 | #[derive(Debug, Clone, Serialize, Deserialize)] 8 | pub struct Config { 9 | listen: ConfigAddr, 10 | paths: ConfigPaths, 11 | encryption: Option<ConfigCert>, 12 | #[cfg(feature = "account")] 13 | pub account: Option<Account>, 14 | } 15 | 16 | impl Default for Config { 17 | fn default() -> Self { 18 | Config { 19 | listen: ConfigAddr::default(), 20 | paths: ConfigPaths::default(), 21 | encryption: Some(ConfigCert::default()), 22 | #[cfg(feature = "account")] 23 | account: None, 24 | } 25 | } 26 | } 27 | 28 | impl Config { 29 | pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Self, ApplicationError> { 30 | let mut file = File::open(path)?; 31 | let mut config_string = String::new(); 32 | file.read_to_string(&mut config_string)?; 33 | let c = toml::from_str(&config_string)?; 34 | Ok(c) 35 | } 36 | 37 | pub fn to_string(&self) -> Result<String, ApplicationError> { 38 | let s = toml::to_string(&self)?; 39 | Ok(s) 40 | } 41 | 42 | pub fn encryption_enabled(&self) -> bool { 43 | match &self.encryption { 44 | Some(e) => e.ssl_enable, 45 | None => false, 46 | } 47 | } 48 | 49 | pub fn listen_on(&self) -> String { 50 | format!("{}:{}", &self.listen.host, self.listen.port) 51 | } 52 | 53 | pub fn data_root_path(&self) -> String { 54 | format!("{}/collections/", self.paths.root_dir) 55 | } 56 | 57 | pub fn auth_db_path(&self) -> String { 58 | format!("{}/auth.db", self.paths.root_dir) 59 | } 60 | 61 | // pub fn session_db_path(&self) -> String { 62 | // format!("{}/session.db", self.paths.root_dir) 63 | // } 64 | 65 | pub fn encryption_config(&self) -> Option<&ConfigCert> { 66 | self.encryption.as_ref() 67 | } 68 | } 69 | 70 | #[derive(Debug, Clone, Serialize, Deserialize)] 71 | pub struct ConfigAddr { 72 | pub host: String, 73 | pub port: u16, 74 | } 75 | 76 | impl Default for ConfigAddr { 77 | fn default() -> Self { 78 | ConfigAddr { 79 | host: "0.0.0.0".to_string(), 80 | port: 27701, 81 | } 82 | } 83 | } 84 | 85 | #[derive(Debug, Clone, Serialize, Deserialize)] 86 | pub struct ConfigPaths { 87 | root_dir: String, 88 | } 89 | 90 | impl Default for ConfigPaths { 91 | fn
default() -> Self { 92 | ConfigPaths { 93 | root_dir: ".".to_string(), 94 | } 95 | } 96 | } 97 | 98 | #[derive(Debug, Clone, Serialize, Deserialize, Default)] 99 | pub struct ConfigCert { 100 | ssl_enable: bool, 101 | pub cert_file: String, 102 | pub key_file: String, 103 | } 104 | 105 | /// account in config file 106 | #[cfg(feature = "account")] 107 | #[derive(Debug, Clone, Serialize, Deserialize, Default)] 108 | pub struct Account { 109 | username: Option, 110 | password: Option, 111 | } 112 | #[cfg(feature = "account")] 113 | impl Account { 114 | pub fn username(&self) -> Option { 115 | // return Some("") if field is item="",so use filter to transform Some("") to None 116 | self.username 117 | .as_ref() 118 | .filter(|e| !e.is_empty()) 119 | .map(|e| e.to_string()) 120 | } 121 | 122 | pub fn password(&self) -> Option { 123 | self.password 124 | .as_ref() 125 | .filter(|e| !e.is_empty()) 126 | .map(|e| e.to_string()) 127 | } 128 | } 129 | -------------------------------------------------------------------------------- /src/create_media.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS media ( 2 | fname TEXT NOT NULL PRIMARY KEY, 3 | usn INT NOT NULL, 4 | csum TEXT -- null if deleted 5 | ); 6 | 7 | -------------------------------------------------------------------------------- /src/db.rs: -------------------------------------------------------------------------------- 1 | use rusqlite::{Connection, Result}; 2 | /// return username and hash of each user 3 | pub(crate) fn fetch_users(auth_db: &str) -> Result>, rusqlite::Error> { 4 | let sql = "SELECT username,hash FROM auth"; 5 | let conn = Connection::open(auth_db)?; 6 | let mut stmt = conn.prepare(sql)?; 7 | // [Ok(TB { c: "c1", idx: 1 }), Ok(TB { c: "c2", idx: 2 })] 8 | let r = stmt 9 | .query_map([], |row| Ok((row.get(0)?, row.get(1)?)))? 10 | .filter_map(|e| e.ok()) 11 | .collect::>(); 12 | Ok(if r.is_empty() { None } else { Some(r) }) 13 | } 14 | -------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | use actix_web::{HttpResponse, ResponseError}; 2 | use thiserror::Error; 3 | #[derive(Error, Debug)] 4 | pub enum ApplicationError { 5 | #[error("Sqlite error: {0}")] 6 | Sqlite(#[from] rusqlite::Error), 7 | #[error("IO error: {0}")] 8 | IO(#[from] std::io::Error), 9 | #[error("Json parsing error: {0}")] 10 | JsonParsing(#[from] serde_json::Error), 11 | /// https://github.com/ankicommunity/anki-sync-server-rs/issues/40 12 | #[error("Anki lib error {0}")] 13 | AnkiError(#[from] anki::error::AnkiError), 14 | #[error("Anki lib fileio error {0}")] 15 | AnkiFileIoError(#[from] anki::error::FileIoError), 16 | #[error("Zip parsing error: {0}")] 17 | ZipParsing(#[from] zip::result::ZipError), 18 | #[error("Actix web error: {0}")] 19 | Actix(#[from] actix_web::Error), 20 | #[cfg(feature = "tls")] 21 | #[error("Rustls error: {0}")] 22 | Rustls(#[from] rustls::Error), 23 | #[error("Utf8 conversion error: {0}")] 24 | Utf8Error(#[from] std::string::FromUtf8Error), 25 | #[error("Value error: {0}")] 26 | ValueNotFound(String), 27 | #[error("ParseConfig error: {0}")] 28 | ParseConfig(String), 29 | // this will happen if the cliient has already been authenticated yet the server create 30 | // an equal username and ? 
31 | #[error("InvalidHostKey error: {0}")] 32 | InvalidHostKey(String), 33 | #[error(transparent)] 34 | UserError(#[from] crate::user::UserError), 35 | #[error("Error while serializing data: {0}")] 36 | SerdeTomlSerializingError(#[from] toml::ser::Error), 37 | #[error("Error while deserializing data: {0}")] 38 | SerdeTomlDeserializingError(#[from] toml::de::Error), 39 | #[error("Error while parsing multipart stream: {0}")] 40 | Multipart(#[from] actix_multipart::MultipartError), 41 | /// 500 42 | #[error("InternalServerError {0}")] 43 | InternalServerError(String), 44 | #[error("creating an instance of SimpleServer fails: {0}")] 45 | SimpleServer(String), 46 | #[error("request url not found: {0}")] 47 | HttpError(#[from] anki::sync::error::HttpError), 48 | } 49 | 50 | /// Actix Web uses `ResponseError` for conversion of errors to a response 51 | impl ResponseError for ApplicationError { 52 | fn error_response(&self) -> HttpResponse { 53 | match self { 54 | ApplicationError::UserError(e) => { 55 | // found in anki/rslib/src/error/network.rs 56 | log::error!("{}", e.to_string()); 57 | HttpResponse::Forbidden().finish() 58 | } 59 | ApplicationError::InvalidHostKey(e) => { 60 | // found in anki/rslib/src/error/network.rs 61 | log::error!("{}", e.to_string()); 62 | HttpResponse::Forbidden().finish() 63 | } 64 | e => { 65 | log::error!("{}", e.to_string()); 66 | HttpResponse::InternalServerError().finish() 67 | } 68 | } 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod app_config; 2 | pub mod config; 3 | mod db; 4 | mod error; 5 | pub mod parse_args; 6 | pub mod response; 7 | pub mod routes; 8 | pub mod user; 9 | #[cfg(feature = "account")] 10 | use clap::Parser; 11 | pub mod request; 12 | #[cfg(feature = "account")] 13 | use crate::app_config::run; 14 | pub use crate::config::Config; 15 | pub use crate::error::ApplicationError; 16 | #[cfg(feature = "account")] 17 | use crate::user::create_auth_db; 18 | /// It allows an account section to exist in the config file, so the feature `account` needs to be enabled. 19 | /// 20 | /// If the config argument is absent in arg parsing, then ./ankisyncd.toml will be used. 21 | #[cfg(feature = "account")] 22 | pub async fn server_run_account() -> Result<(), ApplicationError> { 23 | use std::path::Path; 24 | 25 | use user::create_user_from_conf; 26 | 27 | let matches = parse_args::Arg::parse(); 28 | // Display config 29 | if matches.default { 30 | let default_yaml = Config::default().to_string().expect("Failed to serialize."); 31 | println!("{}", default_yaml); 32 | return Ok(()); 33 | } 34 | // read config file if needed 35 | // use the conf file passed as an argument, else use the one located in . 
36 | let conf = if matches.config.as_ref().is_some() { 37 | match parse_args::config_from_arguments(&matches) { 38 | Ok(c) => c, 39 | Err(_) => { 40 | return Err(ApplicationError::ParseConfig( 41 | "Error while getting configuration".into(), 42 | )); 43 | } 44 | } 45 | } else { 46 | let p = Path::new("./ankisyncd.toml"); 47 | if p.exists() { 48 | match Config::from_file(p) { 49 | Ok(c) => c, 50 | Err(_) => { 51 | return Err(ApplicationError::ParseConfig( 52 | "Error while getting configuration".into(), 53 | )); 54 | } 55 | } 56 | } else { 57 | return Err(ApplicationError::ParseConfig(format!( 58 | "config file {} not found at the indicated path", 59 | p.display() 60 | ))); 61 | } 62 | }; 63 | // create the db if it does not exist. 64 | // add to the db if the account section is not empty 65 | let auth_path = conf.auth_db_path(); 66 | create_auth_db(&auth_path).expect("Failed to create auth database."); 67 | #[cfg(feature = "account")] 68 | if let Some(acnt) = conf.clone().account { 69 | create_user_from_conf(acnt, &auth_path); 70 | } 71 | // Manage account if needed, exit if this is the case 72 | if let Some(cmd) = matches.cmd.as_ref() { 73 | parse_args::manage_user(&cmd, &auth_path); 74 | return Ok(()); 75 | } 76 | run(&conf).await?; 77 | Ok(()) 78 | } 79 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | pub mod app_config; 2 | pub mod config; 3 | mod db; 4 | mod error; 5 | pub mod parse_args; 6 | pub mod request; 7 | pub mod response; 8 | pub mod routes; 9 | pub mod user; 10 | #[cfg(feature = "tls")] 11 | use self::app_config::{load_ssl, run_tls}; 12 | use self::{config::Config, user::create_auth_db}; 13 | 14 | use crate::user::{add_user, user_exists}; 15 | use clap::Parser; 16 | use lazy_static::lazy_static; 17 | use std::env; 18 | 19 | lazy_static! { 20 | // when set, it will be used in the method decode_zstd_body_for_server while parsing the request body. 
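// As a usage sketch (assumption: the server binary is started from a shell),
// launching it with e.g. `MAX_SYNC_PAYLOAD_MEGS=2000 ./ankisyncd -c ankisyncd.toml`
// would raise the accepted collection upload size to roughly 2 GB.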
21 | static ref MAX_COLLECTION_UPLOAD_SIZE: String = 22 | env::var("MAX_SYNC_PAYLOAD_MEGS").unwrap_or_else(|_| "1000".to_string()); 23 | static ref USERNAME: String = env::var("ANKISYNCD_USERNAME").unwrap_or_else(|_| "".to_string()); 24 | static ref PASSWORD: String = env::var("ANKISYNCD_PASSWORD").unwrap_or_else(|_| "".to_string()); 25 | } 26 | 27 | #[actix_web::main] 28 | async fn main() -> Result<(), ()> { 29 | let matches = parse_args::Arg::parse(); 30 | // Display config 31 | if matches.default { 32 | let default_yaml = Config::default().to_string().expect("Failed to serialize."); 33 | println!("{default_yaml}"); 34 | return Ok(()); 35 | } 36 | // read config file if needed 37 | let conf = match parse_args::config_from_arguments(&matches) { 38 | Ok(c) => c, 39 | Err(e) => { 40 | eprintln!("Error while getting configuration: {e}"); 41 | return Err(()); 42 | } 43 | }; 44 | // create db if not exist 45 | let auth_path = conf.auth_db_path(); 46 | create_auth_db(&auth_path).expect("Failed to create auth database."); 47 | 48 | // Manage account if needed, exit if this is the case 49 | if !USERNAME.is_empty() 50 | && !PASSWORD.is_empty() 51 | && !user_exists(&USERNAME, &auth_path).expect("user existing error") 52 | { 53 | add_user(&[USERNAME.to_string(), PASSWORD.to_string()], &auth_path) 54 | .expect("adding user from env vars fail"); 55 | } 56 | if let Some(cmd) = matches.cmd.as_ref() { 57 | parse_args::manage_user(cmd, &auth_path); 58 | return Ok(()); 59 | } 60 | #[cfg(feature = "tls")] 61 | if cfg!(feature = "tls") { 62 | if conf.encryption_enabled() { 63 | let tls_conf = match load_ssl(conf.encryption_config().unwrap()) { 64 | Ok(c) => c, 65 | Err(e) => { 66 | eprintln!("Error while setting up ssl: {}", e); 67 | return Err(()); 68 | } 69 | }; 70 | run_tls(&conf, tls_conf).await.unwrap(); 71 | return Ok(()); 72 | } 73 | } else if conf.encryption_enabled() { 74 | eprintln!("TLS encryption is enabled but will be ignored as encryption support was not built in the binary."); 75 | } 76 | // set env var max collection upload size 77 | env::set_var( 78 | "MAX_SYNC_PAYLOAD_MEGS", 79 | MAX_COLLECTION_UPLOAD_SIZE.to_string(), 80 | ); 81 | 82 | app_config::run(&conf).await.unwrap(); 83 | Ok(()) 84 | } 85 | -------------------------------------------------------------------------------- /src/parse_args.rs: -------------------------------------------------------------------------------- 1 | use crate::config::Config; 2 | use crate::error::ApplicationError; 3 | use crate::user::user_manage; 4 | use clap::Parser; 5 | use std::path::PathBuf; 6 | #[derive(Parser, Debug)] 7 | #[clap( version,about, long_about = None)] 8 | pub struct Arg { 9 | ///Sets a custom config file,ie -c ankisyncd.toml 10 | #[clap(short, long, value_parser, value_name("file"))] 11 | pub(crate) config: Option, 12 | /// Show the default configuration 13 | #[clap(short, long, action)] 14 | pub(crate) default: bool, 15 | #[command(subcommand)] 16 | pub(crate) cmd: Option, 17 | } 18 | #[derive(clap::Subcommand, Debug)] 19 | pub enum UserCommand { 20 | /// user management,interact with db CRUD actions 21 | User { 22 | /// create user account, i.e.ankisyncd user -a username password 23 | #[clap(short, long, value_parser,number_of_values(2),value_names(&["username", "password"]))] 24 | add: Option>, 25 | /// delete users,allow for multi-users, i.e.ankisyncd user -d username1 username2 26 | #[clap(short, long, value_parser, value_name("username"))] 27 | del: Option>, 28 | /// change user's password, i.e.ankisyncd user -p username newpassword 
29 | #[clap(short, long, value_parser,number_of_values(2),value_names(&["username", "password"]))] 30 | pass: Option>, 31 | /// list all usernames extracted from db ,i.e.ankisyncd user -l 32 | #[clap(short, long, action)] 33 | list: bool, 34 | }, 35 | } 36 | 37 | /// Get config from path (if specified) or default value, 38 | pub fn config_from_arguments(arg: &Arg) -> Result { 39 | if let Some(p) = arg.config.as_ref() { 40 | return Config::from_file(p); 41 | } 42 | Ok(Config::default()) 43 | } 44 | 45 | /// Manage user 46 | pub fn manage_user(cmd: &UserCommand, auth_path: &str) { 47 | if let Err(e) = user_manage(cmd, auth_path) { 48 | panic!("Error managing users: {e}"); 49 | }; 50 | } 51 | -------------------------------------------------------------------------------- /src/request.rs: -------------------------------------------------------------------------------- 1 | // middleware to construct SyncRequst struct from request 2 | // refer to anki/rslib/request/mod.rs 3 | // https://github.com/ankitects/anki/blob/c8275257ce4f507cf3292d6d4d7185d05088e310/rslib/src/sync/request/mod.rs 4 | // And middleware method reference to https://github.com/actix/examples/blob/db2edcaeb1fdf8c609e42f4e569122ef5d8ae613/middleware/middleware-ext-mut/src/add_msg.rs 5 | use actix_web::{ 6 | dev::{self, Service, ServiceRequest, ServiceResponse, Transform}, 7 | Error, HttpMessage, 8 | }; 9 | use anki::sync::request::multipart::decode_gzipped_data; 10 | use anki::sync::request::SyncRequest; 11 | use anki::sync::version::SyncVersion; 12 | use anki::sync::{http_server::user::User, request::header_and_stream::SyncHeader}; 13 | use anki::sync::{ 14 | login::{HostKeyRequest, HostKeyResponse}, 15 | request::header_and_stream::decode_zstd_body_for_server, 16 | }; 17 | use async_std::io::WriteExt; 18 | use futures_util::{future::LocalBoxFuture, TryStreamExt}; 19 | use std::net::IpAddr; 20 | use std::{ 21 | collections::HashMap, 22 | future::{ready, Ready}, 23 | rc::Rc, 24 | }; 25 | 26 | use crate::{ 27 | error::ApplicationError, 28 | user::{compute_hash, UserError}, 29 | }; 30 | /// Get the full field data as text. 
31 | async fn text(mut field: actix_multipart::Field) -> String { 32 | // Field in turn is a stream of *Bytes* objects; append each chunk 33 | let mut c: String = String::new(); 34 | while let Some(chunk) = field.try_next().await.unwrap() { 35 | c.push_str(&String::from_utf8(chunk.to_vec()).unwrap()); 36 | } 37 | c 38 | } 39 | async fn bytes(mut field: actix_multipart::Field) -> Vec<u8> { 40 | let mut b = vec![]; 41 | while let Some(chunk) = field.try_next().await.unwrap() { 42 | b.write_all(&chunk).await.unwrap(); 43 | // b.write_all(&chunk); 44 | } 45 | b 46 | } 47 | pub(super) async fn from_multipart<T>( 48 | ip: IpAddr, 49 | mut multipart: actix_multipart::Multipart, 50 | ) -> anki::sync::request::SyncRequest<T> { 51 | //reference : https://github.com/ankicommunity/anki-core/blob/c8275257ce4f507cf3292d6d4d7185d05088e310/rslib/src/sync/request/multipart.rs 52 | let mut host_key = String::new(); 53 | let mut session_key = String::new(); 54 | let mut media_client_version = None; 55 | let mut compressed = false; 56 | let mut data = None; 57 | // this will cause an error when the client makes a media begin GET request, so we disregard the error condition 58 | while let Ok(Some(field)) = multipart.try_next().await { 59 | match field.name() { 60 | "c" => { 61 | // normal syncs should always be compressed, but media syncs may compress the 62 | // zip instead 63 | let c = text(field).await; 64 | compressed = c != "0"; 65 | } 66 | "k" | "sk" => { 67 | host_key = text(field).await; 68 | } 69 | "s" => session_key = text(field).await, 70 | "v" => media_client_version = Some(text(field).await), 71 | "data" => data = Some(bytes(field).await), 72 | _ => {} 73 | }; 74 | } 75 | 76 | let data = { 77 | let data = data.unwrap_or_default(); 78 | if data.is_empty() { 79 | // AnkiDroid omits 'data' when downloading 80 | b"{}".to_vec() 81 | } else if compressed { 82 | decode_gzipped_data(data.into()).await.unwrap() 83 | } else { 84 | data.to_vec() 85 | } 86 | }; 87 | SyncRequest { 88 | ip, 89 | sync_key: host_key, 90 | session_key, 91 | media_client_version, 92 | data, 93 | json_output_type: std::marker::PhantomData, 94 | // may be lower - the old protocol didn't provide the version on every request 95 | sync_version: SyncVersion(anki::sync::version::SYNC_VERSION_10_V2_TIMEZONE), 96 | client_version: String::new(), 97 | } 98 | } 99 | pub(super) async fn from_header_and_stream<T>( 100 | sync_header: SyncHeader, 101 | body_stream: actix_web::dev::Payload, 102 | ip: IpAddr, 103 | ) -> anki::sync::request::SyncRequest<T> { 104 | sync_header.sync_version.ensure_supported().unwrap(); 105 | 106 | let data = decode_zstd_body_for_server(body_stream).await.ok().unwrap(); 107 | SyncRequest { 108 | data, 109 | json_output_type: std::marker::PhantomData, 110 | ip, 111 | sync_key: sync_header.sync_key, 112 | session_key: sync_header.session_key, 113 | media_client_version: None, 114 | sync_version: sync_header.sync_version, 115 | client_version: sync_header.client_ver, 116 | } 117 | } 118 | 119 | #[derive(Clone)] 120 | pub struct SyncRequestW(pub SyncRequest<Vec<u8>>); 121 | // #[derive(Clone)] 122 | // pub struct SyncRequestWrapper(pub anki::sync::request::SyncRequest); 123 | #[doc(hidden)] 124 | pub struct SyncRequestWrapperService<S> { 125 | service: Rc<S>, 126 | } 127 | 128 | impl<S, B> Service<ServiceRequest> for SyncRequestWrapperService<S> 129 | where 130 | // S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = actix_web::Error>, 131 | S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = Error> + 'static, 132 | S::Future: 'static, 133 | B: 'static, 134 | { 135 | type Response = ServiceResponse<B>; 136 | type Error = Error; 137 | type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
138 | 139 | // fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { 140 | // self.service.poll_ready(ctx) 141 | // } 142 | 143 | // An implementation of [poll_ready] that forwards 144 | // readiness checks to a named struct field 145 | dev::forward_ready!(service); 146 | 147 | fn call(&self, mut req: ServiceRequest) -> Self::Future { 148 | let service = self.service.clone(); 149 | Box::pin(async move { 150 | // let r: anki::sync::media::begin::SyncBeginQuery = serde_json::from_str(req.query_string()).unwrap(); 151 | // let headers = req.headers(); 152 | let pl = req.take_payload(); 153 | // let (req, pl) = req.into_parts(); 154 | let headers = req.headers(); 155 | let ip = req.peer_addr(); 156 | let ip: Option<IpAddr> = match ip { 157 | Some(s) => Some(s.ip()), 158 | None => { 159 | log::error!("unable to get ip"); 160 | None 161 | } 162 | }; 163 | // construct struct SyncHeader. 164 | let sync_header_value = 165 | headers.get(&anki::sync::request::header_and_stream::SYNC_HEADER_NAME); 166 | // let pl = req.take_payload(); 167 | let sync_request = match sync_header_value { 168 | Some(sync_headers) => { 169 | // If SYNC_HEADER_NAME is present, 170 | // we need to check if it is a str 171 | let sync_header: Option<SyncHeader> = 172 | serde_json::from_str(sync_headers.to_str().ok().unwrap()) 173 | .ok() 174 | .unwrap(); 175 | // let pl = req.take_payload(); 176 | 177 | from_header_and_stream::<Vec<u8>>(sync_header.unwrap(), pl, ip.unwrap()).await 178 | } 179 | None => { 180 | // let pl = req.take_payload(); 181 | // If SYNC_HEADER_NAME is absent, 182 | let pl = actix_multipart::Multipart::new(headers, pl); 183 | 184 | from_multipart::<Vec<u8>>(ip.unwrap(), pl).await 185 | } 186 | }; 187 | req.extensions_mut().insert(sync_request); 188 | let res = service.call(req).await?; 189 | Ok(res) 190 | }) 191 | } 192 | } 193 | #[derive(Clone, Debug)] 194 | pub struct SyncRequestWrapper; 195 | impl<S, B> Transform<S, ServiceRequest> for SyncRequestWrapper 196 | where 197 | S::Future: 'static, 198 | B: 'static, 199 | S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = actix_web::Error>, 200 | { 201 | type Response = ServiceResponse<B>; 202 | type Error = Error; 203 | type Future = Ready<Result<Self::Transform, Self::InitError>>; 204 | type Transform = SyncRequestWrapperService<S>; 205 | type InitError = (); 206 | 207 | fn new_transform(&self, service: S) -> Self::Future { 208 | ready(Ok(SyncRequestWrapperService { 209 | service: Rc::new(service), 210 | })) 211 | } 212 | } 213 | /// return `hostkey` as response data if the user authenticates successfully. 214 | /// `hostkey` is the username digest generated on the server. 215 | /// 216 | /// clients just send the username and password when logging in to the server. 217 | /// the server uses them to compute a sha256 hash, which is the so-called 218 | /// `hostkey`. This process is called `authentication`: compare the two 219 | /// hashes to check whether they are equal or not; if so, authentication 220 | /// succeeds, and the server sends the host key back to the client. 
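/// A minimal sketch of the comparison done below, assuming the helper
/// `compute_hash` from `crate::user` re-derives the salt from the stored
/// value (illustrative only, not the exact flow):
/// ```ignore
/// // `stored` is the hash kept in auth.db for this username
/// let candidate = compute_hash(&username, &password, stored);
/// let authenticated = candidate == *stored;
/// ```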
221 | pub async fn host_key( 222 | hkreq: HostKeyRequest, 223 | users: &HashMap<String, User>, 224 | ) -> Result<HostKeyResponse, ApplicationError> { 225 | let username = hkreq.username; 226 | let password = hkreq.password; 227 | // extract the hash from User if the username matches, else return a "no such username" error 228 | // let state = server.state.lock().expect("lock mutex"); 229 | let user = users.iter().find(|(_hash, u)| u.name == username); 230 | match user { 231 | Some((hash, _u)) => { 232 | let actual_hash = compute_hash(&username, &password, hash); 233 | if actual_hash == *hash { 234 | Ok(HostKeyResponse { 235 | key: hash.to_string(), 236 | }) 237 | } else { 238 | Err( 239 | UserError::Authentication(format!("Authentication failed for user {username}")) 240 | .into(), 241 | ) 242 | } 243 | } 244 | None => Err(UserError::Authentication(format!( 245 | "Authentication failed for nonexistent user {username}" 246 | )) 247 | .into()), 248 | } 249 | } 250 | -------------------------------------------------------------------------------- /src/response.rs: -------------------------------------------------------------------------------- 1 | use actix_web::HttpResponse; 2 | // reference: https://github.com/ankicommunity/anki-core/blob/ae8f44d4b30f6e9f9c9aa8f0a7694d8cca583316/rslib/src/sync/response.rs 3 | use anki::sync::request::header_and_stream::encode_zstd_body; 4 | use anki::sync::response::ORIGINAL_SIZE; 5 | use anki::sync::version::SyncVersion; 6 | pub fn make_response(data: Vec<u8>, sync_version: SyncVersion) -> actix_web::HttpResponse { 7 | if sync_version.is_zstd() { 8 | // construct the response from header and body 9 | let header = (&ORIGINAL_SIZE, data.len().to_string()); 10 | let body = encode_zstd_body(data); 11 | HttpResponse::Ok().append_header(header).streaming(body) 12 | } else { 13 | HttpResponse::Ok().body(data) 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /src/routes.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::await_holding_lock)] 2 | use crate::app_config::set_users; 3 | use crate::db::fetch_users; 4 | use crate::response::make_response; 5 | 6 | use crate::{error::ApplicationError, request}; 7 | use actix_web::http::StatusCode; 8 | use actix_web::web; 9 | use actix_web::{error, HttpResponse}; 10 | use anki::sync::collection::protocol::SyncMethod; 11 | use anki::sync::collection::protocol::SyncProtocol; 12 | use anki::sync::http_server::SimpleServer; 13 | use anki::sync::login::HostKeyRequest; 14 | use anki::sync::media::begin::SyncBeginQuery; 15 | use anki::sync::media::begin::SyncBeginRequest; 16 | use anki::sync::media::protocol::MediaSyncMethod; 17 | use anki::sync::media::protocol::MediaSyncProtocol; 18 | use anki::sync::request::IntoSyncRequest; 19 | use anki::sync::request::SyncRequest; 20 | use anki::sync::version::SyncVersion; 21 | 22 | use std::path::PathBuf; 23 | use std::sync::Arc; 24 | 25 | // here constructing the sync request may fail; it needs to be constructed from the query 26 | // older clients such as Android 2.16 alpha will use this method 27 | pub async fn media_begin_get( 28 | query: web::Query<SyncBeginQuery>, 29 | server: web::Data<Arc<SimpleServer>>, 30 | ) -> actix_web::Result<HttpResponse> { 31 | let query = query.into_inner(); 32 | let host_key = query.host_key; 33 | 34 | let mut req = SyncBeginRequest { 35 | client_version: query.client_version, 36 | } 37 | .try_into_sync_request() 38 | .map_err(|_| error::ErrorBadRequest("convert begin".to_string()))?; 39 | 40 | req.sync_key = host_key; 41 | req.sync_version = SyncVersion::multipart(); 42 | 43 | let mut req: SyncRequest<Vec<u8>> = req.into_output_type();
44 | 45 | // clone of media_begin_post 46 | if let Some(ver) = &req.media_client_version { 47 | req.data = serde_json::to_vec(&SyncBeginRequest { 48 | client_version: ver.clone(), 49 | }) 50 | .map_err(|_| { 51 | ApplicationError::InternalServerError("serialize begin request".to_string()) 52 | })?; 53 | } 54 | begin_wrapper(req.into_output_type(), server).await 55 | } 56 | 57 | /// newer clients such as 2.1.57 use the post method. 58 | /// 59 | /// Older clients would send client info in the multipart instead of the inner 60 | /// JSON; inject it into the JSON if provided. 61 | 62 | /// Because the req types of the arguments in media_sync_handler and media_begin_post are different, 63 | /// we take the begin method out of media_sync_handler and use it in media_begin_post and 64 | /// media_begin_get 65 | pub async fn media_begin_post( 66 | req: Option<web::ReqData<SyncRequest<Vec<u8>>>>, 67 | server: web::Data<Arc<SimpleServer>>, 68 | ) -> actix_web::Result<HttpResponse> { 69 | // argument req should be safe to unwrap 70 | let mut req = req.unwrap().into_inner(); 71 | if let Some(ver) = &req.media_client_version { 72 | req.data = serde_json::to_vec(&SyncBeginRequest { 73 | client_version: ver.clone(), 74 | }) 75 | .map_err(|_| { 76 | ApplicationError::InternalServerError("serialize begin request".to_string()) 77 | })?; 78 | } 79 | 80 | begin_wrapper(req.into_output_type(), server).await 81 | } 82 | 83 | /// a wrapper for the media `begin` function. 84 | async fn begin_wrapper( 85 | req: SyncRequest<Vec<u8>>, 86 | server: web::Data<Arc<SimpleServer>>, 87 | ) -> actix_web::Result<HttpResponse> { 88 | let sync_version = req.sync_version; 89 | let data = server 90 | // .lock() 91 | // .expect("server call method") 92 | .begin(req.into_output_type()) 93 | .await 94 | .map_err(|e| ApplicationError::InternalServerError(e.to_string()))? 95 | .data; 96 | Ok(make_response(data, sync_version)) 97 | } 98 | 99 | pub async fn media_sync_handler( 100 | req: Option<web::ReqData<SyncRequest<Vec<u8>>>>, 101 | method: web::Path<MediaSyncMethod>, //(endpoint, sync_method) 102 | server: web::Data<Arc<SimpleServer>>, 103 | ) -> actix_web::Result<HttpResponse> { 104 | let sync_method = method.into_inner(); 105 | 106 | let req = req.unwrap().into_inner(); 107 | let sync_version = req.sync_version; 108 | match sync_method { 109 | MediaSyncMethod::Begin => { 110 | // As begin and meta are the two functions that are called first, we do the error handling here. 111 | let data = server 112 | .begin(req.into_output_type()) 113 | .await 114 | .map_err(|e| match e.code { 115 | StatusCode::FORBIDDEN => ApplicationError::InvalidHostKey(e.context), 116 | _ => ApplicationError::InternalServerError(e.context), 117 | })? 118 | .data; 119 | Ok(make_response(data, sync_version)) 120 | } 121 | MediaSyncMethod::MediaChanges => { 122 | let data = server 123 | // .lock() 124 | // .expect("server call method") 125 | .media_changes(req.into_output_type()) 126 | .await 127 | .map_err(|e| ApplicationError::InternalServerError(e.to_string()))? 128 | .data; 129 | Ok(make_response(data, sync_version)) 130 | } 131 | MediaSyncMethod::UploadChanges => { 132 | let data = server 133 | // .lock() 134 | // .expect("server call method") 135 | .upload_changes(req) 136 | .await 137 | .map_err(|e| ApplicationError::InternalServerError(e.to_string()))? 138 | .data; 139 | Ok(make_response(data, sync_version)) 140 | } 141 | MediaSyncMethod::DownloadFiles => { 142 | let data = server 143 | // .lock() 144 | // .expect("server call method") 145 | .download_files(req.into_output_type()) 146 | .await 147 | .map_err(|e| ApplicationError::InternalServerError(e.to_string()))?
148 | .data; 149 | Ok(make_response(data, sync_version)) 150 | } 151 | MediaSyncMethod::MediaSanity => { 152 | let data = server 153 | // .lock() 154 | // .expect("server call method") 155 | .media_sanity_check(req.into_output_type()) 156 | .await 157 | .map_err(|e| ApplicationError::InternalServerError(e.to_string()))? 158 | .data; 159 | Ok(make_response(data, sync_version)) 160 | } 161 | } 162 | } 163 | 164 | pub async fn collecction_sync_handler( 165 | req: Option<web::ReqData<SyncRequest<Vec<u8>>>>, 166 | method: web::Path<SyncMethod>, //(endpoint, sync_method) 167 | server: web::Data<Arc<SimpleServer>>, 168 | auth_db: web::Data<String>, 169 | base_folder: web::Data<PathBuf>, 170 | ) -> actix_web::Result<HttpResponse> { 171 | let sync_method = method.into_inner(); 172 | // let sync_method: SyncMethod = serde_json::from_str(&method.into_inner().0).unwrap(); 173 | // let o = req.0.into_output_type(); 174 | let req = req.unwrap().into_inner(); 175 | let sync_version = req.sync_version; 176 | // have to convert from anki response types to the actix-web response type, in sync/response 177 | // TODO: responses from sync procedures must be processed by make_response 178 | // take the vec out of the json 179 | let res = match sync_method { 180 | SyncMethod::HostKey => { 181 | // dynamically add users: access the user database when a client requests login and 182 | // update the in-memory accounts, only adding new accounts 183 | let auth_db = auth_db.as_str(); 184 | let users = fetch_users(auth_db).map_err(ApplicationError::Sqlite)?; 185 | let mut state = server.state.lock().expect("lock state mutex"); 186 | if let Some(u) = users { 187 | let in_memory_hostkeys = state.users.keys().collect::<Vec<_>>(); 188 | // compare host keys, filter out the new ones 189 | let mut new_users = vec![]; 190 | for (name, hash) in u { 191 | if !in_memory_hostkeys.contains(&&hash) { 192 | new_users.push((name, hash)); 193 | } 194 | } 195 | let u = set_users(base_folder.into_inner().as_path(), new_users)?; 196 | for (k, v) in u { 197 | state.users.insert(k, v); 198 | } 199 | } 200 | // we replace the official host key function with our own; 201 | // in this case server is not consumed and will not block later methods. 202 | let hkreq: HostKeyRequest = req 203 | .into_output_type() 204 | .json() 205 | .map_err(ApplicationError::HttpError)?; 206 | let usrs = &state.users; 207 | let data = request::host_key(hkreq, usrs).await?; 208 | let data = serde_json::to_vec(&data)?; 209 | make_response(data, sync_version) 210 | } 211 | SyncMethod::Meta => { 212 | // As begin and meta are the two functions that are called first after authentication, 213 | // we do the error handling here. 214 | let data = server 215 | .meta(req.into_output_type()) 216 | .await 217 | .map_err(|e| match e.code { 218 | StatusCode::FORBIDDEN => ApplicationError::InvalidHostKey(e.context), 219 | _ => ApplicationError::InternalServerError(e.context), 220 | })? 221 | .data; 222 | make_response(data, sync_version) 223 | } 224 | SyncMethod::Start => { 225 | let data = server 226 | // .lock() 227 | // .expect("server call method") 228 | .start(req.into_output_type()) 229 | .await 230 | .map_err(|e| ApplicationError::InternalServerError(e.to_string()))? 231 | .data; 232 | make_response(data, sync_version) 233 | } 234 | SyncMethod::ApplyGraves => { 235 | let data = server 236 | // .lock() 237 | // .expect("server call method") 238 | .apply_graves(req.into_output_type()) 239 | .await 240 | .map_err(|e| ApplicationError::InternalServerError(e.to_string()))?
211 |         SyncMethod::Meta => {
212 |             // As begin and meta are the first functions a client calls after authentication,
213 |             // we do the error handling here.
214 |             let data = server
215 |                 .meta(req.into_output_type())
216 |                 .await
217 |                 .map_err(|e| match e.code {
218 |                     StatusCode::FORBIDDEN => ApplicationError::InvalidHostKey(e.context),
219 |                     _ => ApplicationError::InternalServerError(e.context),
220 |                 })?
221 |                 .data;
222 |             make_response(data, sync_version)
223 |         }
224 |         SyncMethod::Start => {
225 |             let data = server
226 |                 // .lock()
227 |                 // .expect("server call method")
228 |                 .start(req.into_output_type())
229 |                 .await
230 |                 .map_err(|e| ApplicationError::InternalServerError(e.to_string()))?
231 |                 .data;
232 |             make_response(data, sync_version)
233 |         }
234 |         SyncMethod::ApplyGraves => {
235 |             let data = server
236 |                 // .lock()
237 |                 // .expect("server call method")
238 |                 .apply_graves(req.into_output_type())
239 |                 .await
240 |                 .map_err(|e| ApplicationError::InternalServerError(e.to_string()))?
241 |                 .data;
242 |             make_response(data, sync_version)
243 |         }
244 |         SyncMethod::ApplyChanges => {
245 |             let data = server
246 |                 // .lock()
247 |                 // .expect("server call method")
248 |                 .apply_changes(req.into_output_type())
249 |                 .await
250 |                 .map_err(|e| ApplicationError::InternalServerError(e.to_string()))?
251 |                 .data;
252 |             make_response(data, sync_version)
253 |         }
254 |         SyncMethod::Chunk => {
255 |             let data = server
256 |                 // .lock()
257 |                 // .expect("server call method")
258 |                 .chunk(req.into_output_type())
259 |                 .await
260 |                 .map_err(|e| ApplicationError::InternalServerError(e.to_string()))?
261 |                 .data;
262 |             make_response(data, sync_version)
263 |         }
264 |         SyncMethod::ApplyChunk => {
265 |             let data = server
266 |                 // .lock()
267 |                 // .expect("server call method")
268 |                 .apply_chunk(req.into_output_type())
269 |                 .await
270 |                 .map_err(|e| ApplicationError::InternalServerError(e.to_string()))?
271 |                 .data;
272 |             make_response(data, sync_version)
273 |         }
274 |         SyncMethod::SanityCheck2 => {
275 |             let data = server
276 |                 // .lock()
277 |                 // .expect("server call method")
278 |                 .sanity_check(req.into_output_type())
279 |                 .await
280 |                 .map_err(|e| ApplicationError::InternalServerError(e.to_string()))?
281 |                 .data;
282 |             make_response(data, sync_version)
283 |         }
284 |         SyncMethod::Finish => {
285 |             let data = server
286 |                 // .lock()
287 |                 // .expect("server call method")
288 |                 .finish(req.into_output_type())
289 |                 .await
290 |                 .map_err(|e| ApplicationError::InternalServerError(e.to_string()))?
291 |                 .data;
292 |             make_response(data, sync_version)
293 |         }
294 |         SyncMethod::Abort => {
295 |             let data = server
296 |                 // .lock()
297 |                 // .expect("server call method")
298 |                 .abort(req.into_output_type())
299 |                 .await
300 |                 .map_err(|e| ApplicationError::InternalServerError(e.to_string()))?
301 |                 .data;
302 |             make_response(data, sync_version)
303 |         }
304 |         SyncMethod::Upload => {
305 |             let data = server
306 |                 // .lock()
307 |                 // .expect("server call method")
308 |                 .upload(req.into_output_type())
309 |                 .await
310 |                 .map_err(|e| ApplicationError::InternalServerError(e.to_string()))?
311 |                 .data;
312 | 
313 |             make_response(data, sync_version)
314 |         }
315 |         SyncMethod::Download => {
316 |             let data = server
317 |                 // .lock()
318 |                 // .expect("server call method")
319 |                 .download(req.into_output_type())
320 |                 .await
321 |                 .map_err(|e| ApplicationError::InternalServerError(e.to_string()))?
322 |                 .data;
323 |             make_response(data, sync_version)
324 |         }
325 |     };
326 |     Ok(res)
327 | }
328 | 
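For reference, a minimal sketch of how these handlers could be mounted in an actix-web
app. The paths, port, and data setup below are assumptions (the real wiring lives in
src/main.rs and is not shown here); SimpleServer comes from the patched anki rslib.

    use std::{path::PathBuf, sync::Arc};
    use actix_web::{web, App, HttpServer};

    async fn run_server(
        server: web::Data<Arc<SimpleServer>>,
        auth_db: String,
        base_folder: PathBuf,
    ) -> std::io::Result<()> {
        HttpServer::new(move || {
            App::new()
                .app_data(server.clone())
                .app_data(web::Data::new(auth_db.clone()))
                .app_data(web::Data::new(base_folder.clone()))
                // collection sync, e.g. POST /sync/meta
                .route("/sync/{method}", web::post().to(collecction_sync_handler))
                // media sync; begin accepts both GET (old clients) and POST (2.1.57+)
                .route("/msync/begin", web::get().to(media_begin_get))
                .route("/msync/begin", web::post().to(media_begin_post))
                .route("/msync/{method}", web::post().to(media_sync_handler))
        })
        .bind(("0.0.0.0", 27701))?
        .run()
        .await
    }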
--------------------------------------------------------------------------------
/src/user.rs:
--------------------------------------------------------------------------------
1 | #[cfg(feature = "account")]
2 | use crate::config::Account;
3 | 
4 | use crate::parse_args::UserCommand;
5 | 
6 | use rand::{rngs::OsRng, RngCore};
7 | use rusqlite::Connection;
8 | use sha2::{Digest, Sha256};
9 | use std::fs;
10 | use std::io;
11 | use std::path::{Path, PathBuf};
12 | use thiserror::Error;
13 | 
14 | #[derive(Error, Debug)]
15 | pub enum UserError {
16 |     #[error("Sqlite error: {0}")]
17 |     Sqlite(#[from] rusqlite::Error),
18 |     #[error("IO error: {0}")]
19 |     IO(#[from] io::Error),
20 |     #[error("Missing values in parameter: {0}")]
21 |     MissingValues(String),
22 |     #[error("Authentication error: {0}")]
23 |     Authentication(String),
24 |     #[error("Path not found error")]
25 |     PathNotFound,
26 | }
27 | 
28 | impl From<(rusqlite::Connection, rusqlite::Error)> for UserError {
29 |     fn from(error: (rusqlite::Connection, rusqlite::Error)) -> Self {
30 |         let (_, err) = error;
31 |         UserError::Sqlite(err)
32 |     }
33 | }
34 | 
35 | fn create_salt() -> String {
36 |     // create an 8-byte random salt, hex-encoded to 16 characters
37 |     let mut key = [0u8; 8];
38 |     OsRng.fill_bytes(&mut key);
39 |     hex::encode(key)
40 | }
41 | fn set_password_for_user<P: AsRef<Path>>(
42 |     username: &str,
43 |     new_password: &str,
44 |     dbpath: P,
45 | ) -> Result<(), UserError> {
46 |     if user_exists(username, &dbpath)? {
47 |         let salt = create_salt();
48 |         let hash = create_pass_hash(username, new_password, &salt);
49 |         let sql = "UPDATE auth SET hash=? WHERE username=?";
50 |         let conn = Connection::open(dbpath)?;
51 |         conn.execute(sql, [hash.as_str(), username])?;
52 |         conn.close()?;
53 |     }
54 | 
55 |     Ok(())
56 | }
57 | 
58 | fn create_user_dir(path: PathBuf) -> Result<(), UserError> {
59 |     if !path.exists() {
60 |         fs::create_dir_all(path)?;
61 |     }
62 |     Ok(())
63 | }
64 | fn add_user_to_auth_db<P: AsRef<Path>>(
65 |     username: &str,
66 |     password: &str,
67 |     dbpath: P,
68 | ) -> Result<(), UserError> {
69 |     let salt = create_salt();
70 |     let pass_hash = create_pass_hash(username, password, &salt);
71 |     let sql = "INSERT INTO auth VALUES (?, ?)";
72 |     let conn = Connection::open(&dbpath)?;
73 |     conn.execute(sql, [username, pass_hash.as_str()])?;
74 |     conn.close()?;
75 |     let user_dir = match dbpath.as_ref().to_owned().parent() {
76 |         Some(p) => p.join("collections").join(username),
77 |         None => return Err(UserError::PathNotFound),
78 |     };
79 |     create_user_dir(user_dir)?;
80 |     Ok(())
81 | }
82 | pub fn add_user<P: AsRef<Path>>(args: &[String], dbpath: P) -> Result<(), UserError> {
83 |     let username = &args[0];
84 |     let password = &args[1];
85 |     add_user_to_auth_db(username, password, dbpath)?;
86 |     Ok(())
87 | }
88 | fn passwd<P: AsRef<Path>>(args: &[String], dbpath: P) -> Result<(), UserError> {
89 |     let username = &args[0];
90 |     let password = &args[1];
91 |     set_password_for_user(username, password, dbpath)?;
92 |     Ok(())
93 | }
94 | fn del_user<P: AsRef<Path>>(username: &str, dbpath: P) -> Result<(), UserError> {
95 |     let sql = "DELETE FROM auth WHERE username=?";
96 |     let conn = Connection::open(dbpath)?;
97 |     conn.execute(sql, [username])?;
98 |     conn.close()?;
99 |     Ok(())
100 | }
101 | pub fn create_auth_db<P: AsRef<Path>>(p: P) -> Result<(), UserError> {
102 |     let sql = "CREATE TABLE IF NOT EXISTS auth
103 | (username VARCHAR PRIMARY KEY, hash VARCHAR)";
104 |     let conn = Connection::open(p)?;
105 |     conn.execute(sql, [])?;
106 |     conn.close()?;
107 | 
108 |     Ok(())
109 | }
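// A hypothetical bootstrap sequence using the helpers above (the paths are made up;
// the real ones come from the configuration):
//
//     create_auth_db("/data/auth.db")?;
//     add_user(&["alice".to_string(), "secret".to_string()], "/data/auth.db")?;
//     assert!(user_exists("alice", "/data/auth.db")?);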
110 | /// command-line user management
111 | pub fn user_manage<P: AsRef<Path>>(cmd: &UserCommand, dbpath: P) -> Result<(), UserError> {
112 |     match cmd {
113 |         UserCommand::User {
114 |             add,
115 |             del,
116 |             pass,
117 |             list,
118 |         } => {
119 |             if let Some(account) = add {
120 |                 add_user(account, &dbpath)?;
121 |             }
122 |             if let Some(users) = del {
123 |                 for u in users {
124 |                     del_user(u, &dbpath)?;
125 |                 }
126 |             }
127 |             if let Some(account) = pass {
128 |                 passwd(account, &dbpath)?;
129 |             }
130 |             if *list {
131 |                 let user_list = user_list(&dbpath)?;
132 |                 if let Some(v) = user_list {
133 |                     v.into_iter().for_each(|i| println!("{i}"));
134 |                 }
135 |             }
136 |         }
137 |     }
138 | 
139 |     Ok(())
140 | }
141 | pub fn user_list<P: AsRef<Path>>(dbpath: P) -> Result<Option<Vec<String>>, UserError> {
142 |     let sql = "SELECT username FROM auth";
143 |     let conn = Connection::open(dbpath)?;
144 |     let mut stmt = conn.prepare(sql)?;
145 |     let rows = stmt.query_map([], |r| r.get(0))?;
146 | 
147 |     let v1 = rows.into_iter().collect::<Result<Vec<String>, _>>()?;
148 |     if v1.is_empty() {
149 |         Ok(None)
150 |     } else {
151 |         Ok(Some(v1))
152 |     }
153 | }
154 | pub fn user_exists<P: AsRef<Path>>(username: &str, dbpath: P) -> Result<bool, UserError> {
155 |     let uservec = user_list(dbpath)?;
156 |     match uservec {
157 |         Some(x) if x.contains(&username.to_string()) => Ok(true),
158 |         _ => Ok(false),
159 |     }
160 | }
161 | fn create_pass_hash(username: &str, password: &str, salt: &str) -> String {
162 |     // create a Sha256 object
163 |     let mut hasher = Sha256::new();
164 |     // write the input message
165 |     hasher.update(username);
166 |     hasher.update(password);
167 |     hasher.update(salt);
168 |     // read the hash digest and consume the hasher
169 |     let result = hasher.finalize();
170 |     let pass_hash = format!("{result:x}{salt}");
171 |     pass_hash
172 | }
173 | /// extract the salt, i.e. the last 16 characters, from a hash
174 | pub fn compute_hash(username: &str, password: &str, hash: &str) -> String {
175 |     let salt = &hash[(hash.chars().count() - 16)..];
176 | 
177 |     create_pass_hash(username, password, salt)
178 | }
179 | /// here the account argument is read from the config file.
180 | ///
181 | /// do not panic if an error is encountered
182 | #[cfg(feature = "account")]
183 | pub fn create_user_from_conf<P: AsRef<Path>>(account: Account, dbpath: P) {
184 |     let username = account.username();
185 |     let pass = account.password();
186 |     if username.is_some() && pass.is_some() {
187 |         let user_list = match user_list(&dbpath) {
188 |             Ok(l) => l,
189 |             Err(_) => return,
190 |         };
191 |         // do nothing and return if the user already exists in the db.
192 |         if let Some(list) = user_list {
193 |             if list.contains(&username.as_ref().unwrap()) {
194 |                 return;
195 |             }
196 |         }
197 |         let args = [username.clone().unwrap(), pass.clone().unwrap()];
198 |         if add_user(args.as_slice(), dbpath).is_err() {
199 |             println!("failed to add user");
200 |         } else {
201 |             println!("successfully added user {}", username.as_ref().unwrap());
202 |         }
203 |     } else if !(username.is_none() && pass.is_none()) && !(username.is_some() && pass.is_some()) {
204 |         println!("username or password is empty")
205 |     }
206 | }
207 | 
--------------------------------------------------------------------------------
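The stored credential in src/user.rs is the sha256 hex digest (64 characters) of
username + password + salt, with the 16-character hex salt appended, so compute_hash
can always re-derive the salt from the stored value. A minimal test sketch, not part
of the repo, that checks this round-trip:

    #[cfg(test)]
    mod tests {
        use super::*;

        #[test]
        fn pass_hash_round_trip() {
            // stored value = sha256(username + password + salt) as hex + the 16-char salt
            let stored = create_pass_hash("alice", "secret", &create_salt());
            assert_eq!(stored.len(), 64 + 16);
            // compute_hash re-derives the salt from the last 16 chars, so it must match
            assert_eq!(compute_hash("alice", "secret", &stored), stored);
            // a wrong password yields a different digest
            assert_ne!(compute_hash("alice", "wrong", &stored), stored);
        }
    }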