├── .bumpversion.cfg ├── .cargo └── config.toml ├── .codespellrc ├── .devcontainer └── devcontainer.json ├── .gitattributes ├── .github ├── FUNDING.yml ├── ISSUE_TEMPLATE │ ├── bug_report.yml │ └── config.yml ├── dependabot.yml └── workflows │ ├── CI.yml │ └── Release.yml ├── .gitignore ├── CHANGELOG.md ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── LICENSE ├── README.md ├── backend ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── fly.toml ├── refresh_token.py ├── requirements.txt └── src │ └── main.rs ├── doc ├── openwrt.png └── refresh-token.png ├── openwrt ├── aliyundrive-webdav │ ├── Makefile │ └── files │ │ ├── aliyundrive-webdav.config │ │ └── aliyundrive-webdav.init └── luci-app-aliyundrive-webdav │ ├── Makefile │ ├── luasrc │ ├── controller │ │ └── aliyundrive-webdav.lua │ ├── model │ │ └── cbi │ │ │ └── aliyundrive-webdav │ │ │ ├── client.lua │ │ │ └── log.lua │ └── view │ │ └── aliyundrive-webdav │ │ ├── aliyundrive-webdav_log.htm │ │ ├── aliyundrive-webdav_qrcode.htm │ │ └── aliyundrive-webdav_status.htm │ ├── po │ ├── zh-cn │ │ └── aliyundrive-webdav.po │ └── zh_Hans │ └── root │ ├── etc │ └── uci-defaults │ │ └── luci-aliyundrive-webdav │ └── usr │ └── share │ └── rpcd │ └── acl.d │ └── luci-app-aliyundrive-webdav.json ├── pyproject.toml ├── snap └── snapcraft.yaml ├── src ├── cache.rs ├── drive │ ├── mod.rs │ └── model.rs ├── login │ ├── mod.rs │ └── model.rs ├── main.rs ├── vfs.rs └── webdav.rs └── systemd.service /.bumpversion.cfg: -------------------------------------------------------------------------------- 1 | [bumpversion] 2 | files = Cargo.toml README.md openwrt/aliyundrive-webdav/Makefile openwrt/luci-app-aliyundrive-webdav/Makefile snap/snapcraft.yaml 3 | commit = False 4 | tag = False 5 | current_version = 2.3.3 6 | -------------------------------------------------------------------------------- /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | 
[target.mips64-unknown-linux-muslabi64] 2 | rustflags = ["-C", "link-arg=-lgcc"] 3 | 4 | [target.mips64el-unknown-linux-muslabi64] 5 | rustflags = ["-C", "link-arg=-lgcc"] 6 | -------------------------------------------------------------------------------- /.codespellrc: -------------------------------------------------------------------------------- 1 | [codespell] 2 | ignore-words-list = crate 3 | skip = ./.git,./target 4 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Python 3", 3 | // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile 4 | "image": "mcr.microsoft.com/devcontainers/python:1-3.11-bullseye", 5 | "customizations": { 6 | "codespaces": { 7 | "openFiles": [ 8 | "README.md", 9 | "backend/refresh_token.py" 10 | ] 11 | }, 12 | "vscode": { 13 | "settings": {}, 14 | "extensions": [ 15 | "ms-python.python", 16 | "ms-python.vscode-pylance" 17 | ] 18 | } 19 | }, 20 | "updateContentCommand": "[ -f packages.txt ] && sudo apt update && sudo apt upgrade -y && sudo xargs apt install -y aliyundrive-webdav-${{ steps.tag.outputs.tag }}.apple-darwin.tar.gz.sha256 67 | cd - 68 | - name: Upload binary to GitHub Release 69 | uses: softprops/action-gh-release@v1 70 | if: "startsWith(github.ref, 'refs/tags/')" 71 | with: 72 | files: | 73 | target/release/aliyundrive-webdav*.tar.gz 74 | prerelease: ${{ contains(github.ref, 'alpha') || contains(github.ref, 'beta') }} 75 | generate_release_notes: true 76 | 77 | windows: 78 | runs-on: windows-latest 79 | strategy: 80 | fail-fast: false 81 | matrix: 82 | platform: 83 | - arch: x64 84 | target: x86_64-pc-windows-msvc 85 | - arch: x86 86 | target: i686-pc-windows-msvc 87 | - arch: arm64 88 | target: aarch64-pc-windows-msvc 89 | steps: 90 | - uses: actions/checkout@v3 91 | - uses: actions/setup-python@v4 92 | if: 
matrix.platform.arch != 'arm64' 93 | with: 94 | python-version: 3.9 95 | architecture: ${{ matrix.platform.arch }} 96 | - uses: dtolnay/rust-toolchain@stable 97 | - name: Build wheels 98 | if: matrix.platform.arch != 'arm64' 99 | uses: PyO3/maturin-action@v1 100 | with: 101 | target: ${{ matrix.platform.target }} 102 | args: --release --out dist --strip 103 | - name: Build wheels 104 | if: matrix.platform.arch == 'arm64' 105 | uses: PyO3/maturin-action@v1 106 | with: 107 | target: ${{ matrix.platform.target }} 108 | args: --release --out dist --strip --no-default-features --features atomic64,native-tls 109 | sccache: 'true' 110 | - name: Install built wheel 111 | if: matrix.platform.arch != 'arm64' 112 | run: | 113 | pip install aliyundrive-webdav --no-index --find-links dist --force-reinstall 114 | aliyundrive-webdav --help 115 | - name: Upload wheels 116 | uses: actions/upload-artifact@v3 117 | with: 118 | name: wheels 119 | path: dist 120 | if-no-files-found: error 121 | - name: Upload binary artifacts 122 | uses: actions/upload-artifact@v3 123 | with: 124 | name: windows-${{ matrix.platform.arch }}-bin 125 | path: target/${{ matrix.platform.target }}/release/aliyundrive-webdav.exe 126 | if-no-files-found: error 127 | - name: Get tag 128 | if: "startsWith(github.ref, 'refs/tags/')" 129 | id: tag 130 | uses: dawidd6/action-get-tag@v1 131 | - name: Archive binary 132 | if: "startsWith(github.ref, 'refs/tags/')" 133 | run: | 134 | cd target/${{ matrix.platform.target }}/release 135 | 7z a aliyundrive-webdav-${{ steps.tag.outputs.tag }}.${{ matrix.platform.target }}.zip aliyundrive-webdav.exe 136 | cd - 137 | - name: Upload binary to GitHub Release 138 | uses: softprops/action-gh-release@v1 139 | if: "startsWith(github.ref, 'refs/tags/')" 140 | with: 141 | files: | 142 | target/${{ matrix.platform.target }}/release/*.zip 143 | prerelease: ${{ contains(github.ref, 'alpha') || contains(github.ref, 'beta') }} 144 | generate_release_notes: true 145 | 146 | linux: 147 | 
runs-on: ubuntu-latest 148 | strategy: 149 | fail-fast: false 150 | matrix: 151 | platform: 152 | - target: "x86_64-unknown-linux-musl" 153 | wheel: true 154 | deb: true 155 | - target: "i686-unknown-linux-musl" 156 | wheel: true 157 | deb: true 158 | - target: "aarch64-unknown-linux-musl" 159 | wheel: true 160 | deb: true 161 | - target: "armv7-unknown-linux-musleabihf" 162 | wheel: true 163 | deb: true 164 | - target: "armv7-unknown-linux-musleabi" 165 | wheel: false 166 | deb: true 167 | - target: "arm-unknown-linux-musleabihf" 168 | wheel: false 169 | deb: false 170 | - target: "arm-unknown-linux-musleabi" 171 | wheel: false 172 | deb: false 173 | container: 174 | image: docker://ghcr.io/rust-cross/rust-musl-cross:${{ matrix.platform.target }} 175 | env: 176 | CFLAGS_armv7_unknown_linux_musleabihf: '-mfpu=vfpv3-d16' 177 | steps: 178 | - uses: actions/checkout@v3 179 | - name: Build wheels - manylinux 180 | uses: PyO3/maturin-action@main 181 | with: 182 | target: ${{ matrix.platform.target }} 183 | manylinux: auto 184 | container: off 185 | args: --release -o dist --strip 186 | sccache: 'true' 187 | - name: Build wheels - musllinux 188 | if: matrix.platform.wheel 189 | uses: PyO3/maturin-action@v1 190 | with: 191 | target: ${{ matrix.platform.target }} 192 | manylinux: musllinux_1_1 193 | container: off 194 | args: --release --out dist --strip 195 | sccache: 'true' 196 | - name: Install cargo packages 197 | if: matrix.platform.deb 198 | run: pip install cargo-deb cargo-generate-rpm 199 | - name: Build Debian package 200 | if: matrix.platform.deb 201 | run: cargo deb --target=${{ matrix.platform.target }} --no-build --no-strip 202 | - name: Build RPM package 203 | if: matrix.platform.deb 204 | run: cargo generate-rpm --target=${{ matrix.platform.target }} --payload-compress none 205 | - name: Upload wheels 206 | if: matrix.platform.wheel 207 | uses: actions/upload-artifact@v3 208 | with: 209 | name: wheels 210 | path: dist 211 | if-no-files-found: error 212 | - 
name: Install upx 213 | working-directory: /tmp 214 | env: 215 | CC: clang 216 | CXX: clang++ 217 | run: | 218 | set -ex 219 | git clone --recursive https://github.com/upx/upx.git 220 | cd upx 221 | make 222 | cp build/release/upx /usr/local/bin/upx 223 | - name: Upx compress binary 224 | run: upx target/${{ matrix.platform.target }}/release/aliyundrive-webdav 225 | - name: Upload binary artifacts 226 | uses: actions/upload-artifact@v3 227 | with: 228 | name: ${{ matrix.platform.target }}-bin 229 | path: target/${{ matrix.platform.target }}/release/aliyundrive-webdav 230 | if-no-files-found: error 231 | - name: Upload deb artifacts 232 | if: matrix.platform.deb 233 | uses: actions/upload-artifact@v3 234 | with: 235 | name: ${{ matrix.platform.target }}-deb 236 | path: target/${{ matrix.platform.target }}/debian/aliyundrive-webdav_*.deb 237 | if-no-files-found: error 238 | - name: Upload RPM artifacts 239 | if: matrix.platform.deb 240 | uses: actions/upload-artifact@v3 241 | with: 242 | name: ${{ matrix.platform.target }}-rpm 243 | path: target/${{ matrix.platform.target }}/generate-rpm/aliyundrive-webdav*.rpm 244 | if-no-files-found: error 245 | - name: Get tag 246 | if: "startsWith(github.ref, 'refs/tags/')" 247 | id: tag 248 | uses: dawidd6/action-get-tag@v1 249 | - name: Archive binary 250 | if: "startsWith(github.ref, 'refs/tags/')" 251 | run: | 252 | cd target/${{ matrix.platform.target }}/release 253 | tar czvf aliyundrive-webdav-${{ steps.tag.outputs.tag }}.${{ matrix.platform.target }}.tar.gz aliyundrive-webdav 254 | shasum -a 256 aliyundrive-webdav-${{ steps.tag.outputs.tag }}.${{ matrix.platform.target }}.tar.gz > aliyundrive-webdav-${{ steps.tag.outputs.tag }}.${{ matrix.platform.target }}.tar.gz.sha256 255 | cd - 256 | - name: Upload binaries to GitHub Release 257 | uses: softprops/action-gh-release@v1 258 | if: "startsWith(github.ref, 'refs/tags/')" 259 | with: 260 | files: | 261 | target/${{ matrix.platform.target 
}}/release/aliyundrive-webdav*.tar.gz* 262 | target/${{ matrix.platform.target }}/debian/aliyundrive-webdav_*.deb 263 | target/${{ matrix.platform.target }}/generate-rpm/aliyundrive-webdav*.rpm 264 | prerelease: ${{ contains(github.ref, 'alpha') || contains(github.ref, 'beta') }} 265 | generate_release_notes: true 266 | - name: Remove cached deb and RPM packages 267 | if: matrix.platform.deb 268 | run: | 269 | rm -rf target/${{ matrix.platform.target }}/debian 270 | rm -rf target/${{ matrix.platform.target }}/generate-rpm 271 | 272 | linux-others: 273 | runs-on: ubuntu-latest 274 | strategy: 275 | fail-fast: false 276 | matrix: 277 | platform: 278 | - target: "armv5te-unknown-linux-musleabi" 279 | cargo_extra_args: --no-default-features --features rustls-tls 280 | - target: "mips-unknown-linux-musl" 281 | cargo_extra_args: --no-default-features --features native-tls-vendored 282 | - target: "mipsel-unknown-linux-musl" 283 | cargo_extra_args: --no-default-features --features native-tls-vendored 284 | container: 285 | image: docker://ghcr.io/rust-cross/rust-musl-cross:${{ matrix.platform.target }} 286 | steps: 287 | - uses: actions/checkout@v3 288 | - name: Cache cargo build 289 | uses: Swatinem/rust-cache@v2 290 | with: 291 | key: ${{ matrix.platform.target }} 292 | - name: Build 293 | env: 294 | RUSTFLAGS: -C target-feature=+crt-static -C link-arg=-s 295 | run: | 296 | cargo build --release --target ${{ matrix.platform.target }} ${{ matrix.platform.cargo_extra_args }} 297 | - name: Upx compress binary 298 | uses: crazy-max/ghaction-upx@v1 299 | with: 300 | version: v3.95 # v3.96 breaks mipsel, https://github.com/upx/upx/issues/504 301 | files: target/${{ matrix.platform.target }}/release/aliyundrive-webdav 302 | - name: Upload binary artifacts 303 | uses: actions/upload-artifact@v3 304 | with: 305 | name: ${{ matrix.platform.target }}-bin 306 | path: target/${{ matrix.platform.target }}/release/aliyundrive-webdav 307 | if-no-files-found: error 308 | - name: Get tag 
309 | if: "startsWith(github.ref, 'refs/tags/')" 310 | id: tag 311 | uses: dawidd6/action-get-tag@v1 312 | - name: Archive binary 313 | if: "startsWith(github.ref, 'refs/tags/')" 314 | run: | 315 | cd target/${{ matrix.platform.target }}/release 316 | tar czvf aliyundrive-webdav-${{ steps.tag.outputs.tag }}.${{ matrix.platform.target }}.tar.gz aliyundrive-webdav 317 | shasum -a 256 aliyundrive-webdav-${{ steps.tag.outputs.tag }}.${{ matrix.platform.target }}.tar.gz > aliyundrive-webdav-${{ steps.tag.outputs.tag }}.${{ matrix.platform.target }}.tar.gz.sha256 318 | cd - 319 | - name: Upload binary to GitHub Release 320 | uses: softprops/action-gh-release@v1 321 | if: "startsWith(github.ref, 'refs/tags/')" 322 | with: 323 | files: | 324 | target/${{ matrix.platform.target }}/release/aliyundrive-webdav*.tar.gz* 325 | prerelease: ${{ contains(github.ref, 'alpha') || contains(github.ref, 'beta') }} 326 | generate_release_notes: true 327 | 328 | openwrt: 329 | name: OpenWrt Package - ${{ matrix.target.arch }} 330 | runs-on: ubuntu-20.04 331 | needs: [ linux, linux-others ] 332 | environment: OpenWrt 333 | strategy: 334 | fail-fast: false 335 | matrix: 336 | target: 337 | - arch: "aarch64_generic" 338 | sdk: "https://downloads.openwrt.org/releases/22.03.2/targets/rockchip/armv8/openwrt-sdk-22.03.2-rockchip-armv8_gcc-11.2.0_musl.Linux-x86_64.tar.xz" 339 | - arch: "arm_cortex-a9" 340 | sdk: "https://downloads.openwrt.org/releases/22.03.2/targets/bcm53xx/generic/openwrt-sdk-22.03.2-bcm53xx-generic_gcc-11.2.0_musl_eabi.Linux-x86_64.tar.xz" 341 | - arch: "aarch64_cortex-a53" 342 | sdk: "https://downloads.openwrt.org/releases/22.03.2/targets/bcm27xx/bcm2710/openwrt-sdk-22.03.2-bcm27xx-bcm2710_gcc-11.2.0_musl.Linux-x86_64.tar.xz" 343 | - arch: "aarch64_cortex-a72" 344 | sdk: "https://downloads.openwrt.org/releases/22.03.2/targets/bcm27xx/bcm2711/openwrt-sdk-22.03.2-bcm27xx-bcm2711_gcc-11.2.0_musl.Linux-x86_64.tar.xz" 345 | - arch: "x86_64" 346 | sdk: 
"https://downloads.openwrt.org/releases/22.03.2/targets/x86/64/openwrt-sdk-22.03.2-x86-64_gcc-11.2.0_musl.Linux-x86_64.tar.xz" 347 | - arch: "i386_pentium4" 348 | sdk: "https://downloads.openwrt.org/releases/22.03.2/targets/x86/generic/openwrt-sdk-22.03.2-x86-generic_gcc-11.2.0_musl.Linux-x86_64.tar.xz" 349 | - arch: "arm_mpcore" 350 | sdk: "https://downloads.openwrt.org/releases/22.03.2/targets/oxnas/ox820/openwrt-sdk-22.03.2-oxnas-ox820_gcc-11.2.0_musl_eabi.Linux-x86_64.tar.xz" 351 | - arch: "arm_cortex-a5_vfpv4" 352 | sdk: "https://downloads.openwrt.org/releases/22.03.2/targets/at91/sama5/openwrt-sdk-22.03.2-at91-sama5_gcc-11.2.0_musl_eabi.Linux-x86_64.tar.xz" 353 | - arch: "arm_cortex-a7_neon-vfpv4" 354 | sdk: "https://downloads.openwrt.org/releases/22.03.2/targets/ipq40xx/generic/openwrt-sdk-22.03.2-ipq40xx-generic_gcc-11.2.0_musl_eabi.Linux-x86_64.tar.xz" 355 | - arch: "mipsel_24kc" 356 | sdk: "https://downloads.openwrt.org/releases/22.03.2/targets/ramips/mt7621/openwrt-sdk-22.03.2-ramips-mt7621_gcc-11.2.0_musl.Linux-x86_64.tar.xz" 357 | - arch: "mips_24kc" 358 | sdk: "https://archive.openwrt.org/releases/19.07.7/targets/ar71xx/nand/openwrt-sdk-19.07.7-ar71xx-nand_gcc-7.5.0_musl.Linux-x86_64.tar.xz" 359 | steps: 360 | - uses: actions/checkout@v3 361 | - name: Install build requirements 362 | run: | 363 | set -e 364 | sudo apt-get update 365 | sudo apt-get install -y build-essential ccache ecj fastjar file g++ gawk \ 366 | gettext git java-propose-classpath libelf-dev libncurses5-dev \ 367 | libncursesw5-dev libssl-dev python2.7-dev python3 unzip wget \ 368 | python3-distutils python3-setuptools python3-dev rsync subversion \ 369 | swig time xsltproc zlib1g-dev 370 | - name: Install OpenWrt SDK 371 | run: | 372 | set -e 373 | wget -O openwrt-sdk.tar.xz ${{ matrix.target.sdk }} 374 | xz -q -d openwrt-sdk.tar.xz && tar -xvf openwrt-sdk.tar 375 | mv -f openwrt-sdk-* openwrt-sdk 376 | - name: Build Package 377 | run: | 378 | set -e 379 | echo "src-link aliyundrive 
$GITHUB_WORKSPACE/openwrt" > openwrt-sdk/feeds.conf 380 | echo 'CONFIG_PACKAGE_aliyundrive-webdav=y 381 | CONFIG_PACKAGE_luci-app-aliyundrive-webdav=y 382 | ' >> openwrt-sdk/.config 383 | cd openwrt-sdk 384 | cat feeds.conf.default >> feeds.conf 385 | cat feeds.conf 386 | 387 | ./scripts/feeds update -a > /dev/null 388 | make defconfig 389 | 390 | ./scripts/feeds install -d y -f -a 391 | make package/aliyundrive-webdav/compile V=s 392 | make package/luci-app-aliyundrive-webdav/compile V=s 393 | tree bin/packages/ 394 | - name: Archive package 395 | uses: actions/upload-artifact@v3 396 | with: 397 | name: aliyundrive-webdav-openwrt-${{ matrix.target.arch }} 398 | path: openwrt-sdk/bin/packages/*/aliyundrive/aliyundrive-webdav*.ipk 399 | if-no-files-found: error 400 | - name: Archive luci packages 401 | uses: actions/upload-artifact@v3 402 | if: ${{ matrix.target.arch == 'aarch64_generic' }} 403 | with: 404 | name: aliyundrive-webdav-openwrt-${{ matrix.target.arch }} 405 | path: openwrt-sdk/bin/packages/*/aliyundrive/luci-*.ipk 406 | if-no-files-found: error 407 | - name: Upload package to GitHub Release 408 | uses: softprops/action-gh-release@v1 409 | if: "startsWith(github.ref, 'refs/tags/')" 410 | with: 411 | files: | 412 | openwrt-sdk/bin/packages/*/aliyundrive/aliyundrive-webdav*.ipk 413 | prerelease: ${{ contains(github.ref, 'alpha') || contains(github.ref, 'beta') }} 414 | generate_release_notes: true 415 | - name: Upload luci packages to GitHub Release 416 | uses: softprops/action-gh-release@v1 417 | if: ${{ startsWith(github.ref, 'refs/tags/') && matrix.target.arch == 'aarch64_generic' }} 418 | with: 419 | files: | 420 | openwrt-sdk/bin/packages/*/aliyundrive/luci-*.ipk 421 | prerelease: ${{ contains(github.ref, 'alpha') || contains(github.ref, 'beta') }} 422 | generate_release_notes: true 423 | 424 | openwrt-gl-inet: 425 | name: OpenWrt Package - ${{ matrix.target.arch }} 426 | runs-on: ubuntu-latest 427 | needs: [ linux, linux-others ] 428 | environment: 
OpenWrt 429 | strategy: 430 | fail-fast: false 431 | matrix: 432 | target: 433 | - arch: siflower-1806 434 | path: sdk/1806/siflower 435 | steps: 436 | - uses: actions/checkout@v3 437 | - name: Install build requirements 438 | run: | 439 | set -e 440 | sudo apt-get update 441 | sudo apt-get install -y build-essential ccache ecj fastjar file g++ gawk \ 442 | gettext git java-propose-classpath libelf-dev libncurses5-dev \ 443 | libncursesw5-dev libssl-dev python2.7-dev python3 unzip wget \ 444 | python3-distutils python3-setuptools python3-dev rsync subversion \ 445 | swig time xsltproc zlib1g-dev 446 | - name: Install OpenWrt SDK 447 | run: | 448 | set -e 449 | git clone https://github.com/gl-inet/sdk.git openwrt-sdk 450 | cd openwrt-sdk 451 | ./download.sh ${{ matrix.target.arch }} 452 | - name: Build Package 453 | run: | 454 | set -e 455 | cd openwrt-sdk/${{ matrix.target.path }} 456 | ln -s $GITHUB_WORKSPACE/openwrt/aliyundrive-webdav package/aliyundrive-webdav 457 | 458 | echo 'CONFIG_PACKAGE_aliyundrive-webdav=y 459 | CONFIG_PACKAGE_luci-app-aliyundrive-webdav=y 460 | ' >> .config 461 | cp feeds.conf.default feeds.conf 462 | cat feeds.conf 463 | 464 | ./scripts/feeds update -a > /dev/null 465 | make defconfig 466 | 467 | ./scripts/feeds install -d y -f -a 468 | make package/aliyundrive-webdav/compile V=s 469 | tree bin/packages/ 470 | - name: Archive package 471 | uses: actions/upload-artifact@v3 472 | with: 473 | name: aliyundrive-webdav-openwrt-${{ matrix.target.arch }} 474 | path: openwrt-sdk/${{ matrix.target.path }}/bin/packages/*/base/aliyundrive-webdav*.ipk 475 | if-no-files-found: error 476 | - name: Upload package to GitHub Release 477 | uses: softprops/action-gh-release@v1 478 | if: "startsWith(github.ref, 'refs/tags/')" 479 | with: 480 | files: | 481 | openwrt-sdk/${{ matrix.target.path }}/bin/packages/*/base/aliyundrive-webdav*.ipk 482 | prerelease: ${{ contains(github.ref, 'alpha') || contains(github.ref, 'beta') }} 483 | generate_release_notes: 
true 484 | 485 | docker: 486 | name: Build Docker Image 487 | runs-on: ubuntu-latest 488 | needs: [ linux ] 489 | environment: Docker Hub 490 | steps: 491 | - uses: actions/checkout@v3 492 | - uses: actions/download-artifact@v3 493 | with: 494 | name: x86_64-unknown-linux-musl-bin 495 | - run: | 496 | chmod a+x aliyundrive-webdav 497 | mv aliyundrive-webdav aliyundrive-webdav-amd64 498 | - uses: actions/download-artifact@v3 499 | with: 500 | name: i686-unknown-linux-musl-bin 501 | - run: | 502 | chmod a+x aliyundrive-webdav 503 | mv aliyundrive-webdav aliyundrive-webdav-386 504 | - uses: actions/download-artifact@v3 505 | with: 506 | name: aarch64-unknown-linux-musl-bin 507 | - run: | 508 | chmod a+x aliyundrive-webdav 509 | mv aliyundrive-webdav aliyundrive-webdav-arm64 510 | - uses: actions/download-artifact@v3 511 | with: 512 | name: armv7-unknown-linux-musleabihf-bin 513 | - run: | 514 | chmod a+x aliyundrive-webdav 515 | mv aliyundrive-webdav aliyundrive-webdav-armv7 516 | - uses: actions/download-artifact@v3 517 | with: 518 | name: arm-unknown-linux-musleabihf-bin 519 | - run: | 520 | chmod a+x aliyundrive-webdav 521 | mv aliyundrive-webdav aliyundrive-webdav-armv6 522 | - name: Docker meta 523 | id: meta 524 | uses: docker/metadata-action@v3 525 | with: 526 | images: | 527 | messense/aliyundrive-webdav 528 | ghcr.io/messense/aliyundrive-webdav 529 | tags: | 530 | type=schedule 531 | type=ref,event=branch 532 | type=ref,event=pr 533 | type=semver,pattern={{version}} 534 | type=semver,pattern={{major}}.{{minor}} 535 | type=semver,pattern={{major}} 536 | type=sha 537 | - name: Setup QEMU 538 | uses: dbhi/qus/action@main 539 | - name: Setup Docker Buildx 540 | uses: docker/setup-buildx-action@v2 541 | - name: Login to DockerHub 542 | if: github.event_name != 'pull_request' 543 | uses: docker/login-action@v2 544 | with: 545 | username: ${{ secrets.DOCKER_USERNAME }} 546 | password: ${{ secrets.DOCKER_TOKEN }} 547 | - name: Login to GitHub Container Registry 548 | 
if: github.event_name != 'pull_request' 549 | uses: docker/login-action@v2 550 | with: 551 | registry: ghcr.io 552 | username: ${{ github.repository_owner }} 553 | password: ${{ secrets.GITHUB_TOKEN }} 554 | - name: docker build 555 | uses: docker/build-push-action@v3 556 | with: 557 | context: . 558 | platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 559 | push: ${{ github.event_name != 'pull_request' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/')) }} 560 | tags: ${{ steps.meta.outputs.tags }} 561 | labels: ${{ steps.meta.outputs.labels }} 562 | - name: Docker Hub Description 563 | if: github.event_name != 'pull_request' 564 | uses: peter-evans/dockerhub-description@v3 565 | with: 566 | username: ${{ secrets.DOCKER_USERNAME }} 567 | password: ${{ secrets.DOCKER_TOKEN }} 568 | repository: messense/aliyundrive-webdav 569 | 570 | release: 571 | name: Release 572 | runs-on: ubuntu-latest 573 | environment: 574 | name: PyPI 575 | url: https://pypi.org/project/aliyundrive-webdav/ 576 | if: "startsWith(github.ref, 'refs/tags/')" 577 | needs: [ linux, macos ] 578 | steps: 579 | - uses: actions/download-artifact@v3 580 | with: 581 | name: wheels 582 | - uses: actions/setup-python@v4 583 | with: 584 | python-version: 3.9 585 | - name: Publish to PyPI 586 | env: 587 | TWINE_USERNAME: __token__ 588 | TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} 589 | run: | 590 | pip install --upgrade twine 591 | twine upload --skip-existing * 592 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /dist 3 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 
4 | 5 | ## 2.3.0 6 | 7 | * 增加资源库支持 8 | 9 | ## 2.1.0 10 | 11 | * 增加 `--redirect` 参数用于启用 302 重定向 12 | 13 | ## 2.0.0 14 | 15 | * 切换到阿里云盘开放平台接口 16 | * 移除 Koolshare 梅林固件路由器平台支持 17 | 18 | ## 1.11.0 19 | 20 | * 移除阿里云 PDS 服务支持 21 | 22 | ## 1.10.1 23 | 24 | * 修复使用 Web 版 refresh token 时下载被错误 302 重定向的问题 25 | 26 | ## 1.10.0 27 | 28 | * 使用 App refresh token 下载时默认 302 重定向而不需要中转 29 | 30 | ## 1.9.0 31 | 32 | * 增加使用 HTTP 协议下载配置,低端设备中转时降低资源消耗 33 | 34 | ## 1.8.9 35 | 36 | * 修复上传大文件时上传地址过期的问题 37 | 38 | ## 1.8.8 39 | 40 | * 修复开启 TLS 后清除缓存导致进程 crash 的问题 41 | 42 | ## 1.8.7 43 | 44 | * 复制/删除文件夹时清除原文件夹缓存内容 45 | 46 | ## 1.8.6 47 | 48 | * 修复重命名文件夹时原文件夹缓存内容未清除的问题 49 | 50 | ## 1.8.5 51 | 52 | * 支持 rclone 以 Nextcloud WebDAV 模式上传时跳过上传相同 sha1 哈希值文件 53 | 54 | ## 1.8.4 55 | 56 | * 支持 rclone 以 OwnCloud/Nextcloud WebDAV 模式挂载时返回 sha1 checksum 57 | 58 | ## 1.8.3 59 | 60 | * 优化上传文件完成目录缓存失效策略 61 | 62 | ## 1.8.2 63 | 64 | * 修复读取目录在阿里云盘接口请求错误时返回 404 的问题 65 | * OpenWrt 界面增加清除缓存功能 66 | 67 | ## 1.8.1 68 | 69 | * 增加调试模式 HTTP 请求日志输出 70 | 71 | ## 1.8.0 72 | 73 | * 增加配置上传文件缓冲区大小参数 `--upload-buffer-size` 74 | * 增加配置跳过上传相同大小同名文件参数 `--skip-upload-same-size`, 注意启用该选项虽然能加速上传但可能会导致修改过的同样大小的文件不会被上传 75 | 76 | ## 1.7.4 77 | 78 | * 删除文件时忽略 404 和 400 状态码 79 | * 修复梅林 arm384/arm386 使用 usb2jffs 插件后安装报错 `bad number` 问题 80 | * 上传文件出错时日志中增加更详细的错误信息 81 | 82 | ## 1.7.3 83 | 84 | * 调用云盘接口增加自动重试机制 85 | 86 | ## 1.7.2 87 | 88 | * 增加 socks5 代理支持 89 | 90 | ## 1.7.1 91 | 92 | * OpenWrt Luci 配置界面增加扫码登录获取 refresh token 功能 93 | 94 | ## 1.7.0 95 | 96 | * 梅林 384/386 固件禁用程序自动更新 97 | * 默认使用 App refresh token 刷新接口 98 | * 增加 `aliyundrive-webdav qr` 子命令 99 | 100 | ## 1.6.2 101 | 102 | * 非 tty 终端模式下不尝试扫码登录 103 | 104 | ## 1.6.1 105 | 106 | * 降低自动更新失败日志级别为警告 107 | 108 | ## 1.6.0 109 | 110 | * 增加自动更新功能 111 | 112 | ## 1.5.1 113 | 114 | * 修复 Web 版 refresh token 刷新失败问题 115 | 116 | ## 1.5.0 117 | 118 | * 增加移动端 App refresh token 支持,扫码登录使用 App refresh token. 
119 | 120 | ## 1.4.0 121 | 122 | * 命令行增加阿里云盘扫码登录功能 123 | 124 | ## 1.3.3 125 | 126 | * 增加 `--strip-prefix` 参数 127 | 128 | ## 1.3.2 129 | 130 | * 不使用阿里云盘文件列表接口返回的可能有问题的图片下载地址 131 | 132 | ## 1.3.1 133 | 134 | * 降低获取文件下载地址接口调用次数 135 | 136 | ## 1.3.0 137 | 138 | * 支持下载 `.livp` 格式文件 139 | 140 | ## 1.2.7 141 | 142 | * 修复下载部分文件类型如 `.livp` 500 报错问题,由于阿里云盘接口没有返回 `.livp` 文件格式下载地址,暂时无法下载该格式文件 143 | 144 | ## 1.2.6 145 | 146 | * 指定 `--workdir` 参数时 `--refresh-token` 参数可选 147 | 148 | ## 1.2.5 149 | 150 | * 修复 Windows 版本访问文件 404 问题 151 | 152 | ## 1.2.4 153 | 154 | * 修正 OpenWrt package autorelease 版本号 155 | 156 | ## 1.2.3 157 | 158 | * 增加 Windows arm64 架构支持 159 | 160 | ## 1.2.2 161 | 162 | * TLS/HTTPS 支持 RSA 私钥格式 163 | 164 | ## 1.2.1 165 | 166 | * 支持 OpenWrt 19.07 167 | 168 | ## 1.2.0 169 | 170 | * 增加 TLS/HTTPS 支持(暂不支持 MIPS 架构) 171 | * 增加 HTTP 2.0 支持 172 | * 修复 Docker 容器设置 `HOST` 环境变量不生效的问题 173 | * 增加构建发布 deb 和 rpm 包 174 | 175 | ## 1.1.1 176 | 177 | * 修复潜在的内存泄漏问题 178 | 179 | ## 1.1.0 180 | 181 | * 增加只读模式,防止误操作删除文件 182 | 183 | ## 1.0.0 184 | 185 | * 调整连接池 idle 检测时间,避免下载文件时出现 `connection closed before message 186 | completed` 报错 187 | * 功能相对稳定,发布 1.0 版本。 188 | 189 | ## 0.5.5 190 | 191 | * 降级 OpenSSL 修复 MIPS 架构二进制文件无法正常运行的问题 192 | 193 | ## 0.5.4 194 | 195 | * 刷新 refresh token 增加 429 状态码重试 196 | 197 | ## 0.5.3 198 | 199 | * 完善请求重试,处理请求 408、429 报错 200 | 201 | ## 0.5.2 202 | 203 | * 增加 `arm_cortex-a5_vfpv4` 架构 OpenWrt 包(玩客云适用) 204 | 205 | ## 0.5.1 206 | 207 | * 修复 OpenWrt Luci 界面语言翻译问题 208 | 209 | ## 0.5.0 210 | 211 | * 增加实验性[阿里云相册与网盘服务(PDS)](https://www.aliyun.com/product/storage/pds)支持,阿里云网站开通 PDS 服务后可通过传入 `domain_id` 和对应用户的 `refresh_token`(可通过访问 BasicUI 获取) 使用。 212 | 213 | ## 0.4.8 214 | 215 | * 支持通过环境变量 `HOST` 和 `PORT` 配置监听地址和端口 216 | 217 | ## 0.4.7 218 | 219 | * 发布 musllinux wheel 二进制包至 PyPI 220 | 221 | ## 0.4.6 222 | 223 | * 自动尝试刷新过期的上传地址 224 | * GitHub Release 产物文件名增加版本号 225 | 226 | ## 0.4.5 227 | 228 | * 兼容 macOS Finder chunked encoding 上传 `X-Expected-Entity-Length` HTTP header 
229 | 230 | ## 0.4.4 231 | 232 | * 新增目录缓存过期时间参数配置 233 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "aliyundrive-webdav" 3 | version = "2.3.3" 4 | edition = "2021" 5 | description = "WebDAV server for AliyunDrive" 6 | license = "MIT" 7 | homepage = "https://github.com/messense/aliyundrive-webdav" 8 | repository = "https://github.com/messense/aliyundrive-webdav.git" 9 | readme = "README.md" 10 | 11 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 12 | 13 | [dependencies] 14 | anyhow = "1.0.75" 15 | bytes = "1.5.0" 16 | clap = { version = "4.3.19", features = ["derive", "env", "wrap_help"] } 17 | dashmap = "5.5.3" 18 | dav-server = { version = "0.5.5", default-features = false, features = ["hyper"] } 19 | dirs = "5.0.1" 20 | futures-util = "0.3" 21 | headers = "0.3.6" 22 | hyper = { version = "0.14.27", features = ["server", "http2"] } 23 | moka = { version = "0.11.3", default-features = false, features = ["future"] } 24 | openssl-probe = { version = "0.1.4", optional = true } 25 | path-slash = "0.2.0" 26 | reqwest = { version = "0.11.24", default-features = false, features = ["json", "gzip", "cookies", "socks"] } 27 | reqwest-middleware = "0.2.4" 28 | reqwest-retry = "0.2.0" 29 | serde = { version = "1.0.168", features = ["derive"] } 30 | time = { version = "0.3", features = ["formatting", "parsing"] } 31 | tokio = { version = "1.28.2", features = ["rt-multi-thread", "io-util", "net", "time", "sync", "macros", "parking_lot", "fs"] } 32 | tracing = "0.1" 33 | tracing-subscriber = { version = "0.3", features = ["env-filter", "time", "local-time"] } 34 | url = "2.4.0" 35 | zip = { version = "0.6.4", default-features = false } 36 | base64 = "0.21.5" 37 | serde_json = "1.0.107" 38 | atty = "0.2.14" 39 | qr2term = "0.3.1" 40 | self_update = { version = "0.37.0", 
default-features = false, features = ["archive-zip", "archive-tar", "compression-flate2", "compression-zip-deflate"] } 41 | 42 | # TLS server support 43 | rustls-pemfile = { version = "1.0.0", optional = true } 44 | tls-listener = { version = "0.7.0", features = ["hyper-h1", "hyper-h2", "rt"], optional = true } 45 | tokio-rustls = { version = "0.24.0", optional = true } 46 | 47 | # Unix signal support 48 | [target.'cfg(unix)'.dependencies] 49 | signal-hook = "0.3.14" 50 | signal-hook-tokio = { version = "0.3.1", features = ["futures-v0_3"] } 51 | 52 | [features] 53 | default = ["rustls-tls", "atomic64"] 54 | rustls-tls = ["reqwest/rustls-tls", "rustls-pemfile", "tls-listener/rustls", "hyper/stream", "tokio-rustls", "self_update/rustls"] 55 | native-tls = ["reqwest/native-tls"] 56 | native-tls-vendored = ["reqwest/native-tls-vendored", "openssl-probe"] 57 | atomic64 = ["moka/atomic64"] 58 | 59 | [profile.release] 60 | lto = true 61 | 62 | [package.metadata.deb] 63 | maintainer = "messense " 64 | copyright = "2021-present, messense " 65 | license-file = ["LICENSE", "4"] 66 | extended-description = """\ 67 | WebDAV server for AliyunDrive""" 68 | section = "utility" 69 | priority = "optional" 70 | assets = [ 71 | ["target/release/aliyundrive-webdav", "usr/bin/", "755"], 72 | ["systemd.service", "etc/systemd/system/aliyundrive-webdav.service", "644"], 73 | ] 74 | 75 | [package.metadata.generate-rpm] 76 | assets = [ 77 | { source = "target/release/aliyundrive-webdav", dest = "/usr/bin/aliyundrive-webdav", mode = "0755" }, 78 | { source = "LICENSE", dest = "/usr/share/doc/aliyundrive-webdav/LICENSE", doc = true, mode = "0644" }, 79 | { source = "systemd.service", dest = "/etc/systemd/system/aliyundrive-webdav.service", config = true, mode = "0644" }, 80 | ] 81 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:latest 2 | ARG 
TARGETARCH 3 | ARG TARGETVARIANT 4 | RUN apk --no-cache add ca-certificates tini 5 | RUN apk add tzdata && \ 6 | cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \ 7 | echo "Asia/Shanghai" > /etc/timezone && \ 8 | apk del tzdata 9 | 10 | RUN mkdir -p /etc/aliyundrive-webdav 11 | WORKDIR /root/ 12 | ADD aliyundrive-webdav-$TARGETARCH$TARGETVARIANT /usr/bin/aliyundrive-webdav 13 | 14 | ENV NO_SELF_UPGRADE 1 15 | 16 | ENTRYPOINT ["/sbin/tini", "--"] 17 | CMD ["/usr/bin/aliyundrive-webdav", "--auto-index", "--workdir", "/etc/aliyundrive-webdav"] 18 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2021-present Messense Lv 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies 9 | of the Software, and to permit persons to whom the Software is furnished to do 10 | so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # aliyundrive-webdav 2 | 3 | [![GitHub Actions](https://github.com/messense/aliyundrive-webdav/workflows/CI/badge.svg)](https://github.com/messense/aliyundrive-webdav/actions?query=workflow%3ACI) 4 | [![PyPI](https://img.shields.io/pypi/v/aliyundrive-webdav.svg)](https://pypi.org/project/aliyundrive-webdav) 5 | [![Docker Image](https://img.shields.io/docker/pulls/messense/aliyundrive-webdav.svg?maxAge=2592000)](https://hub.docker.com/r/messense/aliyundrive-webdav/) 6 | [![aliyundrive-webdav](https://snapcraft.io/aliyundrive-webdav/badge.svg)](https://snapcraft.io/aliyundrive-webdav) 7 | [![Crates.io](https://img.shields.io/crates/v/aliyundrive-webdav.svg)](https://crates.io/crates/aliyundrive-webdav) 8 | 9 | > 🚀 Help me to become a full-time open-source developer by [sponsoring me on GitHub](https://github.com/sponsors/messense) 10 | 11 | 阿里云盘 WebDAV 服务,主要使用场景为配合支持 WebDAV 协议的客户端 App 如 [Infuse](https://firecore.com/infuse)、[nPlayer](https://nplayer.com) 12 | 等实现在电视上直接观看云盘视频内容, 支持客户端 App 直接从阿里云盘获取文件播放而不经过运行本应用的服务器中转, 支持上传文件,但受限于 WebDAV 协议不支持文件秒传。 13 | 14 | **请注意:V2 版本基于阿里云盘开放平台接口实现,不再支持阿里云盘 Web 和 App 版本获取的 refresh token。** 15 | **由于本项目作者不再使用梅林固件,V2 版本不再免费支持 Koolshare 梅林固件系统,如有需要请考虑[付费支持](https://github.com/messense/aliyundrive-webdav/discussions/778)。** 16 | 17 | 如果项目对你有帮助,请考虑[捐赠支持](https://github.com/messense/aliyundrive-webdav/discussions/126)项目持续维护。 18 | 也可以考虑加入[aliyundrive-webdav 知识星球](https://t.zsxq.com/0c9sq6Ca8)获取咨询和技术支持服务。 19 | 20 | > **Note** 21 | > 22 | > 本项目作者没有上传需求, 故上传文件功能测试不全面且没有持续迭代计划. 
23 | 24 | ## 安装 25 | 26 | 可以从 [GitHub Releases](https://github.com/messense/aliyundrive-webdav/releases) 页面下载预先构建的二进制包, 也可以使用 pip 从 PyPI 下载: 27 | 28 | ```bash 29 | pip install aliyundrive-webdav 30 | ``` 31 | 32 | 如果系统支持 [Snapcraft](https://snapcraft.io) 比如 Ubuntu、Debian 等,也可以使用 snap 安装: 33 | 34 | ```bash 35 | sudo snap install aliyundrive-webdav 36 | ``` 37 | 38 | ### OpenWrt 路由器 39 | 40 | [GitHub Releases](https://github.com/messense/aliyundrive-webdav/releases) 中有预编译的 ipk 文件, 目前提供了 41 | aarch64/arm/mipsel/x86_64/i686 等架构的版本,可以下载后使用 opkg 安装,以 nanopi r4s 为例: 42 | 43 | ```bash 44 | wget https://github.com/messense/aliyundrive-webdav/releases/download/v2.3.3/aliyundrive-webdav_2.3.3-1_aarch64_generic.ipk 45 | wget https://github.com/messense/aliyundrive-webdav/releases/download/v2.3.3/luci-app-aliyundrive-webdav_2.3.3_all.ipk 46 | wget https://github.com/messense/aliyundrive-webdav/releases/download/v2.3.3/luci-i18n-aliyundrive-webdav-zh-cn_2.3.3-1_all.ipk 47 | opkg install aliyundrive-webdav_2.3.3-1_aarch64_generic.ipk 48 | opkg install luci-app-aliyundrive-webdav_2.3.3_all.ipk 49 | opkg install luci-i18n-aliyundrive-webdav-zh-cn_2.3.3-1_all.ipk 50 | ``` 51 | 52 | 其它 CPU 架构的路由器可在 [GitHub Releases](https://github.com/messense/aliyundrive-webdav/releases) 页面中查找对应的架构的主程序 ipk 文件下载安装, 常见 53 | OpenWrt 路由器 CPU 架构如下表(欢迎补充): 54 | 55 | | 路由器 | CPU 架构 | 56 | | ------------ | ------------------ | 57 | | nanopi r4s | aarch64_generic | 58 | | 小米 AX3600 | aarch64_cortex-a53 | 59 | | 斐讯 N1 盒子 | aarch64_cortex-a53 | 60 | | Newifi D2 | mipsel_24kc | 61 | | Pogoplug | arm_mpcore | 62 | 63 | > Tips: 不清楚 CPU 架构类型可通过运行 `opkg print-architecture` 命令查询。 64 | 65 | ![OpenWrt 配置界面](./doc/openwrt.png) 66 | 67 | ## Docker 运行 68 | 69 | ```bash 70 | docker run -d --name=aliyundrive-webdav --restart=unless-stopped -p 8080:8080 \ 71 | -v /etc/aliyundrive-webdav/:/etc/aliyundrive-webdav/ \ 72 | -e REFRESH_TOKEN='your refresh token' \ 73 | -e WEBDAV_AUTH_USER=admin \ 74 | -e 
WEBDAV_AUTH_PASSWORD=admin \ 75 | messense/aliyundrive-webdav 76 | ``` 77 | 78 | 其中,`REFRESH_TOKEN` 环境变量为你的阿里云盘 `refresh_token`,`WEBDAV_AUTH_USER` 79 | 和 `WEBDAV_AUTH_PASSWORD` 为连接 WebDAV 服务的用户名和密码。 80 | 81 | ### QNAP 威联通 NAS 82 | 83 | #### QNAP 插件 84 | 85 | [qnap-aliyunpan-webdav](https://github.com/iranee/qnap-aliyunpan-webdav) by 86 | [@iranee](https://github.com/iranee). 87 | 88 | #### ContainerStation (Docker) 89 | 90 | 管理员登陆 NAS 后安装 ContainerStation 并启动服务,在 Management (管理) 标签中 Create Application (新建应用),配置如下 91 | 92 | ```yaml 93 | version: '3.3' 94 | services: 95 | aliyundrive-webdav: 96 | container_name: aliyundrive-webdav 97 | restart: unless-stopped 98 | ports: 99 | - '8080:8080' 100 | environment: 101 | - 'REFRESH_TOKEN=mytoken...' 102 | image: messense/aliyundrive-webdav 103 | ``` 104 | 105 | 其中 `REFRESH_TOKEN` 文档最下面说明;`:8080` 网盘访问映射端口,可以按需改为其他的。 106 | 107 | 点击 Create (创建)后启动,访问 http://nas地址:8080/ 即可看到你网盘的自动生成索引网页文件。 108 | 109 | 参考文档 110 | 111 | - https://docs.docker.com/compose/ 112 | - https://www.composerize.com/ 113 | 114 | ## rclone 115 | 116 | 为了避免重复上传文件,使用 rclone 时推荐使用 [Nextcloud WebDAV](https://rclone.org/webdav/#nextcloud) 模式,可以支持 sha1 checksums. 
117 | 另外需要配合 `--no-update-modtime` 参数,否则 rclone 为了更新文件修改时间还是会强制重新上传。 118 | 119 | 举个例子: 120 | 121 | ```bash 122 | rclone --no-update-modtime copy abc.pdf aliyundrive-nc://docs/ 123 | ``` 124 | 125 | ## 获取 refresh token 126 | 127 | * [通过在线工具获取 refresh token](https://messense-aliyundrive-webdav-backendrefresh-token-ucs0wn.streamlit.app/) 128 | * 命令行运行 `aliyundrive-webdav qr login` 扫码授权后会输出 refresh token 129 | 130 | ![扫码获取 refresh token](./doc/refresh-token.png) 131 | 132 | ## 命令行用法 133 | 134 | ```bash 135 | $ aliyundrive-webdav --help 136 | WebDAV server for AliyunDrive 137 | 138 | Usage: aliyundrive-webdav [OPTIONS] 139 | aliyundrive-webdav 140 | 141 | Commands: 142 | qr Scan QRCode 143 | help Print this message or the help of the given subcommand(s) 144 | 145 | Options: 146 | --host 147 | Listen host 148 | 149 | [env: HOST=] 150 | [default: 0.0.0.0] 151 | 152 | -p, --port 153 | Listen port 154 | 155 | [env: PORT=] 156 | [default: 8080] 157 | 158 | --client-id 159 | Aliyun drive client_id 160 | 161 | [env: CLIENT_ID=] 162 | 163 | --client-secret 164 | Aliyun drive client_secret 165 | 166 | [env: CLIENT_SECRET=] 167 | 168 | --drive-type 169 | Aliyun drive type 170 | 171 | [env: DRIVE_TYPE=] 172 | 173 | Possible values: 174 | - resource: Resource drive 175 | - backup: Backup drive 176 | - default: Default drive 177 | 178 | -r, --refresh-token 179 | Aliyun drive refresh token 180 | 181 | [env: REFRESH_TOKEN=] 182 | 183 | -U, --auth-user 184 | WebDAV authentication username 185 | 186 | [env: WEBDAV_AUTH_USER=] 187 | 188 | -W, --auth-password 189 | WebDAV authentication password 190 | 191 | [env: WEBDAV_AUTH_PASSWORD=] 192 | 193 | -I, --auto-index 194 | Automatically generate index.html 195 | 196 | -S, --read-buffer-size 197 | Read/download buffer size in bytes, defaults to 10MB 198 | 199 | [default: 10485760] 200 | 201 | --upload-buffer-size 202 | Upload buffer size in bytes, defaults to 16MB 203 | 204 | [default: 16777216] 205 | 206 | --cache-size 207 | Directory 
entries cache size 208 | 209 | [default: 1000] 210 | 211 | --cache-ttl 212 | Directory entries cache expiration time in seconds 213 | 214 | [default: 600] 215 | 216 | --root 217 | Root directory path 218 | 219 | [env: WEBDAV_ROOT=] 220 | [default: /] 221 | 222 | -w, --workdir 223 | Working directory, refresh_token will be stored in there if specified 224 | 225 | --no-trash 226 | Delete file permanently instead of trashing it 227 | 228 | --read-only 229 | Enable read only mode 230 | 231 | --tls-cert 232 | TLS certificate file path 233 | 234 | [env: TLS_CERT=] 235 | 236 | --tls-key 237 | TLS private key file path 238 | 239 | [env: TLS_KEY=] 240 | 241 | --strip-prefix 242 | Prefix to be stripped off when handling request 243 | 244 | [env: WEBDAV_STRIP_PREFIX=] 245 | 246 | --debug 247 | Enable debug log 248 | 249 | --no-self-upgrade 250 | Disable self auto upgrade 251 | 252 | --skip-upload-same-size 253 | Skip uploading same size file 254 | 255 | --prefer-http-download 256 | Prefer downloading using HTTP protocol 257 | 258 | --redirect 259 | Enable 302 redirect when possible 260 | 261 | -h, --help 262 | Print help (see a summary with '-h') 263 | 264 | -V, --version 265 | Print version 266 | ``` 267 | 268 | > **Note** 269 | > 270 | > 注意:TLS/HTTPS 暂不支持 MIPS 架构。 271 | 272 | > **Note** 273 | > 274 | > 注意:启用 `--skip-upload-same-size` 选项虽然能加速上传但可能会导致修改过的同样大小的文件不会被上传 275 | 276 | ## License 277 | 278 | This work is released under the MIT license. A copy of the license is provided in the [LICENSE](./LICENSE) file. 
279 | -------------------------------------------------------------------------------- /backend/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into 
this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 160 | #.idea/ 161 | -------------------------------------------------------------------------------- /backend/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "aliyundrive-webdav-oauth" 3 | version = "0.1.0" 4 | edition = "2024" 5 | 6 | [dependencies] 7 | tokio = { version = "1", features = ["full"] } 8 | axum = { version = "0.8", features = ["http2"] } 9 | serde = { version = "1.0", features = ["derive"] } 10 | serde_json = "1.0" 11 | reqwest = { version = "0.12", features = ["json", "default-tls"] } 12 | -------------------------------------------------------------------------------- /backend/Dockerfile: -------------------------------------------------------------------------------- 1 | # Stage 1: Build the Rust application 2 | FROM rust:1.86-slim AS builder 3 | 4 | WORKDIR /app 5 | 6 | # Install build dependencies 7 | RUN apt-get update && \ 8 | apt-get install -y pkg-config libssl-dev && \ 9 | apt-get clean && \ 10 | rm -rf /var/lib/apt/lists/* 11 | 12 | # Copy the actual source code and build the application 13 | COPY . . 
14 | RUN cargo build --release 15 | 16 | # Stage 2: Create the runtime image 17 | FROM debian:bookworm-slim 18 | 19 | # Install runtime dependencies 20 | RUN apt-get update && \ 21 | apt-get install -y --no-install-recommends ca-certificates tini tzdata && \ 22 | cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \ 23 | echo "Asia/Shanghai" > /etc/timezone && \ 24 | apt-get remove -y tzdata && \ 25 | apt-get autoremove -y && \ 26 | apt-get clean && \ 27 | rm -rf /var/lib/apt/lists/* 28 | 29 | WORKDIR /root/ 30 | 31 | # Copy the binary from builder stage 32 | COPY --from=builder /app/target/release/aliyundrive-webdav-oauth /usr/bin/aliyundrive-webdav-oauth 33 | 34 | CMD ["/usr/bin/aliyundrive-webdav-oauth"] 35 | -------------------------------------------------------------------------------- /backend/fly.toml: -------------------------------------------------------------------------------- 1 | # fly.toml app configuration file generated for aliyundrive-oauth on 2023-05-09T22:23:03+08:00 2 | # 3 | # See https://fly.io/docs/reference/configuration/ for information about how to use this file. 
4 | # 5 | 6 | app = "aliyundrive-oauth" 7 | primary_region = "hkg" 8 | kill_signal = "SIGINT" 9 | kill_timeout = "5s" 10 | 11 | [experimental] 12 | auto_rollback = true 13 | 14 | [build] 15 | image = "messense/aliyundrive-oauth:latest" 16 | 17 | [[vm]] 18 | size = "shared-cpu-4x" 19 | memory = "1gb" 20 | 21 | [[services]] 22 | protocol = "tcp" 23 | internal_port = 8080 24 | processes = ["app"] 25 | 26 | [[services.ports]] 27 | port = 80 28 | handlers = ["http"] 29 | force_https = true 30 | 31 | [[services.ports]] 32 | port = 443 33 | handlers = ["tls", "http"] 34 | [services.concurrency] 35 | type = "connections" 36 | hard_limit = 1500 37 | soft_limit = 1000 38 | 39 | [[services.tcp_checks]] 40 | interval = "15s" 41 | timeout = "2s" 42 | grace_period = "1s" 43 | restart_limit = 0 44 | -------------------------------------------------------------------------------- /backend/refresh_token.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | import httpx 4 | import streamlit as st 5 | 6 | 7 | session = httpx.AsyncClient() 8 | 9 | 10 | async def get_qrcode_status(sid: str) -> dict: 11 | res = await session.get( 12 | f"https://openapi.aliyundrive.com/oauth/qrcode/{sid}/status" 13 | ) 14 | res.raise_for_status() 15 | return res.json() 16 | 17 | 18 | async def get_refresh_token(code: str) -> str: 19 | res = await session.post( 20 | "https://aliyundrive-oauth.messense.me/oauth/access_token", 21 | json={ 22 | "grant_type": "authorization_code", 23 | "code": code, 24 | }, 25 | ) 26 | res.raise_for_status() 27 | data = res.json() 28 | refresh_token = data["refresh_token"] 29 | return refresh_token 30 | 31 | 32 | async def main(): 33 | st.set_page_config( 34 | page_title="aliyundrive-webdav refresh token 获取工具", 35 | layout="wide", 36 | ) 37 | st.title("aliyundrive-webdav refresh token 获取") 38 | st.markdown( 39 | "👏 欢迎加入 [aliyundrive-webdav 知识星球](https://t.zsxq.com/0c9sq6Ca8)获取咨询和技术支持服务" 40 | ) 41 | 42 | qrcode_tab, 
authcode_tab = st.tabs(["扫码授权", "authCode"]) 43 | 44 | with qrcode_tab: 45 | if st.button("点击获取扫码登录二维码"): 46 | res = await session.post( 47 | "https://aliyundrive-oauth.messense.me/oauth/authorize/qrcode", 48 | json={ 49 | "scopes": ["user:base", "file:all:read", "file:all:write"], 50 | "width": 300, 51 | "height": 300, 52 | }, 53 | ) 54 | data = res.json() 55 | sid = data["sid"] 56 | qrcode_url = data["qrCodeUrl"] 57 | st.image(qrcode_url, caption="使用阿里云盘 App 扫码") 58 | 59 | refresh_token = None 60 | with st.spinner("等待扫码授权中..."): 61 | while True: 62 | try: 63 | data = await get_qrcode_status(sid) 64 | except httpx.ConnectTimeout: 65 | st.error( 66 | "查询扫码结果超时, 可能是触发了阿里云盘接口限制, 请稍后再试.\n" 67 | "或者自行尝试轮询此接口后切换到 authCode tab 获取 refresh token: " 68 | f"https://openapi.aliyundrive.com/oauth/qrcode/{sid}/status", 69 | icon="🚨", 70 | ) 71 | break 72 | 73 | status = data["status"] 74 | if status == "LoginSuccess": 75 | code = data["authCode"] 76 | refresh_token = await get_refresh_token(code) 77 | break 78 | elif status == "QRCodeExpired": 79 | st.error("二维码已过期, 请刷新页面后重试", icon="🚨") 80 | break 81 | 82 | await asyncio.sleep(2) 83 | 84 | if refresh_token: 85 | st.success("refresh token 获取成功", icon="✅") 86 | st.code(refresh_token, language=None) 87 | 88 | with authcode_tab: 89 | with st.form("authCode"): 90 | code = st.text_input("authCode", help="填入 authCode") 91 | submitted = st.form_submit_button("提交") 92 | if submitted and code: 93 | try: 94 | refresh_token = await get_refresh_token(code) 95 | st.success("refresh token 获取成功", icon="✅") 96 | st.code(refresh_token, language=None) 97 | except KeyError: 98 | st.error("无效的 authCode, 请重新获取", icon="🚨") 99 | 100 | 101 | if __name__ == "__main__": 102 | try: 103 | import uvloop 104 | except ImportError: 105 | pass 106 | else: 107 | uvloop.install() 108 | 109 | asyncio.run(main()) 110 | -------------------------------------------------------------------------------- /backend/requirements.txt: 
-------------------------------------------------------------------------------- 1 | httpx 2 | -------------------------------------------------------------------------------- /backend/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | use std::time::Duration; 3 | 4 | use axum::{ 5 | body::Body, 6 | extract::{Json, State}, 7 | http::{HeaderValue, StatusCode}, 8 | response::{IntoResponse, Response}, 9 | routing::post, 10 | Router, 11 | }; 12 | use reqwest::Client; 13 | use serde::Deserialize; 14 | use tokio; 15 | 16 | #[derive(Deserialize)] 17 | struct QrCodeRequest { 18 | scopes: Vec, 19 | width: Option, 20 | height: Option, 21 | } 22 | 23 | #[derive(Deserialize)] 24 | struct AuthorizationRequest { 25 | grant_type: String, 26 | code: Option, 27 | refresh_token: Option, 28 | } 29 | 30 | #[derive(Clone)] 31 | struct AppState { 32 | client: Client, 33 | } 34 | 35 | #[tokio::main] 36 | async fn main() { 37 | // Create a shared reqwest client 38 | let client = reqwest::Client::builder() 39 | .connect_timeout(Duration::from_secs(10)) 40 | .read_timeout(Duration::from_secs(30)) 41 | .build() 42 | .unwrap(); 43 | 44 | // Create the application state 45 | let state = AppState { client }; 46 | 47 | let app = Router::new() 48 | .route("/oauth/authorize/qrcode", post(qrcode)) 49 | .route("/oauth/access_token", post(access_token)) 50 | .with_state(state); 51 | 52 | let addr = "0.0.0.0:8080"; 53 | println!("Server running on {}", addr); 54 | let listener = tokio::net::TcpListener::bind(addr).await.unwrap(); 55 | axum::serve(listener, app).await.unwrap(); 56 | } 57 | 58 | async fn qrcode( 59 | State(state): State, 60 | Json(payload): Json 61 | ) -> Result { 62 | let client_id = env::var("ALIYUNDRIVE_CLIENT_ID").unwrap_or_default(); 63 | let client_secret = env::var("ALIYUNDRIVE_CLIENT_SECRET").unwrap_or_default(); 64 | 65 | let client = &state.client; 66 | match client 67 | 
.post("https://openapi.aliyundrive.com/oauth/authorize/qrcode") 68 | .json(&serde_json::json!({ 69 | "client_id": client_id, 70 | "client_secret": client_secret, 71 | "scopes": payload.scopes, 72 | "width": payload.width, 73 | "height": payload.height, 74 | })) 75 | .send() 76 | .await 77 | { 78 | Ok(res) => { 79 | let status = res.status(); 80 | let headers = res.headers().clone(); 81 | let content_type = headers 82 | .get("content-type") 83 | .unwrap_or(&HeaderValue::from_static("application/json")) 84 | .to_str() 85 | .unwrap_or("application/json") 86 | .to_string(); 87 | 88 | let body = res.bytes().await.unwrap_or_default(); 89 | Ok(Response::builder() 90 | .status(status) 91 | .header("Content-Type", content_type) 92 | .body(Body::from(body)) 93 | .unwrap()) 94 | } 95 | Err(_) => return Err(StatusCode::INTERNAL_SERVER_ERROR), 96 | } 97 | } 98 | 99 | async fn access_token( 100 | State(state): State, 101 | Json(payload): Json, 102 | ) -> Result { 103 | if payload.code.is_none() && payload.refresh_token.is_none() { 104 | return Err(StatusCode::BAD_REQUEST); 105 | } 106 | 107 | let client_id = env::var("ALIYUNDRIVE_CLIENT_ID").unwrap_or_default(); 108 | let client_secret = env::var("ALIYUNDRIVE_CLIENT_SECRET").unwrap_or_default(); 109 | 110 | let client = &state.client; 111 | match client 112 | .post("https://openapi.aliyundrive.com/oauth/access_token") 113 | .json(&serde_json::json!({ 114 | "client_id": client_id, 115 | "client_secret": client_secret, 116 | "grant_type": payload.grant_type, 117 | "code": payload.code, 118 | "refresh_token": payload.refresh_token, 119 | })) 120 | .send() 121 | .await 122 | { 123 | Ok(res) => { 124 | let status = res.status(); 125 | let headers = res.headers().clone(); 126 | let content_type = headers 127 | .get("content-type") 128 | .unwrap_or(&HeaderValue::from_static("application/json")) 129 | .to_str() 130 | .unwrap_or("application/json") 131 | .to_string(); 132 | 133 | let body = res.bytes().await.unwrap_or_default(); 134 | 
Ok(Response::builder() 135 | .status(status) 136 | .header("Content-Type", content_type) 137 | .body(Body::from(body)) 138 | .unwrap()) 139 | } 140 | Err(_) => return Err(StatusCode::INTERNAL_SERVER_ERROR), 141 | } 142 | } 143 | -------------------------------------------------------------------------------- /doc/openwrt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/messense/aliyundrive-webdav/8d9dfa89ee6752a3b84bef95499f805b946cc9a0/doc/openwrt.png -------------------------------------------------------------------------------- /doc/refresh-token.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/messense/aliyundrive-webdav/8d9dfa89ee6752a3b84bef95499f805b946cc9a0/doc/refresh-token.png -------------------------------------------------------------------------------- /openwrt/aliyundrive-webdav/Makefile: -------------------------------------------------------------------------------- 1 | include $(TOPDIR)/rules.mk 2 | 3 | PKG_NAME:=aliyundrive-webdav 4 | PKG_VERSION:=2.3.3 5 | PKG_RELEASE:=1 6 | 7 | PKG_LICENSE:=MIT 8 | PKG_MAINTAINER:=messense 9 | 10 | PKG_LIBC:=musl 11 | ifeq ($(ARCH),arm) 12 | PKG_LIBC:=musleabi 13 | 14 | ARM_CPU_FEATURES:=$(word 2,$(subst +,$(space),$(call qstrip,$(CONFIG_CPU_TYPE)))) 15 | ifneq ($(filter $(ARM_CPU_FEATURES),vfp vfpv2),) 16 | PKG_LIBC:=musleabihf 17 | endif 18 | endif 19 | 20 | PKG_ARCH=$(ARCH) 21 | ifeq ($(ARCH),i386) 22 | PKG_ARCH:=i686 23 | endif 24 | 25 | PKG_SOURCE:=aliyundrive-webdav-v$(PKG_VERSION).$(PKG_ARCH)-unknown-linux-$(PKG_LIBC).tar.gz 26 | PKG_SOURCE_URL:=https://github.com/messense/aliyundrive-webdav/releases/download/v$(PKG_VERSION)/ 27 | PKG_HASH:=skip 28 | 29 | include $(INCLUDE_DIR)/package.mk 30 | 31 | define Package/aliyundrive-webdav 32 | SECTION:=multimedia 33 | CATEGORY:=Multimedia 34 | TITLE:=WebDAV server for AliyunDrive 35 | 
URL:=https://github.com/messense/aliyundrive-webdav 36 | endef 37 | 38 | define Package/aliyundrive-webdav/description 39 | WebDAV server for AliyunDrive. 40 | endef 41 | 42 | define Package/aliyundrive-webdav/conffiles 43 | /etc/config/aliyundrive-webdav 44 | endef 45 | 46 | define Download/sha256sum 47 | FILE:=$(PKG_SOURCE).sha256 48 | URL_FILE:=$(FILE) 49 | URL:=$(PKG_SOURCE_URL) 50 | HASH:=skip 51 | endef 52 | $(eval $(call Download,sha256sum)) 53 | 54 | define Build/Prepare 55 | mv $(DL_DIR)/$(PKG_SOURCE).sha256 . 56 | cp $(DL_DIR)/$(PKG_SOURCE) . 57 | shasum -a 256 -c $(PKG_SOURCE).sha256 58 | rm $(PKG_SOURCE).sha256 $(PKG_SOURCE) 59 | 60 | tar -C $(PKG_BUILD_DIR)/ -zxf $(DL_DIR)/$(PKG_SOURCE) 61 | endef 62 | 63 | define Build/Compile 64 | echo "aliyundrive-webdav using precompiled binary." 65 | endef 66 | 67 | define Package/aliyundrive-webdav/install 68 | $(INSTALL_DIR) $(1)/usr/bin 69 | $(INSTALL_BIN) $(PKG_BUILD_DIR)/aliyundrive-webdav $(1)/usr/bin/aliyundrive-webdav 70 | $(INSTALL_DIR) $(1)/etc/init.d 71 | $(INSTALL_BIN) ./files/aliyundrive-webdav.init $(1)/etc/init.d/aliyundrive-webdav 72 | $(INSTALL_DIR) $(1)/etc/config 73 | $(INSTALL_CONF) ./files/aliyundrive-webdav.config $(1)/etc/config/aliyundrive-webdav 74 | endef 75 | 76 | $(eval $(call BuildPackage,aliyundrive-webdav)) 77 | -------------------------------------------------------------------------------- /openwrt/aliyundrive-webdav/files/aliyundrive-webdav.config: -------------------------------------------------------------------------------- 1 | config server 2 | option enable '0' 3 | option debug '0' 4 | option refresh_token '' 5 | option host '0.0.0.0' 6 | option port '8080' 7 | option drive_type '' 8 | option auth_user '' 9 | option auth_password '' 10 | option read_buffer_size '10485760' 11 | option upload_buffer_size '16777216' 12 | option cache_size '1000' 13 | option cache_ttl '600' 14 | option root '/' 15 | option no_trash '0' 16 | option read_only '0' 17 | option tls_cert '' 18 | 
option tls_key '' 19 | option skip_upload_same_size '0' 20 | option prefer_http_download '0' 21 | option redirect '1' 22 | -------------------------------------------------------------------------------- /openwrt/aliyundrive-webdav/files/aliyundrive-webdav.init: -------------------------------------------------------------------------------- 1 | #!/bin/sh /etc/rc.common 2 | 3 | USE_PROCD=1 4 | 5 | START=99 6 | STOP=15 7 | 8 | NAME=aliyundrive-webdav 9 | 10 | uci_get_by_type() { 11 | local ret=$(uci get $NAME.@$1[0].$2 2>/dev/null) 12 | echo ${ret:=$3} 13 | } 14 | 15 | start_service() { 16 | local enable=$(uci_get_by_type server enable) 17 | case "$enable" in 18 | 1|on|true|yes|enabled) 19 | local refresh_token=$(uci_get_by_type server refresh_token) 20 | local auth_user=$(uci_get_by_type server auth_user) 21 | local auth_password=$(uci_get_by_type server auth_password) 22 | local read_buf_size=$(uci_get_by_type server read_buffer_size 10485760) 23 | local upload_buf_size=$(uci_get_by_type server upload_buffer_size 16777216) 24 | local cache_size=$(uci_get_by_type server cache_size 1000) 25 | local cache_ttl=$(uci_get_by_type server cache_ttl 600) 26 | local host=$(uci_get_by_type server host 127.0.0.1) 27 | local port=$(uci_get_by_type server port 8080) 28 | local root=$(uci_get_by_type server root /) 29 | local tls_cert=$(uci_get_by_type server tls_cert) 30 | local tls_key=$(uci_get_by_type server tls_key) 31 | local drive_type=$(uci_get_by_type server drive_type) 32 | 33 | local extra_options="--auto-index" 34 | 35 | case "$(uci_get_by_type server no_trash 0)" in 36 | 1|on|true|yes|enabled) 37 | extra_options="$extra_options --no-trash" 38 | ;; 39 | *) ;; 40 | esac 41 | 42 | case "$(uci_get_by_type server read_only 0)" in 43 | 1|on|true|yes|enabled) 44 | extra_options="$extra_options --read-only" 45 | ;; 46 | *) ;; 47 | esac 48 | 49 | case "$(uci_get_by_type server skip_upload_same_size 0)" in 50 | 1|on|true|yes|enabled) 51 | extra_options="$extra_options 
--skip-upload-same-size" 52 | ;; 53 | *) ;; 54 | esac 55 | 56 | case "$(uci_get_by_type server prefer_http_download 0)" in 57 | 1|on|true|yes|enabled) 58 | extra_options="$extra_options --prefer-http-download" 59 | ;; 60 | *) ;; 61 | esac 62 | 63 | case "$(uci_get_by_type server redirect 0)" in 64 | 1|on|true|yes|enabled) 65 | extra_options="$extra_options --redirect" 66 | ;; 67 | *) ;; 68 | esac 69 | 70 | if [[ ! -z "$tls_cert" && ! -z "$tls_key" ]]; then 71 | extra_options="$extra_options --tls-cert $tls_cert --tls-key $tls_key" 72 | fi 73 | 74 | if [[ ! -z "$drive_type" ]]; then 75 | extra_options="$extra_options --drive-type $drive_type" 76 | fi 77 | 78 | procd_open_instance 79 | procd_set_param command /bin/sh -c "/usr/bin/$NAME $extra_options --host $host --port $port --root $root --read-buffer-size $read_buf_size --upload-buffer-size $upload_buf_size --cache-size $cache_size --cache-ttl $cache_ttl --workdir /var/run/$NAME >>/var/log/$NAME.log 2>&1" 80 | procd_set_param pidfile /var/run/$NAME.pid 81 | procd_set_param env REFRESH_TOKEN="$refresh_token" 82 | [[ ! -z "$auth_user" ]] && procd_append_param env WEBDAV_AUTH_USER="$auth_user" 83 | [[ ! 
-z "$auth_password" ]] && procd_append_param env WEBDAV_AUTH_PASSWORD="$auth_password" 84 | case $(uci_get_by_type server debug) in 85 | 1|on|true|yes|enabled) 86 | procd_append_param env RUST_LOG="aliyundrive_webdav=debug" ;; 87 | *) ;; 88 | esac 89 | procd_close_instance ;; 90 | *) 91 | stop_service ;; 92 | esac 93 | } 94 | 95 | service_triggers() { 96 | procd_add_reload_trigger "aliyundrive-webdav" 97 | } 98 | -------------------------------------------------------------------------------- /openwrt/luci-app-aliyundrive-webdav/Makefile: -------------------------------------------------------------------------------- 1 | include $(TOPDIR)/rules.mk 2 | 3 | PKG_NAME:=luci-app-aliyundrive-webdav 4 | PKG_VERSION:=2.3.3 5 | PKG_RELEASE:=1 6 | PKG_PO_VERSION:=$(PKG_VERSION)-$(PKG_RELEASE) 7 | 8 | PKG_LICENSE:=MIT 9 | PKG_MAINTAINER:=messense 10 | 11 | LUCI_TITLE:=LuCI Support for aliyundrive-webdav 12 | LUCI_PKGARCH:=all 13 | LUCI_DEPENDS:=+aliyundrive-webdav 14 | 15 | include $(TOPDIR)/feeds/luci/luci.mk 16 | 17 | # call BuildPackage - OpenWrt buildroot signature 18 | -------------------------------------------------------------------------------- /openwrt/luci-app-aliyundrive-webdav/luasrc/controller/aliyundrive-webdav.lua: -------------------------------------------------------------------------------- 1 | module("luci.controller.aliyundrive-webdav", package.seeall) 2 | 3 | function index() 4 | if not nixio.fs.access("/etc/config/aliyundrive-webdav") then 5 | return 6 | end 7 | 8 | local page 9 | page = entry({ "admin", "services", "aliyundrive-webdav" }, alias("admin", "services", "aliyundrive-webdav", "client"), 10 | _("AliyunDrive WebDAV"), 10) -- 首页 11 | page.dependent = true 12 | page.acl_depends = { "luci-app-aliyundrive-webdav" } 13 | 14 | entry({ "admin", "services", "aliyundrive-webdav", "client" }, cbi("aliyundrive-webdav/client"), _("Settings"), 10).leaf = true -- 客户端配置 15 | entry({ "admin", "services", "aliyundrive-webdav", "log" }, 
form("aliyundrive-webdav/log"), _("Log"), 30).leaf = true -- log page

    entry({ "admin", "services", "aliyundrive-webdav", "status" }, call("action_status")).leaf = true -- running status
    entry({ "admin", "services", "aliyundrive-webdav", "logtail" }, call("action_logtail")).leaf = true -- log collection
    entry({ "admin", "services", "aliyundrive-webdav", "qrcode" }, call("action_generate_qrcode")).leaf = true -- generate QR-code login URL and parameters
    entry({ "admin", "services", "aliyundrive-webdav", "query" }, call("action_query_qrcode")).leaf = true -- query QR-code login result
    entry({ "admin", "services", "aliyundrive-webdav", "invalidate-cache" }, call("action_invalidate_cache")).leaf = true -- invalidate cache
end

-- Report whether the daemon is running and its version string as JSON.
function action_status()
    local e = {}
    e.running = luci.sys.call("pidof aliyundrive-webdav >/dev/null") == 0
    e.application = luci.sys.exec("aliyundrive-webdav --version")
    luci.http.prepare_content("application/json")
    luci.http.write_json(e)
end

-- Return the last 100 log lines with ANSI color escapes stripped.
function action_logtail()
    local fs = require "nixio.fs"
    local log_path = "/var/log/aliyundrive-webdav.log"
    local e = {}
    e.running = luci.sys.call("pidof aliyundrive-webdav >/dev/null") == 0
    if fs.access(log_path) then
        e.log = luci.sys.exec("tail -n 100 %s | sed 's/\\x1b\\[[0-9;]*m//g'" % log_path)
    else
        e.log = ""
    end
    luci.http.prepare_content("application/json")
    luci.http.write_json(e)
end

-- Start a QR-code login session; the CLI already emits JSON, so pass it through.
function action_generate_qrcode()
    local output = luci.sys.exec("aliyundrive-webdav qr generate")
    luci.http.prepare_content("application/json")
    luci.http.write(output)
end

-- Poll the QR-code login result for a session id.
-- SECURITY: `sid` arrives from an HTTP form value (untrusted). It must be
-- shell-quoted before being interpolated into the exec'd command line,
-- otherwise a crafted sid allows arbitrary command injection as root.
function action_query_qrcode()
    local util = require "luci.util"
    local data = luci.http.formvalue()
    local sid = data.sid or ""
    local output = {}
    output.refresh_token = luci.sys.exec("aliyundrive-webdav qr query --sid " .. util.shellquote(sid))
    luci.http.prepare_content("application/json")
    luci.http.write_json(output)
end

-- Ask the running daemon to drop its directory cache (it handles SIGHUP).
function action_invalidate_cache()
    local e = {}
    e.ok = luci.sys.call("kill -HUP `pidof aliyundrive-webdav`") == 0
    luci.http.prepare_content("application/json")
    luci.http.write_json(e)
end

m = Map("aliyundrive-webdav")
m.title = translate("AliyunDrive WebDAV")
m.description = translate("Project GitHub URL")

m:section(SimpleSection).template = "aliyundrive-webdav/aliyundrive-webdav_status"

e = m:section(TypedSection, "server")
e.anonymous = true

enable = e:option(Flag, "enable", translate("Enable"))
enable.rmempty = false

refresh_token = e:option(Value, "refresh_token", translate("Refresh Token"))
refresh_token.description = translate("Double click the input box above to get refresh token by scanning qrcode")

qrcode = e:option(DummyValue, '', '')
qrcode.rawhtml = true
qrcode.template = 'aliyundrive-webdav/aliyundrive-webdav_qrcode'

root = e:option(Value, "root", translate("Root Directory"))
root.description = translate("Restrict access to a folder of aliyundrive, defaults to / which means no restrictions")
root.default = "/"

host = e:option(Value, "host", translate("Host"))
host.default = "0.0.0.0"
host.datatype = "ipaddr"

port = e:option(Value, "port", translate("Port"))
port.default = "8080"
port.datatype = "port"

drive_type = e:option(ListValue, "drive_type", translate("Aliyun drive type"))
drive_type.description = translate("Supports drive type: resource, backup")
drive_type:value("resource", "resource");
drive_type:value("backup", "backup"); 36 | drive_type.default = "backup" 37 | 38 | tls_cert = e:option(Value, "tls_cert", translate("TLS certificate file path")) 39 | tls_key = e:option(Value, "tls_key", translate("TLS private key file path")) 40 | 41 | auth_user = e:option(Value, "auth_user", translate("Username")) 42 | auth_password = e:option(Value, "auth_password", translate("Password")) 43 | auth_password.password = true 44 | 45 | read_buffer_size = e:option(Value, "read_buffer_size", translate("Read Buffer Size")) 46 | read_buffer_size.default = "10485760" 47 | read_buffer_size.datatype = "uinteger" 48 | 49 | prefer_http_download = e:option(Flag, "prefer_http_download", translate("Prefer HTTP Download")) 50 | prefer_http_download.description = translate("Prefer downloading files using HTTP instead of HTTPS protocol") 51 | prefer_http_download.rmempty = false 52 | 53 | redirect = e:option(Flag, "redirect", translate("Enable 302 Redirect")) 54 | redirect.description = translate("Enable 302 redirect when possible") 55 | redirect.rmempty = false 56 | 57 | upload_buffer_size = e:option(Value, "upload_buffer_size", translate("Upload Buffer Size")) 58 | upload_buffer_size.default = "16777216" 59 | upload_buffer_size.datatype = "uinteger" 60 | 61 | skip_upload_same_size = e:option(Flag, "skip_upload_same_size", translate("Skip uploading same size files")) 62 | skip_upload_same_size.description = translate("Reduce the upload traffic by skipping uploading files with the same size") 63 | skip_upload_same_size.rmempty = false 64 | 65 | cache_size = e:option(Value, "cache_size", translate("Cache Size")) 66 | cache_size.default = "1000" 67 | cache_size.datatype = "uinteger" 68 | 69 | cache_ttl = e:option(Value, "cache_ttl", translate("Cache Expiration Time (seconds)")) 70 | cache_ttl.default = "600" 71 | cache_ttl.datatype = "uinteger" 72 | 73 | no_trash = e:option(Flag, "no_trash", translate("Delete file permanently instead of trashing")) 74 | no_trash.rmempty = false 75 
| 76 | read_only = e:option(Flag, "read_only", translate("Enable read only mode")) 77 | read_only.description = translate("Disallow upload, modify and delete file operations") 78 | read_only.rmempty = false 79 | 80 | debug = e:option(Flag, "debug", translate("Debug Mode")) 81 | debug.rmempty = false 82 | 83 | return m 84 | -------------------------------------------------------------------------------- /openwrt/luci-app-aliyundrive-webdav/luasrc/model/cbi/aliyundrive-webdav/log.lua: -------------------------------------------------------------------------------- 1 | log = SimpleForm("logview") 2 | log.submit = false 3 | log.reset = false 4 | 5 | t = log:field(DummyValue, '', '') 6 | t.rawhtml = true 7 | t.template = 'aliyundrive-webdav/aliyundrive-webdav_log' 8 | 9 | return log 10 | -------------------------------------------------------------------------------- /openwrt/luci-app-aliyundrive-webdav/luasrc/view/aliyundrive-webdav/aliyundrive-webdav_log.htm: -------------------------------------------------------------------------------- 1 | <%+cbi/valueheader%> 2 | 3 | 4 | 15 | <%+cbi/valuefooter%> 16 | -------------------------------------------------------------------------------- /openwrt/luci-app-aliyundrive-webdav/luasrc/view/aliyundrive-webdav/aliyundrive-webdav_qrcode.htm: -------------------------------------------------------------------------------- 1 | <%+cbi/valueheader%> 2 | 3 | 51 | 52 | 158 | <%+cbi/valuefooter%> 159 | -------------------------------------------------------------------------------- /openwrt/luci-app-aliyundrive-webdav/luasrc/view/aliyundrive-webdav/aliyundrive-webdav_status.htm: -------------------------------------------------------------------------------- 1 | 28 | 29 |
30 |

31 | <%:Collecting data...%> 32 |

33 |

34 | 35 |

36 |
37 | -------------------------------------------------------------------------------- /openwrt/luci-app-aliyundrive-webdav/po/zh-cn/aliyundrive-webdav.po: -------------------------------------------------------------------------------- 1 | msgid "" 2 | msgstr "Content-Type: text/plain; charset=UTF-8\n" 3 | 4 | msgid "AliyunDrive" 5 | msgstr "阿里云盘" 6 | 7 | msgid "AliyunDrive WebDAV" 8 | msgstr "阿里云盘 WebDAV" 9 | 10 | msgid "Enable" 11 | msgstr "启用" 12 | 13 | msgid "Refresh Token" 14 | msgstr "Refresh Token" 15 | 16 | msgid "Root Directory" 17 | msgstr "云盘根目录" 18 | 19 | msgid "Host" 20 | msgstr "监听主机" 21 | 22 | msgid "Port" 23 | msgstr "监听端口" 24 | 25 | msgid "Aliyun drive type" 26 | msgstr "云盘驱动类型" 27 | 28 | msgid "Supports drive type: resource, backup" 29 | msgstr "支持驱动器类型:resource, backup" 30 | 31 | msgid "TLS certificate file path" 32 | msgstr "TLS 证书文件路径" 33 | 34 | msgid "TLS private key file path" 35 | msgstr "TLS 私钥文件路径" 36 | 37 | msgid "Username" 38 | msgstr "用户名" 39 | 40 | msgid "Password" 41 | msgstr "密码" 42 | 43 | msgid "Read Buffer Size" 44 | msgstr "下载缓冲大小(bytes)" 45 | 46 | msgid "Upload Buffer Size" 47 | msgstr "上传缓冲大小(bytes)" 48 | 49 | msgid "Cache Size" 50 | msgstr "目录缓存大小" 51 | 52 | msgid "Cache Expiration Time (seconds)" 53 | msgstr "目录缓存过期时间(单位为秒)" 54 | 55 | msgid "Collecting data..." 56 | msgstr "获取数据中..." 
57 | 58 | msgid "Invalidate cache" 59 | msgstr "清除缓存" 60 | 61 | msgid "RUNNING" 62 | msgstr "运行中" 63 | 64 | msgid "NOT RUNNING" 65 | msgstr "未运行" 66 | 67 | msgid "Settings" 68 | msgstr "设置" 69 | 70 | msgid "Log" 71 | msgstr "日志" 72 | 73 | msgid "Debug Mode" 74 | msgstr "调试模式" 75 | 76 | msgid "Project GitHub URL" 77 | msgstr "GitHub 项目地址 | 加入知识星球" 78 | 79 | msgid "Double click the input box above to get refresh token by scanning qrcode" 80 | msgstr "鼠标双击上面的输入框扫码登录自动获取 refresh token" 81 | 82 | msgid "Restrict access to a folder of aliyundrive, defaults to / which means no restrictions" 83 | msgstr "限制只能访问该云盘目录,默认为 / 表示不限制,注意这个参数不是本地磁盘路径" 84 | 85 | msgid "Delete file permanently instead of trashing" 86 | msgstr "删除文件不放入回收站" 87 | 88 | msgid "Enable read only mode" 89 | msgstr "启用只读模式" 90 | 91 | msgid "Disallow upload, modify and delete file operations" 92 | msgstr "禁止上传、修改和删除文件操作" 93 | 94 | msgid "Skip uploading same size files" 95 | msgstr "跳过上传相同大小的文件" 96 | 97 | msgid "Reduce the upload traffic by skipping uploading files with the same size" 98 | msgstr "跳过上传相同大小的文件减少上传流量消耗,但可能会导致修改过的同样大小的文件不会被上传" 99 | 100 | msgid "Prefer HTTP Download" 101 | msgstr "使用 HTTP 下载" 102 | 103 | msgid "Prefer downloading files using HTTP instead of HTTPS protocol" 104 | msgstr "优先使用 HTTP 而不是 HTTPS 协议下载,低端设备上降低 CPU 占用" 105 | 106 | msgid "Enable 302 Redirect" 107 | msgstr "启用 302 重定向" 108 | 109 | msgid "Enable 302 redirect when possible" 110 | msgstr "在可能的情况下启用 302 重定向" 111 | -------------------------------------------------------------------------------- /openwrt/luci-app-aliyundrive-webdav/po/zh_Hans: -------------------------------------------------------------------------------- 1 | zh-cn -------------------------------------------------------------------------------- /openwrt/luci-app-aliyundrive-webdav/root/etc/uci-defaults/luci-aliyundrive-webdav: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | uci -q batch <<-EOF >/dev/null 4 | 
delete ucitrack.@aliyundrive-webdav[-1] 5 | add ucitrack aliyundrive-webdav 6 | set ucitrack.@aliyundrive-webdav[-1].init=aliyundrive-webdav 7 | commit ucitrack 8 | EOF 9 | 10 | rm -f /tmp/luci-indexcache 11 | exit 0 12 | -------------------------------------------------------------------------------- /openwrt/luci-app-aliyundrive-webdav/root/usr/share/rpcd/acl.d/luci-app-aliyundrive-webdav.json: -------------------------------------------------------------------------------- 1 | { 2 | "luci-app-aliyundrive-webdav": { 3 | "description": "Grant UCI access for luci-app-aliyundrive-webdav", 4 | "read": { 5 | "uci": [ "aliyundrive-webdav" ] 6 | }, 7 | "write": { 8 | "uci": [ "aliyundrive-webdav" ] 9 | } 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["maturin>=1.0,<2.0"] 3 | build-backend = "maturin" 4 | 5 | [tool.maturin] 6 | bindings = "bin" 7 | -------------------------------------------------------------------------------- /snap/snapcraft.yaml: -------------------------------------------------------------------------------- 1 | name: aliyundrive-webdav 2 | version: '2.3.3' 3 | summary: 阿里云盘 WebDAV 4 | description: | 5 | 阿里云盘 WebDAV 服务 6 | 7 | grade: stable # must be 'stable' to release into candidate/stable channels 8 | confinement: strict # use 'strict' once you have the right plugs and slots 9 | 10 | base: core18 11 | 12 | architectures: 13 | - build-on: amd64 14 | - build-on: i386 15 | - build-on: arm64 16 | - build-on: armhf 17 | 18 | parts: 19 | aliyundrive-webdav: 20 | plugin: rust 21 | source: . 
22 | 23 | apps: 24 | aliyundrive-webdav: 25 | command: bin/aliyundrive-webdav 26 | plugs: [network, network-bind] 27 | -------------------------------------------------------------------------------- /src/cache.rs: -------------------------------------------------------------------------------- 1 | use std::path::Path; 2 | use std::time::Duration; 3 | 4 | use moka::future::Cache as MokaCache; 5 | use tracing::debug; 6 | 7 | use crate::drive::AliyunFile; 8 | 9 | #[derive(Clone)] 10 | pub struct Cache { 11 | inner: MokaCache>, 12 | } 13 | 14 | impl Cache { 15 | pub fn new(max_capacity: u64, ttl: u64) -> Self { 16 | let inner = MokaCache::builder() 17 | .max_capacity(max_capacity) 18 | .time_to_live(Duration::from_secs(ttl)) 19 | .build(); 20 | Self { inner } 21 | } 22 | 23 | pub fn get(&self, key: &str) -> Option> { 24 | debug!(key = %key, "cache: get"); 25 | self.inner.get(key) 26 | } 27 | 28 | pub async fn insert(&self, key: String, value: Vec) { 29 | debug!(key = %key, "cache: insert"); 30 | self.inner.insert(key, value).await; 31 | } 32 | 33 | pub async fn invalidate(&self, path: &Path) { 34 | let key = path.to_string_lossy().into_owned(); 35 | debug!(path = %path.display(), key = %key, "cache: invalidate"); 36 | self.inner.invalidate(&key).await; 37 | } 38 | 39 | pub async fn invalidate_parent(&self, path: &Path) { 40 | if let Some(parent) = path.parent() { 41 | self.invalidate(parent).await; 42 | } 43 | } 44 | 45 | pub fn invalidate_all(&self) { 46 | debug!("cache: invalidate all"); 47 | self.inner.invalidate_all(); 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /src/drive/mod.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::path::{Path, PathBuf}; 3 | use std::sync::Arc; 4 | use std::time::{Duration, SystemTime}; 5 | 6 | use anyhow::{bail, Context, Result}; 7 | use bytes::Bytes; 8 | use clap::ValueEnum; 9 | use 
dav_server::fs::{DavDirEntry, DavMetaData, FsFuture, FsResult}; 10 | use futures_util::future::FutureExt; 11 | use reqwest::{ 12 | header::{HeaderMap, HeaderValue}, 13 | IntoUrl, StatusCode, 14 | }; 15 | use reqwest_middleware::{ClientBuilder, ClientWithMiddleware}; 16 | use reqwest_retry::{policies::ExponentialBackoff, RetryTransientMiddleware}; 17 | use serde::de::DeserializeOwned; 18 | use serde::Serialize; 19 | use tokio::{ 20 | sync::{oneshot, RwLock}, 21 | time, 22 | }; 23 | use tracing::{debug, error, info, warn}; 24 | 25 | pub mod model; 26 | 27 | use model::*; 28 | pub use model::{AliyunFile, DateTime, FileType}; 29 | 30 | const ORIGIN: &str = "https://www.aliyundrive.com"; 31 | const REFERER: &str = "https://www.aliyundrive.com/"; 32 | const UA: &str = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.83 Safari/537.36"; 33 | 34 | /// Aliyundrive drive type 35 | #[derive(Debug, Clone, Copy, ValueEnum)] 36 | pub enum DriveType { 37 | /// Resource drive 38 | Resource, 39 | /// Backup drive 40 | Backup, 41 | /// Default drive 42 | Default, 43 | } 44 | 45 | #[derive(Debug, Clone)] 46 | pub struct DriveConfig { 47 | pub api_base_url: String, 48 | pub refresh_token_host: String, 49 | pub workdir: Option, 50 | pub client_id: Option, 51 | pub client_secret: Option, 52 | pub drive_type: Option, 53 | } 54 | 55 | #[derive(Debug, Clone)] 56 | struct Credentials { 57 | refresh_token: String, 58 | access_token: Option, 59 | } 60 | 61 | #[derive(Debug, Clone)] 62 | pub struct AliyunDrive { 63 | config: DriveConfig, 64 | client: ClientWithMiddleware, 65 | credentials: Arc>, 66 | drive_id: Option, 67 | } 68 | 69 | impl AliyunDrive { 70 | pub async fn new(config: DriveConfig, refresh_token: String) -> Result { 71 | let refresh_token_is_empty = refresh_token.is_empty(); 72 | let credentials = Credentials { 73 | refresh_token, 74 | access_token: None, 75 | }; 76 | let mut headers = HeaderMap::new(); 77 | 
headers.insert("Origin", HeaderValue::from_static(ORIGIN)); 78 | headers.insert("Referer", HeaderValue::from_static(REFERER)); 79 | if let Ok(canary_env) = std::env::var("ALIYUNDRIVE_CANARY") { 80 | // 灰度环境:gray 81 | headers.insert("X-Canary", HeaderValue::from_str(&canary_env)?); 82 | } 83 | let retry_policy = ExponentialBackoff::builder() 84 | .backoff_exponent(2) 85 | .retry_bounds(Duration::from_millis(100), Duration::from_secs(5)) 86 | .build_with_max_retries(3); 87 | let client = reqwest::Client::builder() 88 | .user_agent(UA) 89 | .default_headers(headers) 90 | // OSS closes idle connections after 60 seconds, 91 | // so we can close idle connections ahead of time to prevent re-using them. 92 | // See also https://github.com/hyperium/hyper/issues/2136 93 | .pool_idle_timeout(Duration::from_secs(50)) 94 | .connect_timeout(Duration::from_secs(10)) 95 | .timeout(Duration::from_secs(30)) 96 | .build()?; 97 | let client = ClientBuilder::new(client) 98 | .with(RetryTransientMiddleware::new_with_policy(retry_policy)) 99 | .build(); 100 | let drive_type = config.drive_type.clone(); 101 | let mut drive = Self { 102 | config, 103 | client, 104 | credentials: Arc::new(RwLock::new(credentials)), 105 | drive_id: None, 106 | }; 107 | 108 | let (tx, rx) = oneshot::channel(); 109 | // schedule update token task 110 | let refresh_token_from_file = if let Some(dir) = drive.config.workdir.as_ref() { 111 | read_refresh_token(dir).await.ok() 112 | } else { 113 | None 114 | }; 115 | if refresh_token_is_empty && refresh_token_from_file.is_none() { 116 | bail!("No refresh token provided! 
\n📝 Please specify refresh token from `--refresh-token` CLI option."); 117 | } 118 | 119 | let client = drive.clone(); 120 | tokio::spawn(async move { 121 | let mut delay_seconds = 7000; 122 | match client 123 | .do_refresh_token_with_retry(refresh_token_from_file) 124 | .await 125 | { 126 | Ok(res) => { 127 | // token usually expires in 7200s, refresh earlier 128 | delay_seconds = res.expires_in - 200; 129 | if tx.send(res.access_token).is_err() { 130 | error!("send access_token failed"); 131 | } 132 | } 133 | Err(err) => { 134 | error!("refresh token failed: {}", err); 135 | tx.send(String::new()).unwrap(); 136 | } 137 | } 138 | loop { 139 | time::sleep(time::Duration::from_secs(delay_seconds)).await; 140 | if let Err(err) = client.do_refresh_token_with_retry(None).await { 141 | error!("refresh token failed: {}", err); 142 | } 143 | } 144 | }); 145 | 146 | let access_token = rx.await?; 147 | if access_token.is_empty() { 148 | bail!("get access_token failed"); 149 | } 150 | let drive_type_str = match drive_type { 151 | Some(DriveType::Resource) => "resource", 152 | Some(DriveType::Backup) => "backup", 153 | Some(DriveType::Default) | None => "default", 154 | }; 155 | let drive_id = drive 156 | .get_drive_id(drive_type) 157 | .await 158 | .context("get drive id failed")?; 159 | info!(drive_id = %drive_id, "found {} drive", drive_type_str); 160 | drive.drive_id = Some(drive_id); 161 | 162 | Ok(drive) 163 | } 164 | 165 | async fn save_refresh_token(&self, refresh_token: &str) -> Result<()> { 166 | if let Some(dir) = self.config.workdir.as_ref() { 167 | tokio::fs::create_dir_all(dir).await?; 168 | let refresh_token_file = dir.join("refresh_token"); 169 | tokio::fs::write(refresh_token_file, refresh_token).await?; 170 | } 171 | Ok(()) 172 | } 173 | 174 | async fn do_refresh_token(&self, refresh_token: &str) -> Result { 175 | let mut data = HashMap::new(); 176 | data.insert("refresh_token", refresh_token); 177 | data.insert("grant_type", "refresh_token"); 178 | if let 
Some(client_id) = self.config.client_id.as_ref() { 179 | data.insert("client_id", client_id); 180 | } 181 | if let Some(client_secret) = self.config.client_secret.as_ref() { 182 | data.insert("client_secret", client_secret); 183 | } 184 | let res = self 185 | .client 186 | .post(format!( 187 | "{}/oauth/access_token", 188 | &self.config.refresh_token_host 189 | )) 190 | .json(&data) 191 | .send() 192 | .await?; 193 | match res.error_for_status_ref() { 194 | Ok(_) => { 195 | let res = res.json::().await?; 196 | info!("refresh token succeed"); 197 | debug!( 198 | refresh_token = %res.refresh_token, 199 | "new refresh token" 200 | ); 201 | Ok(res) 202 | } 203 | Err(err) => { 204 | let msg = res.text().await?; 205 | let context = format!("{}: {}", err, msg); 206 | Err(err).context(context) 207 | } 208 | } 209 | } 210 | 211 | async fn do_refresh_token_with_retry( 212 | &self, 213 | refresh_token_from_file: Option, 214 | ) -> Result { 215 | let mut last_err = None; 216 | let mut refresh_token = self.refresh_token().await; 217 | for _ in 0..10 { 218 | match self.do_refresh_token(&refresh_token).await { 219 | Ok(res) => { 220 | let mut cred = self.credentials.write().await; 221 | cred.refresh_token = res.refresh_token.clone(); 222 | cred.access_token = Some(res.access_token.clone()); 223 | if let Err(err) = self.save_refresh_token(&res.refresh_token).await { 224 | error!(error = %err, "save refresh token failed"); 225 | } 226 | return Ok(res); 227 | } 228 | Err(err) => { 229 | let mut should_warn = true; 230 | let mut should_retry = match err.downcast_ref::() { 231 | Some(e) => { 232 | e.is_connect() 233 | || e.is_timeout() 234 | || matches!(e.status(), Some(StatusCode::TOO_MANY_REQUESTS)) 235 | } 236 | None => false, 237 | }; 238 | // retry if command line refresh_token is invalid but we also have 239 | // refresh_token from file 240 | if let Some(refresh_token_from_file) = refresh_token_from_file.as_ref() { 241 | if !should_retry && &refresh_token != 
refresh_token_from_file { 242 | refresh_token = refresh_token_from_file.trim().to_string(); 243 | should_retry = true; 244 | // don't warn if we are gonna try refresh_token from file 245 | should_warn = false; 246 | } 247 | } 248 | if should_retry { 249 | if should_warn { 250 | warn!(error = %err, "refresh token failed, will wait and retry"); 251 | } 252 | last_err = Some(err); 253 | time::sleep(Duration::from_secs(1)).await; 254 | continue; 255 | } else { 256 | last_err = Some(err); 257 | break; 258 | } 259 | } 260 | } 261 | } 262 | Err(last_err.unwrap()) 263 | } 264 | 265 | async fn refresh_token(&self) -> String { 266 | let cred = self.credentials.read().await; 267 | cred.refresh_token.clone() 268 | } 269 | 270 | async fn access_token(&self) -> Result { 271 | let cred = self.credentials.read().await; 272 | cred.access_token.clone().context("missing access_token") 273 | } 274 | 275 | fn drive_id(&self) -> Result<&str> { 276 | self.drive_id.as_deref().context("missing drive_id") 277 | } 278 | 279 | async fn request(&self, url: String, req: &T) -> Result> 280 | where 281 | T: Serialize + ?Sized, 282 | U: DeserializeOwned, 283 | { 284 | let mut access_token = self.access_token().await?; 285 | let url = reqwest::Url::parse(&url)?; 286 | let res = self 287 | .client 288 | .post(url.clone()) 289 | .bearer_auth(&access_token) 290 | .json(&req) 291 | .send() 292 | .await?; 293 | match res.error_for_status_ref() { 294 | Ok(_) => { 295 | if res.status() == StatusCode::NO_CONTENT { 296 | return Ok(None); 297 | } 298 | // let res = res.text().await?; 299 | // println!("{}: {}", url, res); 300 | // let res = serde_json::from_str(&res)?; 301 | let res = res.json::().await?; 302 | Ok(Some(res)) 303 | } 304 | Err(err) => { 305 | let err_msg = res.text().await?; 306 | debug!(error = %err_msg, url = %url, "request failed"); 307 | match err.status() { 308 | Some( 309 | status_code 310 | @ 311 | // 4xx 312 | (StatusCode::UNAUTHORIZED 313 | | StatusCode::REQUEST_TIMEOUT 314 | | 
StatusCode::TOO_MANY_REQUESTS 315 | // 5xx 316 | | StatusCode::INTERNAL_SERVER_ERROR 317 | | StatusCode::BAD_GATEWAY 318 | | StatusCode::SERVICE_UNAVAILABLE 319 | | StatusCode::GATEWAY_TIMEOUT), 320 | ) => { 321 | if status_code == StatusCode::UNAUTHORIZED { 322 | // refresh token and retry 323 | let token_res = self.do_refresh_token_with_retry(None).await?; 324 | access_token = token_res.access_token; 325 | } else { 326 | // wait for a while and retry 327 | time::sleep(Duration::from_secs(1)).await; 328 | } 329 | let res = self 330 | .client 331 | .post(url) 332 | .bearer_auth(&access_token) 333 | .json(&req) 334 | .send() 335 | .await? 336 | .error_for_status()?; 337 | if res.status() == StatusCode::NO_CONTENT { 338 | return Ok(None); 339 | } 340 | let res = res.json::().await?; 341 | Ok(Some(res)) 342 | } 343 | _ => Err(err.into()), 344 | } 345 | } 346 | } 347 | } 348 | 349 | pub async fn get_drive_id(&self, drive_type: Option) -> Result { 350 | let req = HashMap::::new(); 351 | let res: GetDriveInfoResponse = self 352 | .request( 353 | format!("{}/adrive/v1.0/user/getDriveInfo", self.config.api_base_url), 354 | &req, 355 | ) 356 | .await 357 | .and_then(|res| res.context("expect response"))?; 358 | let drive_id = match drive_type { 359 | Some(DriveType::Resource) => res.resource_drive_id.unwrap_or_else(|| { 360 | warn!("resource drive not found, use default drive instead"); 361 | res.default_drive_id 362 | }), 363 | Some(DriveType::Backup) => res.backup_drive_id.unwrap_or_else(|| { 364 | warn!("backup drive not found, use default drive instead"); 365 | res.default_drive_id 366 | }), 367 | Some(DriveType::Default) | None => res.default_drive_id, 368 | }; 369 | Ok(drive_id) 370 | } 371 | 372 | pub async fn get_file(&self, file_id: &str) -> Result> { 373 | let drive_id = self.drive_id()?; 374 | debug!(drive_id = %drive_id, file_id = %file_id, "get file"); 375 | let req = GetFileRequest { drive_id, file_id }; 376 | let res: Result = self 377 | .request( 378 | 
format!("{}/adrive/v1.0/openFile/get", self.config.api_base_url), 379 | &req, 380 | ) 381 | .await 382 | .and_then(|res| res.context("expect response")); 383 | match res { 384 | Ok(file) => Ok(Some(file.into())), 385 | Err(err) => { 386 | if let Some(req_err) = err.downcast_ref::() { 387 | if matches!(req_err.status(), Some(StatusCode::NOT_FOUND)) { 388 | Ok(None) 389 | } else { 390 | Err(err) 391 | } 392 | } else { 393 | Err(err) 394 | } 395 | } 396 | } 397 | } 398 | 399 | pub async fn get_by_path(&self, path: &str) -> Result> { 400 | let drive_id = self.drive_id()?; 401 | debug!(drive_id = %drive_id, path = %path, "get file by path"); 402 | if path == "/" || path.is_empty() { 403 | return Ok(Some(AliyunFile::new_root())); 404 | } 405 | let req = GetFileByPathRequest { 406 | drive_id, 407 | file_path: path, 408 | }; 409 | let res: Result = self 410 | .request( 411 | format!( 412 | "{}/adrive/v1.0/openFile/get_by_path", 413 | self.config.api_base_url 414 | ), 415 | &req, 416 | ) 417 | .await 418 | .and_then(|res| res.context("expect response")); 419 | match res { 420 | Ok(file) => Ok(Some(file)), 421 | Err(_) => Ok(None), 422 | } 423 | } 424 | 425 | pub async fn list_all(&self, parent_file_id: &str) -> Result> { 426 | let mut files = Vec::new(); 427 | let mut marker = None; 428 | loop { 429 | let res = self.list(parent_file_id, marker.as_deref()).await?; 430 | files.extend(res.items.into_iter().map(|f| f.into())); 431 | if res.next_marker.is_empty() { 432 | break; 433 | } 434 | marker = Some(res.next_marker); 435 | } 436 | Ok(files) 437 | } 438 | 439 | pub async fn list( 440 | &self, 441 | parent_file_id: &str, 442 | marker: Option<&str>, 443 | ) -> Result { 444 | let drive_id = self.drive_id()?; 445 | debug!(drive_id = %drive_id, parent_file_id = %parent_file_id, marker = ?marker, "list file"); 446 | let req = ListFileRequest { 447 | drive_id, 448 | parent_file_id, 449 | limit: 200, 450 | fields: "*", 451 | order_by: "updated_at", 452 | order_direction: "DESC", 
453 | marker, 454 | }; 455 | self.request( 456 | format!("{}/adrive/v1.0/openFile/list", self.config.api_base_url), 457 | &req, 458 | ) 459 | .await 460 | .and_then(|res| res.context("expect response")) 461 | } 462 | 463 | pub async fn download(&self, url: U, range: Option<(u64, usize)>) -> Result { 464 | use reqwest::header::RANGE; 465 | 466 | let url = url.into_url()?; 467 | let res = if let Some((start_pos, size)) = range { 468 | let end_pos = start_pos + size as u64 - 1; 469 | debug!(url = %url, start = start_pos, end = end_pos, "download file"); 470 | let range = format!("bytes={}-{}", start_pos, end_pos); 471 | self.client 472 | .get(url) 473 | .header(RANGE, range) 474 | .send() 475 | .await? 476 | .error_for_status()? 477 | } else { 478 | debug!(url = %url, "download file"); 479 | self.client.get(url).send().await?.error_for_status()? 480 | }; 481 | Ok(res.bytes().await?) 482 | } 483 | 484 | pub async fn get_download_url(&self, file_id: &str) -> Result { 485 | debug!(file_id = %file_id, "get download url"); 486 | let req = GetFileDownloadUrlRequest { 487 | drive_id: self.drive_id()?, 488 | file_id, 489 | expire_sec: 14400, // 4 hours 490 | }; 491 | let res: GetFileDownloadUrlResponse = self 492 | .request( 493 | format!( 494 | "{}/adrive/v1.0/openFile/getDownloadUrl", 495 | self.config.api_base_url 496 | ), 497 | &req, 498 | ) 499 | .await? 
500 | .context("expect response")?; 501 | Ok(res) 502 | } 503 | 504 | async fn trash(&self, file_id: &str) -> Result<()> { 505 | debug!(file_id = %file_id, "trash file"); 506 | let req = TrashRequest { 507 | drive_id: self.drive_id()?, 508 | file_id, 509 | }; 510 | let res: Result> = self 511 | .request( 512 | format!( 513 | "{}/adrive/v1.0/openFile/recyclebin/trash", 514 | self.config.api_base_url 515 | ), 516 | &req, 517 | ) 518 | .await; 519 | if let Err(err) = res { 520 | if let Some(req_err) = err.downcast_ref::() { 521 | // Ignore 404 and 400 status codes 522 | if !matches!( 523 | req_err.status(), 524 | Some(StatusCode::NOT_FOUND | StatusCode::BAD_REQUEST) 525 | ) { 526 | return Err(err); 527 | } 528 | } 529 | } 530 | Ok(()) 531 | } 532 | 533 | async fn delete_file(&self, file_id: &str) -> Result<()> { 534 | debug!(file_id = %file_id, "delete file"); 535 | let req = TrashRequest { 536 | drive_id: self.drive_id()?, 537 | file_id, 538 | }; 539 | let res: Result> = self 540 | .request( 541 | format!("{}/adrive/v1.0/openFile/delete", self.config.api_base_url), 542 | &req, 543 | ) 544 | .await; 545 | if let Err(err) = res { 546 | if let Some(req_err) = err.downcast_ref::() { 547 | // Ignore 404 and 400 status codes 548 | if !matches!( 549 | req_err.status(), 550 | Some(StatusCode::NOT_FOUND | StatusCode::BAD_REQUEST) 551 | ) { 552 | return Err(err); 553 | } 554 | } 555 | } 556 | Ok(()) 557 | } 558 | 559 | pub async fn remove_file(&self, file_id: &str, trash: bool) -> Result<()> { 560 | if trash { 561 | self.trash(file_id).await?; 562 | } else { 563 | self.delete_file(file_id).await?; 564 | } 565 | Ok(()) 566 | } 567 | 568 | pub async fn create_folder(&self, parent_file_id: &str, name: &str) -> Result<()> { 569 | debug!(parent_file_id = %parent_file_id, name = %name, "create folder"); 570 | let req = CreateFolderRequest { 571 | check_name_mode: "refuse", 572 | drive_id: self.drive_id()?, 573 | name, 574 | parent_file_id, 575 | r#type: "folder", 576 | }; 577 | let 
_res: Option = self 578 | .request( 579 | format!("{}/adrive/v1.0/openFile/create", self.config.api_base_url), 580 | &req, 581 | ) 582 | .await?; 583 | Ok(()) 584 | } 585 | 586 | pub async fn rename_file(&self, file_id: &str, name: &str) -> Result<()> { 587 | debug!(file_id = %file_id, name = %name, "rename file"); 588 | let req = RenameFileRequest { 589 | drive_id: self.drive_id()?, 590 | file_id, 591 | name, 592 | }; 593 | let _res: Option = self 594 | .request( 595 | format!("{}/adrive/v1.0/openFile/update", self.config.api_base_url), 596 | &req, 597 | ) 598 | .await?; 599 | Ok(()) 600 | } 601 | 602 | pub async fn move_file( 603 | &self, 604 | file_id: &str, 605 | to_parent_file_id: &str, 606 | new_name: Option<&str>, 607 | ) -> Result<()> { 608 | debug!(file_id = %file_id, to_parent_file_id = %to_parent_file_id, "move file"); 609 | let drive_id = self.drive_id()?; 610 | let req = MoveFileRequest { 611 | drive_id, 612 | file_id, 613 | to_parent_file_id, 614 | new_name, 615 | }; 616 | let _res: Option = self 617 | .request( 618 | format!("{}/adrive/v1.0/openFile/move", self.config.api_base_url), 619 | &req, 620 | ) 621 | .await?; 622 | Ok(()) 623 | } 624 | 625 | pub async fn copy_file(&self, file_id: &str, to_parent_file_id: &str) -> Result<()> { 626 | debug!(file_id = %file_id, to_parent_file_id = %to_parent_file_id, "copy file"); 627 | let drive_id = self.drive_id()?; 628 | let req = CopyFileRequest { 629 | drive_id, 630 | file_id, 631 | to_parent_file_id, 632 | auto_rename: false, 633 | }; 634 | let _res: Option = self 635 | .request( 636 | format!("{}/adrive/v1.0/openFile/copy", self.config.api_base_url), 637 | &req, 638 | ) 639 | .await?; 640 | Ok(()) 641 | } 642 | 643 | pub async fn create_file_with_proof( 644 | &self, 645 | name: &str, 646 | parent_file_id: &str, 647 | size: u64, 648 | chunk_count: u64, 649 | ) -> Result { 650 | debug!(name = %name, parent_file_id = %parent_file_id, size = size, "create file with proof"); 651 | let drive_id = 
self.drive_id()?; 652 | let part_info_list = (1..=chunk_count) 653 | .map(|part_number| UploadPartInfo { 654 | part_number, 655 | upload_url: String::new(), 656 | }) 657 | .collect(); 658 | let req = CreateFileWithProofRequest { 659 | check_name_mode: "refuse", 660 | content_hash: "", 661 | content_hash_name: "none", 662 | drive_id, 663 | name, 664 | parent_file_id, 665 | proof_code: "", 666 | proof_version: "v1", 667 | size, 668 | part_info_list, 669 | r#type: "file", 670 | }; 671 | let res: CreateFileWithProofResponse = self 672 | .request( 673 | format!("{}/adrive/v1.0/openFile/create", self.config.api_base_url), 674 | &req, 675 | ) 676 | .await? 677 | .context("expect response")?; 678 | Ok(res) 679 | } 680 | 681 | pub async fn complete_file_upload(&self, file_id: &str, upload_id: &str) -> Result<()> { 682 | debug!(file_id = %file_id, upload_id = %upload_id, "complete file upload"); 683 | let drive_id = self.drive_id()?; 684 | let req = CompleteUploadRequest { 685 | drive_id, 686 | file_id, 687 | upload_id, 688 | }; 689 | let _res: Option = self 690 | .request( 691 | format!("{}/adrive/v1.0/openFile/complete", self.config.api_base_url), 692 | &req, 693 | ) 694 | .await?; 695 | Ok(()) 696 | } 697 | 698 | pub async fn upload(&self, url: &str, body: Bytes) -> Result<()> { 699 | let res = self.client.put(url).body(body).send().await?; 700 | if let Err(err) = res.error_for_status_ref() { 701 | let detail = res 702 | .text() 703 | .await 704 | .unwrap_or_else(|_| "unknown error".to_string()); 705 | bail!("{}: {}", err, detail); 706 | } 707 | Ok(()) 708 | } 709 | 710 | pub async fn get_upload_url( 711 | &self, 712 | file_id: &str, 713 | upload_id: &str, 714 | chunk_count: u64, 715 | ) -> Result> { 716 | debug!(file_id = %file_id, upload_id = %upload_id, "get upload url"); 717 | let drive_id = self.drive_id()?; 718 | let part_info_list = (1..=chunk_count) 719 | .map(|part_number| UploadPartInfo { 720 | part_number, 721 | upload_url: String::new(), 722 | }) 723 | 
.collect(); 724 | let req = GetUploadUrlRequest { 725 | drive_id, 726 | file_id, 727 | upload_id, 728 | part_info_list, 729 | }; 730 | let res: CreateFileWithProofResponse = self 731 | .request( 732 | format!( 733 | "{}/adrive/v1.0/openFile/getUploadUrl", 734 | self.config.api_base_url 735 | ), 736 | &req, 737 | ) 738 | .await? 739 | .context("expect response")?; 740 | Ok(res.part_info_list) 741 | } 742 | 743 | pub async fn get_quota(&self) -> Result<(u64, u64)> { 744 | let drive_id = self.drive_id()?; 745 | let mut data = HashMap::new(); 746 | data.insert("drive_id", drive_id); 747 | let res: GetSpaceInfoResponse = self 748 | .request( 749 | format!("{}/adrive/v1.0/user/getSpaceInfo", self.config.api_base_url), 750 | &data, 751 | ) 752 | .await? 753 | .context("expect response")?; 754 | Ok(( 755 | res.personal_space_info.used_size, 756 | res.personal_space_info.total_size, 757 | )) 758 | } 759 | } 760 | 761 | impl DavMetaData for AliyunFile { 762 | fn len(&self) -> u64 { 763 | self.size 764 | } 765 | 766 | fn modified(&self) -> FsResult { 767 | Ok(*self.updated_at) 768 | } 769 | 770 | fn is_dir(&self) -> bool { 771 | matches!(self.r#type, FileType::Folder) 772 | } 773 | 774 | fn created(&self) -> FsResult { 775 | Ok(*self.created_at) 776 | } 777 | } 778 | 779 | impl DavDirEntry for AliyunFile { 780 | fn name(&self) -> Vec { 781 | self.name.as_bytes().to_vec() 782 | } 783 | 784 | fn metadata(&self) -> FsFuture> { 785 | async move { Ok(Box::new(self.clone()) as Box) }.boxed() 786 | } 787 | } 788 | 789 | pub async fn read_refresh_token(workdir: &Path) -> Result { 790 | let file = workdir.join("refresh_token"); 791 | let token = tokio::fs::read_to_string(&file).await?; 792 | if token.split('.').count() < 3 { 793 | bail!( 794 | "Please remove outdated refresh_token cache for v1.x at {}", 795 | file.display(), 796 | ); 797 | } 798 | Ok(token) 799 | } 800 | -------------------------------------------------------------------------------- /src/drive/model.rs: 
//! Wire types for the AliyunDrive open API.
//!
//! NOTE(review): several generic parameters were lost in extraction and have
//! been restored from field usage (`Option<String>`, `HashMap<String, _>`,
//! `Vec<UploadPartInfo>`, …) — confirm against the upstream repository.

use std::collections::HashMap;
use std::ops;
use std::time::SystemTime;

use ::time::{format_description::well_known::Rfc3339, OffsetDateTime};
use serde::{Deserialize, Deserializer, Serialize};

/// Response of the OAuth token refresh endpoint.
#[derive(Debug, Clone, Deserialize)]
pub struct RefreshTokenResponse {
    pub access_token: String,
    pub refresh_token: String,
    pub expires_in: u64,
    pub token_type: String,
}

#[derive(Debug, Clone, Deserialize)]
pub struct GetDriveInfoResponse {
    pub default_drive_id: String,
    pub resource_drive_id: Option<String>,
    pub backup_drive_id: Option<String>,
}

#[derive(Debug, Clone, Serialize)]
pub struct ListFileRequest<'a> {
    pub drive_id: &'a str,
    pub parent_file_id: &'a str,
    pub limit: u64,
    pub fields: &'a str,
    pub order_by: &'a str,
    pub order_direction: &'a str,
    // Pagination cursor from a previous `ListFileResponse::next_marker`.
    pub marker: Option<&'a str>,
}

#[derive(Debug, Clone, Deserialize)]
pub struct ListFileResponse {
    pub items: Vec<ListFileItem>,
    pub next_marker: String,
}

#[derive(Debug, Clone, Deserialize)]
pub struct ListFileItem {
    pub name: String,
    pub category: Option<String>,
    #[serde(rename = "file_id")]
    pub id: String,
    pub r#type: FileType,
    pub created_at: DateTime,
    pub updated_at: DateTime,
    pub size: Option<u64>,
    pub url: Option<String>,
    pub content_hash: Option<String>,
}

#[derive(Debug, Clone, Serialize)]
pub struct GetFileByPathRequest<'a> {
    pub drive_id: &'a str,
    pub file_path: &'a str,
}

#[derive(Debug, Clone, Serialize)]
pub struct GetFileRequest<'a> {
    pub drive_id: &'a str,
    pub file_id: &'a str,
}

/// Size of one embedded stream of a live photo (`.livp`) file.
#[derive(Debug, Clone, Deserialize)]
pub struct StreamInfo {
    pub size: u64,
}

#[derive(Debug, Clone, Deserialize)]
pub struct GetFileResponse {
    pub name: String,
    pub file_extension: String,
    #[serde(rename = "file_id")]
    pub id: String,
    pub r#type: FileType,
    pub created_at: DateTime,
    pub updated_at: DateTime,
    #[serde(default)]
    pub size: u64,
    pub streams_info: HashMap<String, StreamInfo>,
}

impl From<GetFileResponse> for AliyunFile {
    fn from(res: GetFileResponse) -> AliyunFile {
        // `.livp` live photos are served to WebDAV clients as a zip of their
        // streams, so the reported size must be the size of that zip archive,
        // computed from the fixed ZIP record sizes plus stream payloads.
        let size = if res.file_extension != "livp" || res.streams_info.is_empty() {
            res.size
        } else {
            let name = res.name.replace(".livp", "");
            let mut zip_size = 0;
            for (typ, info) in &res.streams_info {
                let name_len = format!("{}.{}", name, typ).len() as u64;
                // local file header size
                zip_size += 30;
                zip_size += name_len;
                // file size
                zip_size += info.size;
                // central directory entry size
                zip_size += 46;
                zip_size += name_len;
            }
            // End of central directory size
            zip_size += 22;
            zip_size
        };
        AliyunFile {
            name: res.name,
            id: res.id,
            r#type: res.r#type,
            created_at: res.created_at,
            updated_at: res.updated_at,
            size,
            url: None,
            content_hash: None,
        }
    }
}

#[derive(Debug, Clone, Serialize)]
pub struct GetFileDownloadUrlRequest<'a> {
    pub drive_id: &'a str,
    pub file_id: &'a str,
    // Requested validity of the pre-signed URL, in seconds.
    pub expire_sec: usize,
}

#[derive(Debug, Clone, Deserialize)]
pub struct GetFileDownloadUrlResponse {
    pub url: String,
    // Per-stream download URLs for live photos; absent for normal files.
    #[serde(default)]
    pub streams_url: HashMap<String, String>,
    pub expiration: String,
    pub method: String,
}

#[derive(Debug, Clone, Serialize)]
pub struct TrashRequest<'a> {
    pub drive_id: &'a str,
    pub file_id: &'a str,
}

#[derive(Debug, Clone, Serialize)]
pub struct DeleteFileRequest<'a> {
    pub drive_id: &'a str,
    pub file_id: &'a str,
}

#[derive(Debug, Clone, Serialize)]
pub struct CreateFolderRequest<'a> {
    pub check_name_mode: &'a str,
    pub drive_id: &'a str,
    pub name: &'a str,
    pub parent_file_id: &'a str,
    pub r#type: &'a str,
}

#[derive(Debug, Clone, Serialize)]
pub struct RenameFileRequest<'a> {
    pub drive_id: &'a str,
    pub file_id: &'a str,
    pub name: &'a str,
}

#[derive(Debug, Clone, Serialize)]
pub struct MoveFileRequest<'a> {
    pub drive_id: &'a str,
    pub file_id: &'a str,
    pub to_parent_file_id: &'a str,
    pub new_name: Option<&'a str>,
}

#[derive(Debug, Clone, Serialize)]
pub struct CopyFileRequest<'a> {
    pub drive_id: &'a str,
    pub file_id: &'a str,
    pub to_parent_file_id: &'a str,
    pub auto_rename: bool,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UploadPartInfo {
    pub part_number: u64,
    // Empty in requests (server fills it in); skipped when serializing.
    #[serde(skip_serializing_if = "String::is_empty")]
    pub upload_url: String,
}

#[derive(Debug, Clone, Serialize)]
pub struct CreateFileWithProofRequest<'a> {
    pub check_name_mode: &'a str,
    pub content_hash: &'a str,
    pub content_hash_name: &'a str,
    pub drive_id: &'a str,
    pub name: &'a str,
    pub parent_file_id: &'a str,
    pub proof_code: &'a str,
    pub proof_version: &'a str,
    pub size: u64,
    pub part_info_list: Vec<UploadPartInfo>,
    pub r#type: &'a str,
}

#[derive(Debug, Clone, Deserialize)]
pub struct CreateFileWithProofResponse {
    #[serde(default)]
    pub part_info_list: Vec<UploadPartInfo>,
    pub file_id: String,
    pub upload_id: Option<String>,
    pub file_name: String,
}

#[derive(Debug, Clone, Serialize)]
pub struct CompleteUploadRequest<'a> {
    pub drive_id: &'a str,
    pub file_id: &'a str,
    pub upload_id: &'a str,
}

#[derive(Debug, Clone, Serialize)]
pub struct GetUploadUrlRequest<'a> {
    pub drive_id: &'a str,
    pub file_id: &'a str,
    pub upload_id: &'a str,
    pub part_info_list: Vec<UploadPartInfo>,
}

#[derive(Debug, Clone, Deserialize)]
pub struct SpaceInfo {
    pub total_size: u64,
    pub used_size: u64,
}

#[derive(Debug, Clone, Deserialize)]
pub struct GetSpaceInfoResponse {
    pub personal_space_info: SpaceInfo,
}

/// `SystemTime` newtype that deserializes from RFC 3339 timestamps.
#[derive(Debug, Clone)]
pub struct DateTime(SystemTime);

impl DateTime {
    pub fn new(st: SystemTime) -> Self {
        Self(st)
    }
}

impl<'a> Deserialize<'a> for DateTime {
    fn deserialize<D: Deserializer<'a>>(deserializer: D) -> Result<Self, D::Error> {
        let dt = OffsetDateTime::parse(<&str>::deserialize(deserializer)?, &Rfc3339)
            .map_err(serde::de::Error::custom)?;
        Ok(Self(dt.into()))
    }
}

impl ops::Deref for DateTime {
    type Target = SystemTime;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

#[derive(Debug, Clone, Copy, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum FileType {
    Folder,
    File,
}

#[derive(Debug, Clone, Deserialize)]
pub struct AliyunFile {
    pub name: String,
    #[serde(rename = "file_id")]
    pub id: String,
    pub r#type: FileType,
    pub created_at: DateTime,
    pub updated_at: DateTime,
    #[serde(default)]
    pub size: u64,
    pub url: Option<String>,
    pub content_hash: Option<String>,
}

impl AliyunFile {
    /// Synthetic root entry (`/`, id `"root"`) — not returned by the API.
    pub fn new_root() -> Self {
        let now = SystemTime::now();
        Self {
            name: "/".to_string(),
            id: "root".to_string(),
            r#type: FileType::Folder,
            created_at: DateTime(now),
            updated_at: DateTime(now),
            size: 0,
            url: None,
            content_hash: None,
        }
    }
}

impl From<ListFileItem> for AliyunFile {
    fn from(f: ListFileItem) -> Self {
        Self {
            name: f.name,
            id: f.id,
            r#type: f.r#type,
created_at: f.created_at, 306 | updated_at: f.updated_at, 307 | size: f.size.unwrap_or_default(), 308 | // 文件列表接口返回的图片下载地址经常是有问题的, 不使用它 309 | url: if matches!(f.category.as_deref(), Some("image")) { 310 | None 311 | } else { 312 | f.url 313 | }, 314 | content_hash: f.content_hash, 315 | } 316 | } 317 | } 318 | -------------------------------------------------------------------------------- /src/login/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod model; 2 | 3 | use crate::drive::DriveConfig; 4 | use crate::login::model::*; 5 | 6 | pub struct QrCodeScanner { 7 | client: reqwest::Client, 8 | drive_config: DriveConfig, 9 | } 10 | 11 | impl QrCodeScanner { 12 | pub async fn new(drive_config: DriveConfig) -> anyhow::Result { 13 | let client = reqwest::Client::builder() 14 | .pool_idle_timeout(std::time::Duration::from_secs(50)) 15 | .connect_timeout(std::time::Duration::from_secs(10)) 16 | .timeout(std::time::Duration::from_secs(30)) 17 | .build()?; 18 | Ok(Self { 19 | client, 20 | drive_config, 21 | }) 22 | } 23 | } 24 | 25 | impl QrCodeScanner { 26 | pub async fn scan(&self) -> anyhow::Result { 27 | let req = QrCodeRequest { 28 | client_id: self.drive_config.client_id.clone(), 29 | client_secret: self.drive_config.client_secret.clone(), 30 | scopes: vec![ 31 | "user:base".to_string(), 32 | "file:all:read".to_string(), 33 | "file:all:write".to_string(), 34 | ], 35 | width: None, 36 | height: None, 37 | }; 38 | let url = 39 | if self.drive_config.client_id.is_none() || self.drive_config.client_secret.is_none() { 40 | format!( 41 | "{}/oauth/authorize/qrcode", 42 | &self.drive_config.refresh_token_host 43 | ) 44 | } else { 45 | "https://openapi.aliyundrive.com/oauth/authorize/qrcode".to_string() 46 | }; 47 | let resp = self.client.post(url).json(&req).send().await?; 48 | let resp = resp.json::().await?; 49 | Ok(resp) 50 | } 51 | 52 | pub async fn query(&self, sid: &str) -> anyhow::Result { 53 | let url = 
format!("https://openapi.aliyundrive.com/oauth/qrcode/{sid}/status"); 54 | let resp = self.client.get(url).send().await?; 55 | let resp = resp.json::().await?; 56 | Ok(resp) 57 | } 58 | 59 | pub async fn fetch_refresh_token(&self, code: &str) -> anyhow::Result { 60 | let req = AuthorizationCodeRequest { 61 | client_id: self.drive_config.client_id.clone(), 62 | client_secret: self.drive_config.client_secret.clone(), 63 | grant_type: "authorization_code".to_string(), 64 | code: code.to_string(), 65 | }; 66 | let url = 67 | if self.drive_config.client_id.is_none() || self.drive_config.client_secret.is_none() { 68 | format!( 69 | "{}/oauth/access_token", 70 | &self.drive_config.refresh_token_host 71 | ) 72 | } else { 73 | "https://openapi.aliyundrive.com/oauth/access_token".to_string() 74 | }; 75 | let resp = self.client.post(url).json(&req).send().await?; 76 | let resp = resp.json::().await?; 77 | Ok(resp.refresh_token) 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /src/login/model.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | 3 | use serde::{Deserialize, Serialize}; 4 | 5 | #[derive(Debug, Clone, Serialize)] 6 | pub struct QrCodeRequest { 7 | #[serde(skip_serializing_if = "Option::is_none")] 8 | pub client_id: Option, 9 | #[serde(skip_serializing_if = "Option::is_none")] 10 | pub client_secret: Option, 11 | pub scopes: Vec, 12 | #[serde(skip_serializing_if = "Option::is_none")] 13 | pub width: Option, 14 | #[serde(skip_serializing_if = "Option::is_none")] 15 | pub height: Option, 16 | } 17 | 18 | #[derive(Debug, Clone, Deserialize, Serialize)] 19 | pub struct QrCodeResponse { 20 | #[serde(rename = "qrCodeUrl")] 21 | pub qr_code_url: String, 22 | pub sid: String, 23 | } 24 | 25 | #[derive(Debug, Clone, Copy, Eq, PartialEq)] 26 | pub enum QrCodeStatus { 27 | WaitLogin, 28 | ScanSuccess, 29 | LoginSuccess, 30 | QrCodeExpired, 31 | } 32 | 33 | impl 
FromStr for QrCodeStatus { 34 | type Err = String; 35 | 36 | fn from_str(s: &str) -> Result { 37 | use QrCodeStatus::*; 38 | 39 | match s { 40 | "WaitLogin" => Ok(WaitLogin), 41 | "ScanSuccess" => Ok(ScanSuccess), 42 | "LoginSuccess" => Ok(LoginSuccess), 43 | _ => Ok(QrCodeExpired), 44 | } 45 | } 46 | } 47 | 48 | impl<'de> Deserialize<'de> for QrCodeStatus { 49 | fn deserialize(deserializer: D) -> Result 50 | where 51 | D: serde::de::Deserializer<'de>, 52 | { 53 | let s = String::deserialize(deserializer)?; 54 | FromStr::from_str(&s).map_err(serde::de::Error::custom) 55 | } 56 | } 57 | 58 | #[derive(Debug, Clone, Deserialize)] 59 | pub struct QrCodeStatusResponse { 60 | pub status: QrCodeStatus, 61 | #[serde(rename = "authCode")] 62 | pub auth_code: Option, 63 | } 64 | 65 | impl QrCodeStatusResponse { 66 | pub fn is_success(&self) -> bool { 67 | matches!(self.status, QrCodeStatus::LoginSuccess) 68 | } 69 | } 70 | 71 | #[derive(Debug, Clone, Serialize)] 72 | pub struct AuthorizationCodeRequest { 73 | #[serde(skip_serializing_if = "Option::is_none")] 74 | pub client_id: Option, 75 | #[serde(skip_serializing_if = "Option::is_none")] 76 | pub client_secret: Option, 77 | pub grant_type: String, 78 | pub code: String, 79 | } 80 | 81 | #[derive(Debug, Clone, Deserialize)] 82 | pub struct AuthorizationCodeResponse { 83 | // pub token_type: String, 84 | // pub access_token: String, 85 | pub refresh_token: String, 86 | // pub expires_in: usize, 87 | } 88 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | use std::path::PathBuf; 3 | 4 | use anyhow::bail; 5 | use clap::{Parser, Subcommand}; 6 | use dav_server::{memls::MemLs, DavHandler}; 7 | #[cfg(unix)] 8 | use futures_util::stream::StreamExt; 9 | use self_update::cargo_crate_version; 10 | use tracing::{debug, info, warn}; 11 | use tracing_subscriber::EnvFilter; 12 | #[cfg(unix)] 
13 | use {signal_hook::consts::signal::*, signal_hook_tokio::Signals}; 14 | 15 | use cache::Cache; 16 | use drive::{read_refresh_token, AliyunDrive, DriveConfig, DriveType}; 17 | use vfs::AliyunDriveFileSystem; 18 | use webdav::WebDavServer; 19 | 20 | mod cache; 21 | mod drive; 22 | mod login; 23 | mod vfs; 24 | mod webdav; 25 | 26 | #[derive(Parser, Debug)] 27 | #[command(name = "aliyundrive-webdav", about, version, author)] 28 | #[command(args_conflicts_with_subcommands = true)] 29 | struct Opt { 30 | /// Listen host 31 | #[arg(long, env = "HOST", default_value = "0.0.0.0")] 32 | host: String, 33 | /// Listen port 34 | #[arg(short, env = "PORT", long, default_value = "8080")] 35 | port: u16, 36 | /// Aliyun drive client_id 37 | #[arg(long, env = "CLIENT_ID")] 38 | client_id: Option, 39 | /// Aliyun drive client_secret 40 | #[arg(long, env = "CLIENT_SECRET")] 41 | client_secret: Option, 42 | /// Aliyun drive type 43 | #[arg(long, env = "DRIVE_TYPE")] 44 | drive_type: Option, 45 | /// Aliyun drive refresh token 46 | #[arg(short, long, env = "REFRESH_TOKEN")] 47 | refresh_token: Option, 48 | /// WebDAV authentication username 49 | #[arg(short = 'U', long, env = "WEBDAV_AUTH_USER")] 50 | auth_user: Option, 51 | /// WebDAV authentication password 52 | #[arg(short = 'W', long, env = "WEBDAV_AUTH_PASSWORD")] 53 | auth_password: Option, 54 | /// Automatically generate index.html 55 | #[arg(short = 'I', long)] 56 | auto_index: bool, 57 | /// Read/download buffer size in bytes, defaults to 10MB 58 | #[arg(short = 'S', long, default_value = "10485760")] 59 | read_buffer_size: usize, 60 | /// Upload buffer size in bytes, defaults to 16MB 61 | #[arg(long, default_value = "16777216")] 62 | upload_buffer_size: usize, 63 | /// Directory entries cache size 64 | #[arg(long, default_value = "1000")] 65 | cache_size: u64, 66 | /// Directory entries cache expiration time in seconds 67 | #[arg(long, default_value = "600")] 68 | cache_ttl: u64, 69 | /// Root directory path 70 | 
#[arg(long, env = "WEBDAV_ROOT", default_value = "/")] 71 | root: String, 72 | /// Working directory, refresh_token will be stored in there if specified 73 | #[arg(short = 'w', long)] 74 | workdir: Option, 75 | /// Delete file permanently instead of trashing it 76 | #[arg(long)] 77 | no_trash: bool, 78 | /// Enable read only mode 79 | #[arg(long)] 80 | read_only: bool, 81 | /// TLS certificate file path 82 | #[arg(long, env = "TLS_CERT")] 83 | tls_cert: Option, 84 | /// TLS private key file path 85 | #[arg(long, env = "TLS_KEY")] 86 | tls_key: Option, 87 | /// Prefix to be stripped off when handling request. 88 | #[arg(long, env = "WEBDAV_STRIP_PREFIX")] 89 | strip_prefix: Option, 90 | /// Enable debug log 91 | #[arg(long)] 92 | debug: bool, 93 | /// Disable self auto upgrade 94 | #[arg(long)] 95 | no_self_upgrade: bool, 96 | /// Skip uploading same size file 97 | #[arg(long)] 98 | skip_upload_same_size: bool, 99 | /// Prefer downloading using HTTP protocol 100 | #[arg(long)] 101 | prefer_http_download: bool, 102 | /// Enable 302 redirect when possible 103 | #[arg(long)] 104 | redirect: bool, 105 | 106 | #[command(subcommand)] 107 | subcommands: Option, 108 | } 109 | 110 | #[derive(Subcommand, Debug)] 111 | enum Commands { 112 | /// Scan QRCode 113 | #[command(subcommand)] 114 | Qr(QrCommand), 115 | } 116 | 117 | #[derive(Subcommand, Debug)] 118 | enum QrCommand { 119 | /// Scan QRCode login to get a token 120 | Login, 121 | /// Generate a QRCode 122 | Generate, 123 | /// Query the QRCode login result 124 | #[command(arg_required_else_help = true)] 125 | Query { 126 | /// Query parameter sid 127 | #[arg(long)] 128 | sid: String, 129 | }, 130 | } 131 | 132 | #[tokio::main(flavor = "multi_thread")] 133 | async fn main() -> anyhow::Result<()> { 134 | #[cfg(feature = "native-tls-vendored")] 135 | openssl_probe::init_ssl_cert_env_vars(); 136 | 137 | let opt = Opt::parse(); 138 | if env::var("RUST_LOG").is_err() { 139 | if opt.debug { 140 | env::set_var("RUST_LOG", 
"aliyundrive_webdav=debug,reqwest=debug"); 141 | } else { 142 | env::set_var("RUST_LOG", "aliyundrive_webdav=info,reqwest=warn"); 143 | } 144 | } 145 | tracing_subscriber::fmt() 146 | .with_env_filter(EnvFilter::from_default_env()) 147 | .with_timer(tracing_subscriber::fmt::time::time()) 148 | .init(); 149 | 150 | let workdir = opt 151 | .workdir 152 | .or_else(|| dirs::cache_dir().map(|c| c.join("aliyundrive-webdav"))); 153 | let refresh_token_host = if opt.client_id.is_none() || opt.client_secret.is_none() { 154 | env::var("ALIYUNDRIVE_OAUTH_SERVER") 155 | .unwrap_or_else(|_| "https://aliyundrive-oauth.messense.me".to_string()) 156 | } else { 157 | "https://openapi.aliyundrive.com".to_string() 158 | }; 159 | let drive_config = DriveConfig { 160 | api_base_url: "https://openapi.aliyundrive.com".to_string(), 161 | refresh_token_host, 162 | workdir, 163 | client_id: opt.client_id.clone(), 164 | client_secret: opt.client_secret.clone(), 165 | drive_type: opt.drive_type.clone(), 166 | }; 167 | 168 | // subcommands 169 | if let Some(Commands::Qr(qr)) = opt.subcommands.as_ref() { 170 | match qr { 171 | QrCommand::Login => { 172 | let refresh_token = login(drive_config.clone(), 120).await?; 173 | println!("\nrefresh_token:\n\n{}", refresh_token) 174 | } 175 | QrCommand::Generate => { 176 | let scanner = login::QrCodeScanner::new(drive_config.clone()).await?; 177 | let data = scanner.scan().await?; 178 | println!("{}", serde_json::to_string_pretty(&data)?); 179 | } 180 | QrCommand::Query { sid } => { 181 | let scanner = login::QrCodeScanner::new(drive_config.clone()).await?; 182 | let query_result = scanner.query(sid).await?; 183 | if query_result.is_success() { 184 | let code = query_result.auth_code.unwrap(); 185 | let refresh_token = scanner.fetch_refresh_token(&code).await?; 186 | println!("{}", refresh_token) 187 | } 188 | } 189 | } 190 | return Ok(()); 191 | } 192 | 193 | if env::var("NO_SELF_UPGRADE").is_err() && !opt.no_self_upgrade { 194 | 
tokio::task::spawn_blocking(move || { 195 | if let Err(e) = check_for_update(opt.debug) { 196 | debug!("failed to check for update: {}", e); 197 | } 198 | }) 199 | .await?; 200 | } 201 | 202 | let auth_user = opt.auth_user; 203 | let auth_password = opt.auth_password; 204 | if (auth_user.is_some() && auth_password.is_none()) 205 | || (auth_user.is_none() && auth_password.is_some()) 206 | { 207 | bail!("auth-user and auth-password must be specified together."); 208 | } 209 | 210 | let tls_config = match (opt.tls_cert, opt.tls_key) { 211 | (Some(cert), Some(key)) => Some((cert, key)), 212 | (None, None) => None, 213 | _ => bail!("tls-cert and tls-key must be specified together."), 214 | }; 215 | 216 | let refresh_token_from_file = if let Some(dir) = drive_config.workdir.as_ref() { 217 | read_refresh_token(dir).await.ok() 218 | } else { 219 | None 220 | }; 221 | let refresh_token = if opt.refresh_token.is_none() 222 | && refresh_token_from_file.is_none() 223 | && atty::is(atty::Stream::Stdout) 224 | { 225 | login(drive_config.clone(), 30).await? 
226 | } else { 227 | let token = opt.refresh_token.unwrap_or_default(); 228 | if !token.is_empty() && token.split('.').count() < 3 { 229 | bail!("Invalid refresh token value found in `--refresh-token` argument"); 230 | } 231 | token 232 | }; 233 | 234 | let drive = AliyunDrive::new(drive_config, refresh_token).await?; 235 | let mut fs = AliyunDriveFileSystem::new(drive, opt.root, opt.cache_size, opt.cache_ttl)?; 236 | fs.set_no_trash(opt.no_trash) 237 | .set_read_only(opt.read_only) 238 | .set_upload_buffer_size(opt.upload_buffer_size) 239 | .set_skip_upload_same_size(opt.skip_upload_same_size) 240 | .set_prefer_http_download(opt.prefer_http_download); 241 | debug!("aliyundrive file system initialized"); 242 | 243 | #[cfg(unix)] 244 | let dir_cache = fs.dir_cache.clone(); 245 | 246 | let mut dav_server_builder = DavHandler::builder() 247 | .filesystem(Box::new(fs)) 248 | .locksystem(MemLs::new()) 249 | .read_buf_size(opt.read_buffer_size) 250 | .autoindex(opt.auto_index) 251 | .redirect(opt.redirect); 252 | if let Some(prefix) = opt.strip_prefix { 253 | dav_server_builder = dav_server_builder.strip_prefix(prefix); 254 | } 255 | 256 | let dav_server = dav_server_builder.build_handler(); 257 | debug!( 258 | read_buffer_size = opt.read_buffer_size, 259 | auto_index = opt.auto_index, 260 | "webdav handler initialized" 261 | ); 262 | 263 | let server = WebDavServer { 264 | host: opt.host, 265 | port: opt.port, 266 | auth_user, 267 | auth_password, 268 | tls_config, 269 | handler: dav_server, 270 | }; 271 | 272 | #[cfg(not(unix))] 273 | server.serve().await?; 274 | 275 | #[cfg(unix)] 276 | { 277 | let signals = Signals::new([SIGHUP])?; 278 | let handle = signals.handle(); 279 | let signals_task = tokio::spawn(handle_signals(signals, dir_cache)); 280 | 281 | server.serve().await?; 282 | 283 | // Terminate the signal stream. 
284 | handle.close(); 285 | signals_task.await?; 286 | } 287 | Ok(()) 288 | } 289 | 290 | #[cfg(unix)] 291 | async fn handle_signals(mut signals: Signals, dir_cache: Cache) { 292 | while let Some(signal) = signals.next().await { 293 | match signal { 294 | SIGHUP => { 295 | dir_cache.invalidate_all(); 296 | info!("directory cache invalidated by SIGHUP"); 297 | } 298 | _ => unreachable!(), 299 | } 300 | } 301 | } 302 | 303 | async fn login(drive_config: DriveConfig, timeout: u64) -> anyhow::Result { 304 | const SLEEP: u64 = 3; 305 | 306 | let scanner = login::QrCodeScanner::new(drive_config).await?; 307 | // 返回二维码内容结果集 308 | let sid = scanner.scan().await?.sid; 309 | // 需要生成二维码的内容 310 | let qrcode_content = format!("https://www.aliyundrive.com/o/oauth/authorize?sid={sid}"); 311 | // 打印二维码 312 | qr2term::print_qr(&qrcode_content)?; 313 | info!("Please scan the qrcode to login in {} seconds", timeout); 314 | let loop_count = timeout / SLEEP; 315 | for _i in 0..loop_count { 316 | tokio::time::sleep(tokio::time::Duration::from_secs(SLEEP)).await; 317 | // 模拟轮训查询二维码状态 318 | let query_result = scanner.query(&sid).await?; 319 | if !query_result.is_success() { 320 | continue; 321 | } 322 | let code = query_result.auth_code.unwrap(); 323 | let refresh_token = scanner.fetch_refresh_token(&code).await?; 324 | return Ok(refresh_token); 325 | } 326 | bail!("Login failed") 327 | } 328 | 329 | fn check_for_update(show_output: bool) -> anyhow::Result<()> { 330 | use self_update::update::UpdateStatus; 331 | #[cfg(unix)] 332 | use std::os::unix::process::CommandExt; 333 | use std::process::Command; 334 | 335 | let auth_token = env::var("GITHUB_TOKEN") 336 | .unwrap_or_else(|_| env::var("HOMEBREW_GITHUB_API_TOKEN").unwrap_or_default()); 337 | let status = self_update::backends::github::Update::configure() 338 | .repo_owner("messense") 339 | .repo_name("aliyundrive-webdav") 340 | .bin_name("aliyundrive-webdav") 341 | .target(if cfg!(target_os = "macos") { 342 | "apple-darwin" 343 | } 
else { 344 | self_update::get_target() 345 | }) 346 | .auth_token(&auth_token) 347 | .show_output(show_output) 348 | .show_download_progress(true) 349 | .no_confirm(true) 350 | .current_version(cargo_crate_version!()) 351 | .build()? 352 | .update_extended()?; 353 | if let UpdateStatus::Updated(ref release) = status { 354 | if let Some(body) = &release.body { 355 | if !body.trim().is_empty() { 356 | info!("aliyundrive-webdav upgraded to {}:\n", release.version); 357 | info!("{}", body); 358 | } else { 359 | info!("aliyundrive-webdav upgraded to {}", release.version); 360 | } 361 | } 362 | } else { 363 | info!("aliyundrive-webdav is up-to-date"); 364 | } 365 | 366 | if status.updated() { 367 | warn!("Respawning..."); 368 | let current_exe = env::current_exe(); 369 | let mut command = Command::new(current_exe?); 370 | command.args(env::args().skip(1)).env("NO_SELF_UPGRADE", ""); 371 | #[cfg(unix)] 372 | { 373 | let err = command.exec(); 374 | bail!(err); 375 | } 376 | 377 | #[cfg(windows)] 378 | { 379 | let status = command.spawn().and_then(|mut c| c.wait())?; 380 | bail!("aliyundrive-webdav upgraded"); 381 | } 382 | } 383 | Ok(()) 384 | } 385 | -------------------------------------------------------------------------------- /src/vfs.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::fmt::{Debug, Formatter}; 3 | use std::io::{Cursor, SeekFrom, Write}; 4 | use std::path::{Path, PathBuf}; 5 | use std::sync::Arc; 6 | use std::time::{SystemTime, UNIX_EPOCH}; 7 | 8 | use anyhow::Result; 9 | use bytes::{Buf, BufMut, Bytes, BytesMut}; 10 | use dashmap::DashMap; 11 | use dav_server::{ 12 | davpath::DavPath, 13 | fs::{ 14 | DavDirEntry, DavFile, DavFileSystem, DavMetaData, FsError, FsFuture, FsStream, OpenOptions, 15 | ReadDirMeta, 16 | }, 17 | }; 18 | use futures_util::future::{ready, FutureExt}; 19 | use path_slash::PathBufExt; 20 | use tracing::{debug, error, trace, warn}; 21 | use 
zip::write::{FileOptions, ZipWriter};

use crate::{
    cache::Cache,
    drive::{model::GetFileDownloadUrlResponse, AliyunDrive, AliyunFile, DateTime, FileType},
};

/// WebDAV filesystem implementation backed by Aliyun Drive.
///
/// NOTE(review): generic type parameters in this chunk were lost during text
/// extraction (angle brackets stripped); they have been reconstructed from
/// usage — confirm against the original source.
#[derive(Clone)]
pub struct AliyunDriveFileSystem {
    drive: AliyunDrive,
    /// Directory-listing cache keyed by directory path string.
    pub(crate) dir_cache: Cache,
    /// Files currently being uploaded, grouped by parent folder id.
    uploading: Arc<DashMap<String, Vec<AliyunFile>>>,
    /// Absolute path inside the drive exposed as the WebDAV root.
    root: PathBuf,
    /// When true, deletions bypass the trash (permanent delete).
    no_trash: bool,
    read_only: bool,
    /// Upload chunk size in bytes (default 16 MiB).
    upload_buffer_size: usize,
    /// Skip re-uploading a file whose size matches the existing remote file.
    skip_upload_same_size: bool,
    /// Rewrite download URLs to plain HTTP when enabled.
    prefer_http_download: bool,
}

impl AliyunDriveFileSystem {
    #[allow(clippy::too_many_arguments)]
    pub fn new(drive: AliyunDrive, root: String, cache_size: u64, cache_ttl: u64) -> Result<Self> {
        let dir_cache = Cache::new(cache_size, cache_ttl);
        debug!("dir cache initialized");
        // Make sure the exposed root is always an absolute path.
        let root = if root.starts_with('/') {
            PathBuf::from(root)
        } else {
            Path::new("/").join(root)
        };
        Ok(Self {
            drive,
            dir_cache,
            uploading: Arc::new(DashMap::new()),
            root,
            no_trash: false,
            read_only: false,
            upload_buffer_size: 16 * 1024 * 1024,
            skip_upload_same_size: false,
            prefer_http_download: false,
        })
    }

    pub fn set_read_only(&mut self, read_only: bool) -> &mut Self {
        self.read_only = read_only;
        self
    }

    pub fn set_no_trash(&mut self, no_trash: bool) -> &mut Self {
        self.no_trash = no_trash;
        self
    }

    pub fn set_upload_buffer_size(&mut self, upload_buffer_size: usize) -> &mut Self {
        self.upload_buffer_size = upload_buffer_size;
        self
    }

    pub fn set_skip_upload_same_size(&mut self, skip_upload_same_size: bool) -> &mut Self {
        self.skip_upload_same_size = skip_upload_same_size;
        self
    }

    pub fn set_prefer_http_download(&mut self, prefer_http_download: bool) -> &mut Self {
        self.prefer_http_download = prefer_http_download;
        self
    }

    /// Look a path up using only the directory-listing cache.
    /// `Ok(None)` means "not cached", not "does not exist".
    fn find_in_cache(&self, path: &Path) -> Result<Option<AliyunFile>, FsError> {
        if let Some(parent) = path.parent() {
            let parent_str = parent.to_string_lossy();
            let file_name = path
                .file_name()
                .ok_or(FsError::NotFound)?
                .to_string_lossy()
                .into_owned();
            let file = self.dir_cache.get(&parent_str).and_then(|files| {
                for file in &files {
                    if file.name == file_name {
                        return Some(file.clone());
                    }
                }
                None
            });
            Ok(file)
        } else {
            // No parent component: this is the drive root itself.
            let root = AliyunFile::new_root();
            Ok(Some(root))
        }
    }

    /// Resolve a path to a file: cache first, then the drive's path lookup
    /// API, and finally a segment-by-segment directory walk for paths the
    /// API cannot handle.
    async fn get_file(&self, path: PathBuf) -> Result<Option<AliyunFile>, FsError> {
        let path_str = path.to_slash_lossy();
        let file = self.find_in_cache(&path)?;
        if let Some(file) = file {
            trace!(path = %path.display(), file_id = %file.id, "file found in cache");
            Ok(Some(file))
        } else {
            trace!(path = %path.display(), "file not found in cache");
            if let Ok(Some(file)) = self.drive.get_by_path(&path_str).await {
                return Ok(Some(file));
            }

            // path may contain whitespaces which get_by_path can't handle
            // so we try to find it in directory
            let parts: Vec<&str> = path_str.split('/').collect();
            let parts_len = parts.len();
            let filename = parts[parts_len - 1];
            let mut prefix = PathBuf::from("/");
            for part in &parts[0..parts_len - 1] {
                let parent = prefix.join(part);
                prefix = parent.clone();
                let files = self.read_dir_and_cache(parent).await?;
                if let Some(file) = files.iter().find(|f| f.name == filename) {
                    trace!(path = %path.display(), file_id = %file.id, "file found in cache");
                    return Ok(Some(file.clone()));
                }
            }
            Ok(None)
        }
    }

    /// List a directory (populating the cache on miss) and merge in any
    /// files that are still mid-upload so clients can see them.
    async fn read_dir_and_cache(&self, path: PathBuf) -> Result<Vec<AliyunFile>, FsError> {
        let path_str = path.to_slash_lossy();
        let parent_file_id = if path_str == "/" {
            "root".to_string()
        } else {
            match self.find_in_cache(&path) {
                Ok(Some(file)) => file.id,
                _ => match self.drive.get_by_path(&path_str).await {
                    Ok(Some(file)) => file.id,
                    Ok(None) => return Err(FsError::NotFound),
                    Err(err) => {
                        error!(path = %path_str, error = %err, "get_by_path failed");
                        return Err(FsError::GeneralFailure);
                    }
                },
            }
        };
        let mut files = if let Some(files) = self.dir_cache.get(&path_str) {
            debug!(path = %path_str, "read_dir cache hit");
            files
        } else {
            let res = self
                .list_files_and_cache(path_str.to_string(), parent_file_id.clone())
                .await;
            match res {
                Ok(files) => {
                    debug!(path = %path_str, "read_dir cache miss");
                    files
                }
                Err(err) => {
                    // A remote 404 maps to NotFound; everything else is
                    // reported as an opaque failure.
                    if let Some(req_err) = err.downcast_ref::<reqwest::Error>() {
                        if matches!(req_err.status(), Some(reqwest::StatusCode::NOT_FOUND)) {
                            debug!(path = %path_str, "read_dir not found");
                            return Err(FsError::NotFound);
                        } else {
                            error!(path = %path_str, error = %err, "list_files_and_cache failed");
                            return Err(FsError::GeneralFailure);
                        }
                    } else {
                        error!(path = %path_str, error = %err, "list_files_and_cache failed");
                        return Err(FsError::GeneralFailure);
                    }
                }
            }
        };
        let uploading_files = self.list_uploading_files(&parent_file_id);
        if !uploading_files.is_empty() {
            debug!("added {} uploading files", uploading_files.len());
            files.extend(uploading_files);
        }
        Ok(files)
    }

    /// Snapshot of the in-progress uploads under a folder.
    fn list_uploading_files(&self, parent_file_id: &str) -> Vec<AliyunFile> {
        self.uploading
            .get(parent_file_id)
            .map(|val_ref| val_ref.value().clone())
            .unwrap_or_default()
    }

    /// Drop a finished (or aborted) upload from the in-progress list.
    fn remove_uploading_file(&self, parent_file_id: &str, name: &str) {
        if let Some(mut files) = self.uploading.get_mut(parent_file_id) {
            if let Some(index) = files.iter().position(|x| x.name == name) {
                files.swap_remove(index);
            }
        }
    }

    async fn list_files_and_cache(
        &self,
        path_str: String,
        parent_file_id: String,
    ) -> Result<Vec<AliyunFile>> {
        let files = self.drive.list_all(&parent_file_id).await?;
        self.cache_dir(path_str, files.clone()).await;
        Ok(files)
    }

    async fn cache_dir(&self, dir_path: String, files: Vec<AliyunFile>) {
        trace!(path = %dir_path, count = files.len(), "cache dir");
        self.dir_cache.insert(dir_path, files).await;
    }

    /// Map an incoming DAV path onto the configured root directory.
    fn normalize_dav_path(&self, dav_path: &DavPath) -> PathBuf {
        let path = dav_path.as_pathbuf();
        if self.root.parent().is_none() || path.starts_with(&self.root) {
            return path;
        }
        let rel_path = dav_path.as_rel_ospath();
        if rel_path == Path::new("") {
            return self.root.clone();
        }
        self.root.join(rel_path)
    }
}

impl DavFileSystem for AliyunDriveFileSystem {
    fn open<'a>(
        &'a self,
        dav_path: &'a DavPath,
        options: OpenOptions,
    ) -> FsFuture<Box<dyn DavFile>> {
        let path = self.normalize_dav_path(dav_path);
        let mode = if options.write { "write" } else { "read" };
        debug!(path = %path.display(), mode = %mode, "fs: open");
        async move {
            if options.append {
                // Can't support open in write-append mode
                error!(path = %path.display(), "unsupported write-append mode");
                return Err(FsError::NotImplemented);
            }
            let parent_path = path.parent().ok_or(FsError::NotFound)?;
            let parent_file = self
                .get_file(parent_path.to_path_buf())
                .await?
                .ok_or(FsError::NotFound)?;
            // Extract an expected SHA-1 from the client-supplied checksum, if any.
            let sha1 = options.checksum.and_then(|c| {
                if let Some((algo, hash)) = c.split_once(':') {
                    if algo.eq_ignore_ascii_case("sha1") {
                        Some(hash.to_string())
                    } else {
                        None
                    }
                } else {
                    None
                }
            });
            let mut dav_file = if let Some(file) = self.get_file(path.clone()).await?
            {
                if options.write && options.create_new {
                    return Err(FsError::Exists);
                }
                if options.write && self.read_only {
                    return Err(FsError::Forbidden);
                }
                AliyunDavFile::new(
                    self.clone(),
                    file,
                    parent_file.id,
                    parent_path.to_path_buf(),
                    options.size.unwrap_or_default(),
                    sha1,
                )
            } else if options.write && (options.create || options.create_new) {
                if self.read_only {
                    return Err(FsError::Forbidden);
                }

                let size = options.size;
                let name = dav_path
                    .file_name()
                    .ok_or(FsError::GeneralFailure)?
                    .to_string();

                // Ignore some macOS special files (.DS_Store / AppleDouble "._*").
                if name == ".DS_Store" || name.starts_with("._") {
                    return Err(FsError::NotFound);
                }

                let now = SystemTime::now();
                // Placeholder entry with an empty id; the real id is assigned
                // once the upload session is created.
                let file = AliyunFile {
                    name,
                    id: "".to_string(),
                    r#type: FileType::File,
                    created_at: DateTime::new(now),
                    updated_at: DateTime::new(now),
                    size: size.unwrap_or(0),
                    url: None,
                    content_hash: None,
                };
                let mut uploading = self.uploading.entry(parent_file.id.clone()).or_default();
                uploading.push(file.clone());
                AliyunDavFile::new(
                    self.clone(),
                    file,
                    parent_file.id,
                    parent_path.to_path_buf(),
                    size.unwrap_or(0),
                    sha1,
                )
            } else {
                return Err(FsError::NotFound);
            };
            dav_file.http_download = self.prefer_http_download;
            Ok(Box::new(dav_file) as Box<dyn DavFile>)
        }
        .boxed()
    }

    fn read_dir<'a>(
        &'a self,
        path: &'a DavPath,
        _meta: ReadDirMeta,
    ) -> FsFuture<FsStream<Box<dyn DavDirEntry>>> {
        let path = self.normalize_dav_path(path);
        debug!(path = %path.display(), "fs: read_dir");
        async move {
            let files = self.read_dir_and_cache(path.clone()).await?;
            let mut v: Vec<Box<dyn DavDirEntry>> = Vec::with_capacity(files.len());
            for file in files {
                v.push(Box::new(file));
            }
            let stream = futures_util::stream::iter(v);
            Ok(Box::pin(stream) as FsStream<Box<dyn DavDirEntry>>)
        }
        .boxed()
    }

    fn metadata<'a>(&'a self, path: &'a DavPath) -> FsFuture<Box<dyn DavMetaData>> {
        let path = self.normalize_dav_path(path);
        debug!(path = %path.display(), "fs: metadata");
        async move {
            let file = self.get_file(path).await?.ok_or(FsError::NotFound)?;
            Ok(Box::new(file) as Box<dyn DavMetaData>)
        }
        .boxed()
    }

    fn create_dir<'a>(&'a self, dav_path: &'a DavPath) -> FsFuture<()> {
        let path = self.normalize_dav_path(dav_path);
        debug!(path = %path.display(), "fs: create_dir");
        async move {
            if self.read_only {
                return Err(FsError::Forbidden);
            }

            let parent_path = path.parent().ok_or(FsError::NotFound)?;
            let parent_file = self
                .get_file(parent_path.to_path_buf())
                .await?
                .ok_or(FsError::NotFound)?;
            // Can only create a directory inside a folder.
            if !matches!(parent_file.r#type, FileType::Folder) {
                return Err(FsError::Forbidden);
            }
            if let Some(name) = path.file_name() {
                let name = name.to_string_lossy().into_owned();
                self.drive
                    .create_folder(&parent_file.id, &name)
                    .await
                    .map_err(|err| {
                        error!(path = %path.display(), error = %err, "create folder failed");
                        FsError::GeneralFailure
                    })?;
                // Parent listing changed; drop its cache entry.
                self.dir_cache.invalidate(parent_path).await;
                Ok(())
            } else {
                Err(FsError::Forbidden)
            }
        }
        .boxed()
    }

    fn remove_dir<'a>(&'a self, dav_path: &'a DavPath) -> FsFuture<()> {
        let path = self.normalize_dav_path(dav_path);
        debug!(path = %path.display(), "fs: remove_dir");
        async move {
            if self.read_only {
                return Err(FsError::Forbidden);
            }

            let file = self
                .get_file(path.clone())
                .await?
                .ok_or(FsError::NotFound)?;
            if !matches!(file.r#type, FileType::Folder) {
                return Err(FsError::Forbidden);
            }
            self.drive
                .remove_file(&file.id, !self.no_trash)
                .await
                .map_err(|err| {
                    error!(path = %path.display(), error = %err, "remove directory failed");
                    FsError::GeneralFailure
                })?;
            self.dir_cache.invalidate(&path).await;
            self.dir_cache.invalidate_parent(&path).await;
            Ok(())
        }
        .boxed()
    }

    fn remove_file<'a>(&'a self, dav_path: &'a DavPath) -> FsFuture<()> {
        let path = self.normalize_dav_path(dav_path);
        debug!(path = %path.display(), "fs: remove_file");
        async move {
            if self.read_only {
                return Err(FsError::Forbidden);
            }

            let file = self
                .get_file(path.clone())
                .await?
                .ok_or(FsError::NotFound)?;
            if !matches!(file.r#type, FileType::File) {
                return Err(FsError::Forbidden);
            }
            self.drive
                .remove_file(&file.id, !self.no_trash)
                .await
                .map_err(|err| {
                    error!(path = %path.display(), error = %err, "remove file failed");
                    FsError::GeneralFailure
                })?;
            self.dir_cache.invalidate_parent(&path).await;
            Ok(())
        }
        .boxed()
    }

    fn copy<'a>(&'a self, from_dav: &'a DavPath, to_dav: &'a DavPath) -> FsFuture<()> {
        let from = self.normalize_dav_path(from_dav);
        let to = self.normalize_dav_path(to_dav);
        debug!(from = %from.display(), to = %to.display(), "fs: copy");
        async move {
            if self.read_only {
                return Err(FsError::Forbidden);
            }

            let file = self
                .get_file(from.clone())
                .await?
                .ok_or(FsError::NotFound)?;
            let to_parent_file = self
                .get_file(to.parent().unwrap().to_path_buf())
                .await?
                .ok_or(FsError::NotFound)?;
            self.drive
                .copy_file(&file.id, &to_parent_file.id)
                .await
                .map_err(|err| {
                    error!(from = %from.display(), to = %to.display(), error = %err, "copy file failed");
                    FsError::GeneralFailure
                })?;

            self.dir_cache.invalidate(&to).await;
            self.dir_cache.invalidate_parent(&to).await;
            Ok(())
        }
        .boxed()
    }

    fn rename<'a>(&'a self, from_dav: &'a DavPath, to_dav: &'a DavPath) -> FsFuture<()> {
        let from = self.normalize_dav_path(from_dav);
        let to = self.normalize_dav_path(to_dav);
        debug!(from = %from.display(), to = %to.display(), "fs: rename");
        async move {
            if self.read_only {
                return Err(FsError::Forbidden);
            }

            let is_dir;
            // Same parent: rename in place; different parent: move.
            if from.parent() == to.parent() {
                // rename
                if let Some(name) = to.file_name() {
                    let file = self
                        .get_file(from.clone())
                        .await?
                        .ok_or(FsError::NotFound)?;
                    is_dir = matches!(file.r#type, FileType::Folder);
                    let name = name.to_string_lossy().into_owned();
                    self.drive
                        .rename_file(&file.id, &name)
                        .await
                        .map_err(|err| {
                            error!(from = %from.display(), to = %to.display(), error = %err, "rename file failed");
                            FsError::GeneralFailure
                        })?;
                } else {
                    return Err(FsError::Forbidden);
                }
            } else {
                // move
                let file = self
                    .get_file(from.clone())
                    .await?
                    .ok_or(FsError::NotFound)?;
                is_dir = matches!(file.r#type, FileType::Folder);
                let to_parent_file = self
                    .get_file(to.parent().unwrap().to_path_buf())
                    .await?
                    .ok_or(FsError::NotFound)?;
                let new_name = to_dav.file_name();
                self.drive
                    .move_file(&file.id, &to_parent_file.id, new_name)
                    .await
                    .map_err(|err| {
                        error!(from = %from.display(), to = %to.display(), error = %err, "move file failed");
                        FsError::GeneralFailure
                    })?;
            }

            if is_dir {
                self.dir_cache.invalidate(&from).await;
            }
            self.dir_cache.invalidate_parent(&from).await;
            self.dir_cache.invalidate_parent(&to).await;
            Ok(())
        }
        .boxed()
    }

    fn get_quota(&self) -> FsFuture<(u64, Option<u64>)> {
        debug!("fs: get_quota");
        async move {
            let (used, total) = self.drive.get_quota().await.map_err(|err| {
                error!(error = %err, "get quota failed");
                FsError::GeneralFailure
            })?;
            Ok((used, Some(total)))
        }
        .boxed()
    }

    fn have_props<'a>(
        &'a self,
        _path: &'a DavPath,
    ) -> std::pin::Pin<Box<dyn Future<Output = bool> + Send + 'a>> {
        Box::pin(ready(true))
    }

    /// Serve the ownCloud `checksums` property so sync clients can verify
    /// content by SHA-1; everything else is NotImplemented.
    fn get_prop(&self, dav_path: &DavPath, prop: dav_server::fs::DavProp) -> FsFuture<Vec<u8>> {
        let path = self.normalize_dav_path(dav_path);
        let prop_name = match prop.prefix.as_ref() {
            Some(prefix) => format!("{}:{}", prefix, prop.name),
            None => prop.name.to_string(),
        };
        debug!(path = %path.display(), prop = %prop_name, "fs: get_prop");
        async move {
            if prop.namespace.as_deref() == Some("http://owncloud.org/ns")
                && prop.name == "checksums"
            {
                let file = self.get_file(path).await?.ok_or(FsError::NotFound)?;
                if let Some(sha1) = file.content_hash {
                    // NOTE(review): the XML markup inside this literal was lost
                    // in text extraction and has been reconstructed — confirm
                    // against the original source.
                    let xml = format!(
                        r#"<?xml version="1.0"?>
<oc:checksums xmlns:oc="http://owncloud.org/ns">
    <oc:checksum>sha1:{}</oc:checksum>
</oc:checksums>
"#,
                        sha1
                    );
                    return Ok(xml.into_bytes());
                }
            }
            Err(FsError::NotImplemented)
        }
        .boxed()
    }
}

/// Bookkeeping for a chunked upload in progress.
#[derive(Debug, Clone)]
struct UploadState {
    size: u64,
    buffer: BytesMut,
    chunk_count:
u64,
    /// 1-based index of the next chunk to upload.
    chunk: u64,
    upload_id: String,
    /// Pre-signed upload URL per chunk.
    /// NOTE(review): generic parameters in this chunk were lost during text
    /// extraction and reconstructed from usage — confirm against the source.
    upload_urls: Vec<String>,
    /// Client-provided expected SHA-1, if any.
    sha1: Option<String>,
}

impl Default for UploadState {
    fn default() -> Self {
        Self {
            size: 0,
            buffer: BytesMut::new(),
            chunk_count: 0,
            // Chunk numbering is 1-based.
            chunk: 1,
            upload_id: String::new(),
            upload_urls: Vec::new(),
            sha1: None,
        }
    }
}

/// An open WebDAV file handle, used for both download and upload.
struct AliyunDavFile {
    fs: AliyunDriveFileSystem,
    file: AliyunFile,
    parent_file_id: String,
    parent_dir: PathBuf,
    current_pos: u64,
    upload_state: UploadState,
    http_download: bool,
}

impl Debug for AliyunDavFile {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("AliyunDavFile")
            .field("file", &self.file)
            .field("parent_file_id", &self.parent_file_id)
            .field("current_pos", &self.current_pos)
            .field("upload_state", &self.upload_state)
            .finish()
    }
}

impl AliyunDavFile {
    fn new(
        fs: AliyunDriveFileSystem,
        file: AliyunFile,
        parent_file_id: String,
        parent_dir: PathBuf,
        size: u64,
        sha1: Option<String>,
    ) -> Self {
        Self {
            fs,
            file,
            parent_file_id,
            parent_dir,
            current_pos: 0,
            upload_state: UploadState {
                size,
                sha1,
                ..Default::default()
            },
            http_download: false,
        }
    }

    async fn get_download_url(&self) -> Result<GetFileDownloadUrlResponse, FsError> {
        self.fs.drive.get_download_url(&self.file.id).await.map_err(|err| {
            error!(file_id = %self.file.id, file_name = %self.file.name, error = %err, "get download url failed");
            FsError::GeneralFailure
        })
    }

    /// Create the remote upload session on first write (chunk_count == 0).
    /// Returns `Ok(false)` when the upload should be skipped entirely
    /// (identical content hash, or same size with skip_upload_same_size).
    async fn prepare_for_upload(&mut self) -> Result<bool, FsError> {
        if self.upload_state.chunk_count == 0 {
            let size = self.upload_state.size;
            debug!(file_name = %self.file.name, size = size, "prepare for upload");
            if !self.file.id.is_empty() {
                if let Some(content_hash) = self.file.content_hash.as_ref() {
                    if let Some(sha1) = self.upload_state.sha1.as_ref() {
                        if content_hash.eq_ignore_ascii_case(sha1) {
                            debug!(file_name = %self.file.name, sha1 = %sha1, "skip uploading same content hash file");
                            return Ok(false);
                        }
                    }
                }
                if self.fs.skip_upload_same_size && self.file.size == size {
                    debug!(file_name = %self.file.name, size = size, "skip uploading same size file");
                    return Ok(false);
                }
                // existing file, delete before upload
                if let Err(err) = self
                    .fs
                    .drive
                    .remove_file(&self.file.id, !self.fs.no_trash)
                    .await
                {
                    error!(file_name = %self.file.name, error = %err, "delete file before upload failed");
                }
            }
            // TODO: create parent folders?
            let upload_buffer_size = self.fs.upload_buffer_size as u64;
            // Ceiling division: one extra chunk for any remainder.
            let chunk_count =
                size / upload_buffer_size + if size % upload_buffer_size != 0 { 1 } else { 0 };
            self.upload_state.chunk_count = chunk_count;
            let res = self
                .fs
                .drive
                .create_file_with_proof(&self.file.name, &self.parent_file_id, size, chunk_count)
                .await
                .map_err(|err| {
                    error!(file_name = %self.file.name, error = %err, "create file with proof failed");
                    FsError::GeneralFailure
                })?;
            self.file.id = res.file_id.clone();
            let Some(upload_id) = res.upload_id else {
                error!("create file with proof failed: missing upload_id");
                return Err(FsError::GeneralFailure);
            };
            self.upload_state.upload_id = upload_id;
            let upload_urls: Vec<_> = res
                .part_info_list
                .into_iter()
                .map(|x| x.upload_url)
                .collect();
            if upload_urls.is_empty() {
                error!(file_id = %self.file.id, file_name = %self.file.name, "empty upload urls");
                return Err(FsError::GeneralFailure);
            }
            self.upload_state.upload_urls = upload_urls;
        }
        Ok(true)
    }

    /// Upload buffered data once a full chunk is available; with
    /// `remaining == true`, flush whatever is left as the final chunk.
    async fn maybe_upload_chunk(&mut self, remaining: bool) -> Result<(), FsError> {
        let chunk_size = if remaining {
            // last chunk size maybe less than upload_buffer_size
            self.upload_state.buffer.remaining()
        } else {
            self.fs.upload_buffer_size
        };
        let current_chunk = self.upload_state.chunk;
        if chunk_size > 0
            && self.upload_state.buffer.remaining() >= chunk_size
            && current_chunk <= self.upload_state.chunk_count
        {
            let chunk_data = self.upload_state.buffer.split_to(chunk_size);
            debug!(
                file_id = %self.file.id,
                file_name = %self.file.name,
                size = self.upload_state.size,
                "upload part {}/{}",
                current_chunk,
                self.upload_state.chunk_count
            );
            let mut upload_url = &self.upload_state.upload_urls[current_chunk as usize - 1];
            let upload_data = chunk_data.freeze();
            let mut res = self.fs.drive.upload(upload_url, upload_data.clone()).await;
            if let Err(ref err) = res {
                // Pre-signed upload URLs can expire mid-transfer:
                // refresh the URL list and retry this chunk once.
                if err.to_string().contains("expired") {
                    warn!(
                        file_id = %self.file.id,
                        file_name = %self.file.name,
                        upload_url = %upload_url,
                        "upload url expired"
                    );
                    if let Ok(part_info_list) = self
                        .fs
                        .drive
                        .get_upload_url(
                            &self.file.id,
                            &self.upload_state.upload_id,
                            self.upload_state.chunk_count,
                        )
                        .await
                    {
                        let upload_urls: Vec<_> =
                            part_info_list.into_iter().map(|x| x.upload_url).collect();
                        self.upload_state.upload_urls = upload_urls;
                        upload_url = &self.upload_state.upload_urls[current_chunk as usize - 1];
                        // retry upload
                        res = self.fs.drive.upload(upload_url, upload_data).await;
                    }
                }
                res.map_err(|err| {
                    error!(
                        file_id = %self.file.id,
                        file_name = %self.file.name,
                        upload_url = %upload_url,
                        size = self.upload_state.size,
                        error = %err,
                        "upload file chunk {} failed",
                        current_chunk
                    );
                    FsError::GeneralFailure
                })?;
            }
            self.upload_state.chunk += 1;
        }
        Ok(())
    }
}

impl DavFile for AliyunDavFile {
    fn metadata(&'_ mut self) -> FsFuture<'_, Box<dyn DavMetaData>> {
        debug!(file_id = %self.file.id, file_name = %self.file.name, "file: metadata");
        async move {
            // The Aliyun Drive API has no download URL for .livp files; a
            // .livp is rebuilt as a zip of its heic + mov streams, so the
            // reported size must be recomputed from the drive.
            if self.file.name.ends_with(".livp") {
                if let Some(file) = self
                    .fs
                    .drive
                    .get_file(&self.file.id)
                    .await
                    .map_err(|_| FsError::GeneralFailure)?
                {
                    Ok(Box::new(file) as Box<dyn DavMetaData>)
                } else {
                    Err(FsError::NotFound)
                }
            } else {
                let file = self.file.clone();
                Ok(Box::new(file) as Box<dyn DavMetaData>)
            }
        }
        .boxed()
    }

    fn redirect_url(&mut self) -> FsFuture<Option<String>> {
        debug!(file_id = %self.file.id, file_name = %self.file.name, "file: redirect_url");
        async move {
            if self.file.id.is_empty() {
                return Err(FsError::NotFound);
            }
            // Reuse the cached URL unless it is close to expiry.
            let download_url = self.file.url.take();
            let download_url = if let Some(mut url) = download_url {
                if is_url_expired(&url) {
                    debug!(url = %url, "download url expired");
                    url = self.get_download_url().await?.url;
                }
                url
            } else {
                let res = self.get_download_url().await?;
                res.url
            };

            if !download_url.is_empty() {
                self.file.url = Some(download_url.clone());
                // URLs that require a Referer header can't be handed to the
                // client as a plain redirect.
                if !download_url.contains("x-oss-additional-headers=referer") {
                    return Ok(Some(download_url));
                }
            }
            Ok(None)
        }
        .boxed()
    }

    fn write_buf(&'_ mut self, buf: Box<dyn Buf + Send>) -> FsFuture<'_, ()> {
        debug!(file_id = %self.file.id, file_name = %self.file.name, "file: write_buf");
        async move {
            if self.prepare_for_upload().await?
{ 859 | self.upload_state.buffer.put(buf); 860 | self.maybe_upload_chunk(false).await?; 861 | } 862 | Ok(()) 863 | } 864 | .boxed() 865 | } 866 | 867 | fn write_bytes(&mut self, buf: Bytes) -> FsFuture<()> { 868 | debug!(file_id = %self.file.id, file_name = %self.file.name, size = buf.len(), "file: write_bytes"); 869 | async move { 870 | if self.prepare_for_upload().await? { 871 | self.upload_state.buffer.extend_from_slice(&buf); 872 | self.maybe_upload_chunk(false).await?; 873 | } 874 | Ok(()) 875 | } 876 | .boxed() 877 | } 878 | 879 | fn read_bytes(&mut self, count: usize) -> FsFuture { 880 | debug!( 881 | file_id = %self.file.id, 882 | file_name = %self.file.name, 883 | pos = self.current_pos, 884 | count = count, 885 | size = self.file.size, 886 | "file: read_bytes", 887 | ); 888 | async move { 889 | if self.file.id.is_empty() { 890 | // upload in progress 891 | return Err(FsError::NotFound); 892 | } 893 | let download_url = self.file.url.take(); 894 | let (download_url, streams_url) = if let Some(mut url) = download_url { 895 | if is_url_expired(&url) { 896 | debug!(url = %url, "download url expired"); 897 | url = self.get_download_url().await?.url; 898 | } 899 | (url, HashMap::new()) 900 | } else { 901 | let res = self.get_download_url().await?; 902 | (res.url, res.streams_url) 903 | }; 904 | 905 | if !download_url.is_empty() { 906 | let mut url = 907 | reqwest::Url::parse(&download_url).map_err(|_| FsError::GeneralFailure)?; 908 | if self.http_download { 909 | url.set_scheme("http") 910 | .map_err(|_| FsError::GeneralFailure)?; 911 | } 912 | let content = self 913 | .fs 914 | .drive 915 | .download(url, Some((self.current_pos, count))) 916 | .await 917 | .map_err(|err| { 918 | error!(url = %download_url, error = %err, "download file failed"); 919 | FsError::NotFound 920 | })?; 921 | self.current_pos += content.len() as u64; 922 | self.file.url = Some(download_url); 923 | Ok(content) 924 | } else if streams_url.is_empty() { 925 | Err(FsError::NotFound) 926 | 
} else { 927 | // Generate .livp file on the fly 928 | let buf = Vec::new(); 929 | let mut zip = ZipWriter::new(Cursor::new(buf)); 930 | for (typ, url) in streams_url { 931 | let content = self.fs.drive.download(&url, None).await.map_err(|err| { 932 | error!(url = %download_url, error = %err, "download file failed"); 933 | FsError::NotFound 934 | })?; 935 | let name = self.file.name.replace(".livp", &format!(".{}", typ)); 936 | zip.start_file( 937 | name, 938 | FileOptions::default().compression_method(zip::CompressionMethod::Stored), 939 | ) 940 | .map_err(|_| FsError::GeneralFailure)?; 941 | zip.write(&content).map_err(|_| FsError::GeneralFailure)?; 942 | self.current_pos += content.len() as u64; 943 | } 944 | let zip_buf = zip 945 | .finish() 946 | .map_err(|_| FsError::GeneralFailure)? 947 | .into_inner(); 948 | Ok(Bytes::from(zip_buf)) 949 | } 950 | } 951 | .boxed() 952 | } 953 | 954 | fn seek(&mut self, pos: SeekFrom) -> FsFuture { 955 | debug!( 956 | file_id = %self.file.id, 957 | file_name = %self.file.name, 958 | pos = ?pos, 959 | "file: seek" 960 | ); 961 | async move { 962 | let new_pos = match pos { 963 | SeekFrom::Start(pos) => pos, 964 | SeekFrom::End(pos) => (self.file.size as i64 + pos) as u64, 965 | SeekFrom::Current(size) => self.current_pos + size as u64, 966 | }; 967 | self.current_pos = new_pos; 968 | Ok(new_pos) 969 | } 970 | .boxed() 971 | } 972 | 973 | fn flush(&mut self) -> FsFuture<()> { 974 | debug!(file_id = %self.file.id, file_name = %self.file.name, "file: flush"); 975 | async move { 976 | if self.prepare_for_upload().await? 
{ 977 | self.maybe_upload_chunk(true).await?; 978 | if !self.upload_state.upload_id.is_empty() { 979 | self.fs 980 | .drive 981 | .complete_file_upload(&self.file.id, &self.upload_state.upload_id) 982 | .await 983 | .map_err(|err| { 984 | error!( 985 | file_id = %self.file.id, 986 | file_name = %self.file.name, 987 | error = %err, 988 | "complete file upload failed" 989 | ); 990 | FsError::GeneralFailure 991 | })?; 992 | } 993 | self.fs 994 | .remove_uploading_file(&self.parent_file_id, &self.file.name); 995 | self.fs.dir_cache.invalidate(&self.parent_dir).await; 996 | } 997 | Ok(()) 998 | } 999 | .boxed() 1000 | } 1001 | } 1002 | 1003 | fn is_url_expired(url: &str) -> bool { 1004 | if let Ok(oss_url) = ::url::Url::parse(url) { 1005 | let expires = oss_url.query_pairs().find_map(|(k, v)| { 1006 | if k == "x-oss-expires" { 1007 | if let Ok(expires) = v.parse::() { 1008 | return Some(expires); 1009 | } 1010 | } 1011 | None 1012 | }); 1013 | if let Some(expires) = expires { 1014 | let current_ts = SystemTime::now() 1015 | .duration_since(UNIX_EPOCH) 1016 | .expect("Time went backwards") 1017 | .as_secs(); 1018 | // 预留 1 分钟 1019 | return current_ts >= expires - 60; 1020 | } 1021 | } 1022 | false 1023 | } 1024 | -------------------------------------------------------------------------------- /src/webdav.rs: -------------------------------------------------------------------------------- 1 | use std::future::Future; 2 | use std::io; 3 | use std::net::ToSocketAddrs; 4 | use std::path::PathBuf; 5 | use std::pin::Pin; 6 | use std::task::{Context, Poll}; 7 | 8 | use anyhow::Result; 9 | use dav_server::{body::Body, DavConfig, DavHandler}; 10 | use headers::{authorization::Basic, Authorization, HeaderMapExt}; 11 | use hyper::{service::Service, Request, Response}; 12 | use tracing::{error, info}; 13 | 14 | #[cfg(feature = "rustls-tls")] 15 | use { 16 | futures_util::stream::StreamExt, 17 | hyper::server::accept, 18 | hyper::server::conn::AddrIncoming, 19 | std::fs::File, 20 | 
    std::future::ready,
    std::path::Path,
    std::sync::Arc,
    tls_listener::{SpawningHandshakes, TlsListener},
    tokio_rustls::rustls::{Certificate, PrivateKey, ServerConfig},
    tokio_rustls::TlsAcceptor,
};

/// Configuration and entry point for the WebDAV HTTP(S) server.
///
/// NOTE(review): `Option<String>` generic parameters below were lost during
/// text extraction and reconstructed — confirm against the original source.
pub struct WebDavServer {
    pub host: String,
    pub port: u16,
    /// HTTP Basic auth credentials; auth is enforced only when both
    /// user and password are set (see `AliyunDriveWebDav::call`).
    pub auth_user: Option<String>,
    pub auth_password: Option<String>,
    /// `(certificate path, key path)`; enables TLS when present.
    pub tls_config: Option<(PathBuf, PathBuf)>,
    pub handler: DavHandler,
}

impl WebDavServer {
    /// Bind the configured address and serve until the server exits.
    pub async fn serve(self) -> Result<()> {
        let addr = (self.host, self.port)
            .to_socket_addrs()
            .unwrap()
            .next()
            .ok_or_else(|| io::Error::from(io::ErrorKind::AddrNotAvailable))?;
        #[cfg(feature = "rustls-tls")]
        if let Some((tls_cert, tls_key)) = self.tls_config {
            // Drop connections whose TLS handshake failed instead of
            // propagating the error into hyper.
            let incoming = TlsListener::new(
                SpawningHandshakes(tls_acceptor(&tls_key, &tls_cert)?),
                AddrIncoming::bind(&addr)?,
            )
            .filter(|conn| {
                if let Err(err) = conn {
                    error!("TLS error: {:?}", err);
                    ready(false)
                } else {
                    ready(true)
                }
            });
            let server = hyper::Server::builder(accept::from_stream(incoming)).serve(MakeSvc {
                auth_user: self.auth_user,
                auth_password: self.auth_password,
                handler: self.handler,
            });
            info!("listening on https://{}", addr);
            let _ = server.await.map_err(|e| error!("server error: {}", e));
            return Ok(());
        }
        #[cfg(not(feature = "rustls-tls"))]
        if self.tls_config.is_some() {
            anyhow::bail!("TLS is not supported in this build.");
        }

        // Plain HTTP path (also the fallback when TLS is not configured).
        let server = hyper::Server::bind(&addr).serve(MakeSvc {
            auth_user: self.auth_user,
            auth_password: self.auth_password,
            handler: self.handler,
        });
        info!("listening on http://{}", server.local_addr());
        let _ = server.await.map_err(|e| error!("server error: {}", e));
        Ok(())
    }
}

/// Per-connection hyper service wrapping the DAV handler with optional
/// HTTP Basic authentication.
#[derive(Clone)]
pub struct AliyunDriveWebDav {
    auth_user: Option<String>,
    auth_password:
Option<String>,
    handler: DavHandler,
}

// NOTE(review): generic parameters in the `Service` impls below were lost
// during text extraction and reconstructed from the hyper `Service`
// contract — confirm against the original source.
impl Service<Request<hyper::Body>> for AliyunDriveWebDav {
    type Response = Response<Body>;
    type Error = hyper::Error;
    #[allow(clippy::type_complexity)]
    type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;

    fn poll_ready(&mut self, _: &mut Context) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, req: Request<hyper::Body>) -> Self::Future {
        // Auth is enforced only when both credentials are configured.
        let should_auth = self.auth_user.is_some() && self.auth_password.is_some();
        let dav_server = self.handler.clone();
        let auth_user = self.auth_user.clone();
        let auth_pwd = self.auth_password.clone();
        Box::pin(async move {
            if should_auth {
                let auth_user = auth_user.unwrap();
                let auth_pwd = auth_pwd.unwrap();
                let user = match req.headers().typed_get::<Authorization<Basic>>() {
                    Some(Authorization(basic))
                        if basic.username() == auth_user && basic.password() == auth_pwd =>
                    {
                        basic.username().to_string()
                    }
                    Some(_) | None => {
                        // return a 401 reply.
                        let response = hyper::Response::builder()
                            .status(401)
                            .header("WWW-Authenticate", "Basic realm=\"aliyundrive-webdav\"")
                            .body(Body::from("Authentication required".to_string()))
                            .unwrap();
                        return Ok(response);
                    }
                };
                let config = DavConfig::new().principal(user);
                Ok(dav_server.handle_with(config, req).await)
            } else {
                Ok(dav_server.handle(req).await)
            }
        })
    }
}

/// Factory that hands hyper a fresh `AliyunDriveWebDav` service
/// for every accepted connection.
pub struct MakeSvc {
    pub auth_user: Option<String>,
    pub auth_password: Option<String>,
    pub handler: DavHandler,
}

impl<T> Service<T> for MakeSvc {
    type Response = AliyunDriveWebDav;
    type Error = hyper::Error;
    #[allow(clippy::type_complexity)]
    type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;

    fn poll_ready(&mut self, _: &mut Context) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, _: T) -> Self::Future {
        let auth_user = self.auth_user.clone();
        let auth_password = self.auth_password.clone();
        let handler = self.handler.clone();
        let fut = async move {
            Ok(AliyunDriveWebDav {
                auth_user,
                auth_password,
                handler,
            })
        };
        Box::pin(fut)
    }
}

/// Build a `TlsAcceptor` from PEM-encoded private-key and certificate files.
#[cfg(feature = "rustls-tls")]
fn tls_acceptor(key: &Path, cert: &Path) -> anyhow::Result<TlsAcceptor> {
    let mut key_reader = io::BufReader::new(File::open(key)?);
    let mut cert_reader = io::BufReader::new(File::open(cert)?);

    // Use the first private key found in the key file.
    let key = PrivateKey(private_keys(&mut key_reader)?.remove(0));
    let certs = rustls_pemfile::certs(&mut cert_reader)?
        .into_iter()
        .map(Certificate)
        .collect();

    let mut config = ServerConfig::builder()
        .with_safe_defaults()
        .with_no_client_auth()
        .with_single_cert(certs, key)?;

    // Advertise HTTP/2 and HTTP/1.1 via ALPN.
    config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec()];

    Ok(Arc::new(config).into())
}

/// Collect every RSA / PKCS#8 / EC private key found in a PEM stream.
/// NOTE(review): generic parameters were lost during text extraction and
/// reconstructed — confirm against the original source.
#[cfg(feature = "rustls-tls")]
fn private_keys(rd: &mut dyn io::BufRead) -> Result<Vec<Vec<u8>>, io::Error> {
    use rustls_pemfile::{read_one, Item};

    let mut keys = Vec::<Vec<u8>>::new();
    loop {
        match read_one(rd)? {
            // End of stream: return everything collected so far.
            None => return Ok(keys),
            Some(Item::RSAKey(key)) => keys.push(key),
            Some(Item::PKCS8Key(key)) => keys.push(key),
            Some(Item::ECKey(key)) => keys.push(key),
            // Skip certificates and any other PEM items.
            _ => {}
        };
    }
}
--------------------------------------------------------------------------------
/systemd.service:
--------------------------------------------------------------------------------
[Unit]
Description=AliyunDrive WebDAV
After=network.target

[Service]
Type=simple
Environment="REFRESH_TOKEN="
ExecStart=/usr/bin/aliyundrive-webdav --auto-index -w /etc/aliyundrive-webdav
KillMode=process
Restart=on-failure

[Install]
WantedBy=multi-user.target
--------------------------------------------------------------------------------