├── .gitignore ├── examples ├── warp.rs ├── actix.rs ├── axum.rs ├── hyper.rs ├── caldav.rs ├── sample-litmus-server.rs └── auth.rs ├── .github └── workflows │ ├── rust.yml │ └── CI.yml ├── src ├── voidfs.rs ├── handle_mkcol.rs ├── ls.rs ├── handle_options.rs ├── body.rs ├── fakels.rs ├── multierror.rs ├── actix.rs ├── async_stream.rs ├── xmltree_ext.rs ├── errors.rs ├── warp.rs ├── util.rs ├── tree.rs ├── handle_delete.rs ├── localfs_windows.rs ├── caldav.rs ├── conditional.rs ├── lib.rs ├── localfs_macos.rs ├── handle_put.rs └── handle_copymove.rs ├── doc ├── APPLE-Finder-hints.md ├── APPLE-doubleinfo.md ├── Apache-PUT-with-Content-Range.md └── SABREDAV-partialupdate.md ├── Cargo.toml ├── TODO.md ├── README.litmus-test.md ├── README.CalDAV.md ├── README.md └── tests └── caldav_tests.rs /.gitignore: -------------------------------------------------------------------------------- 1 | Cargo.lock 2 | /target/ 3 | **/*.rs.bk 4 | .vscode 5 | src/xml 6 | .idea -------------------------------------------------------------------------------- /examples/warp.rs: -------------------------------------------------------------------------------- 1 | use dav_server::warp::dav_dir; 2 | use std::net::SocketAddr; 3 | 4 | #[tokio::main] 5 | async fn main() { 6 | env_logger::init(); 7 | let dir = "/tmp"; 8 | let addr: SocketAddr = ([127, 0, 0, 1], 4918).into(); 9 | 10 | println!("warp example: listening on {:?} serving {}", addr, dir); 11 | let warpdav = dav_dir(dir, true, true); 12 | warp::serve(warpdav).run(addr).await; 13 | } 14 | -------------------------------------------------------------------------------- /examples/actix.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | 3 | use actix_web::{App, HttpServer, web}; 4 | use dav_server::actix::*; 5 | use dav_server::{DavConfig, DavHandler, fakels::FakeLs, localfs::LocalFs}; 6 | 7 | pub async fn dav_handler(req: DavRequest, davhandler: web::Data) -> DavResponse { 8 | if let Some(prefix) = req.prefix() { 9 | let config = DavConfig::new().strip_prefix(prefix); 10 | davhandler.handle_with(config, req.request).await.into() 11 | } else { 12 | davhandler.handle(req.request).await.into() 13 | } 14 | } 15 | 16 | #[actix_web::main] 17 | async fn main() -> io::Result<()> { 18 | env_logger::init(); 19 | let addr = "127.0.0.1:4918"; 20 | let dir = "/tmp"; 21 | 22 | let dav_server = DavHandler::builder() 23 | .filesystem(LocalFs::new(dir, false, false, false)) 24 | .locksystem(FakeLs::new()) 25 | .build_handler(); 26 | 27 | println!("actix-web example: listening on {} serving {}", addr, dir); 28 | 29 | HttpServer::new(move || { 30 | App::new() 31 | .app_data(web::Data::new(dav_server.clone())) 32 | .service(web::resource("/{tail:.*}").to(dav_handler)) 33 | }) 34 | .bind(addr)? 
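        // `bind` returns an `io::Result`, so the `?` above propagates a bind failure (e.g. the port is already in use) out of `main`.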
35 | .run() 36 | .await 37 | } 38 | -------------------------------------------------------------------------------- /examples/axum.rs: -------------------------------------------------------------------------------- 1 | use axum::{Extension, Router, extract::Request, response::IntoResponse, routing::any}; 2 | use dav_server::{DavHandler, fakels::FakeLs, localfs::LocalFs}; 3 | use tokio::net::TcpListener; 4 | 5 | fn main() { 6 | tokio::runtime::Builder::new_multi_thread() 7 | .enable_all() 8 | .build() 9 | .unwrap() 10 | .block_on(async_main()); 11 | } 12 | 13 | async fn async_main() { 14 | if std::env::var("RUST_LOG").is_err() { 15 | unsafe { 16 | std::env::set_var("RUST_LOG", "debug"); 17 | } 18 | } 19 | env_logger::init(); 20 | let ip = "127.0.0.1"; 21 | let port = 4918; 22 | 23 | let addr = format!("{ip}:{port}"); 24 | let listener = TcpListener::bind(&addr).await.unwrap(); 25 | 26 | let dav = DavHandler::builder() 27 | .filesystem(LocalFs::new("/tmp", false, false, false)) 28 | .locksystem(FakeLs::new()) 29 | .strip_prefix("/dav") 30 | .build_handler(); 31 | 32 | let router = Router::new() 33 | .route("/dav", any(handle_dav)) 34 | .route("/dav/", any(handle_dav)) 35 | .route("/dav/{*path}", any(handle_dav)) 36 | .layer(Extension(dav)); 37 | 38 | log::info!("serve at http://{addr}"); 39 | axum::serve(listener, router).await.unwrap(); 40 | } 41 | 42 | async fn handle_dav(Extension(dav): Extension, req: Request) -> impl IntoResponse { 43 | dav.handle(req).await 44 | } 45 | -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Push or PR 2 | 3 | on: 4 | [push, pull_request] 5 | 6 | env: 7 | CARGO_TERM_COLOR: always 8 | 9 | jobs: 10 | build_n_test: 11 | strategy: 12 | fail-fast: false 13 | matrix: 14 | os: [ubuntu-latest, macos-latest, windows-latest] 15 | 16 | runs-on: ${{ matrix.os }} 17 | 18 | steps: 19 | - uses: actions/checkout@v4 20 | - name: rustfmt 21 | if: ${{ !cancelled() }} 22 | run: cargo fmt --all -- --check 23 | - name: check 24 | if: ${{ !cancelled() }} 25 | run: cargo check --verbose 26 | - name: clippy 27 | if: ${{ !cancelled() }} 28 | run: | 29 | cargo clippy --all-targets -- -D warnings 30 | cargo clippy --all-targets -- -D warnings 31 | cargo clippy --all-targets -- -D warnings 32 | - name: Build 33 | if: ${{ !cancelled() }} 34 | run: | 35 | cargo build --verbose --examples --tests 36 | cargo build --verbose --examples --tests 37 | cargo build --verbose --examples --tests 38 | - name: Abort on error 39 | if: ${{ failure() }} 40 | run: echo "Some of jobs failed" && false 41 | 42 | 43 | semver: 44 | name: Check semver 45 | strategy: 46 | matrix: 47 | os: [ubuntu-latest, macos-latest, windows-latest] 48 | runs-on: ${{ matrix.os }} 49 | steps: 50 | - uses: actions/checkout@v4 51 | - uses: actions-rs/toolchain@v1 52 | with: 53 | profile: minimal 54 | toolchain: stable 55 | override: true 56 | - uses: obi1kenobi/cargo-semver-checks-action@v2 -------------------------------------------------------------------------------- /src/voidfs.rs: -------------------------------------------------------------------------------- 1 | //! Placeholder filesystem. Returns FsError::NotImplemented on every method. 2 | //! 3 | use std::{any::Any, marker::PhantomData}; 4 | 5 | use crate::davpath::DavPath; 6 | use crate::fs::*; 7 | 8 | /// Placeholder filesystem. 
9 | #[derive(Clone, Debug)] 10 | pub struct VoidFs { 11 | _marker: PhantomData, 12 | } 13 | 14 | pub fn is_voidfs(fs: &dyn Any) -> bool { 15 | fs.is::>>() 16 | } 17 | 18 | impl VoidFs { 19 | pub fn new() -> Box { 20 | Box::new(Self { 21 | _marker: Default::default(), 22 | }) 23 | } 24 | } 25 | 26 | impl GuardedFileSystem for VoidFs { 27 | fn metadata<'a>( 28 | &'a self, 29 | _path: &'a DavPath, 30 | _credentials: &C, 31 | ) -> FsFuture<'a, Box> { 32 | Box::pin(async { Err(FsError::NotImplemented) }) 33 | } 34 | 35 | fn read_dir<'a>( 36 | &'a self, 37 | _path: &'a DavPath, 38 | _meta: ReadDirMeta, 39 | _credentials: &C, 40 | ) -> FsFuture<'a, FsStream>> { 41 | Box::pin(async { Err(FsError::NotImplemented) }) 42 | } 43 | 44 | fn open<'a>( 45 | &'a self, 46 | _path: &'a DavPath, 47 | _options: OpenOptions, 48 | _credentials: &C, 49 | ) -> FsFuture<'a, Box> { 50 | Box::pin(async { Err(FsError::NotImplemented) }) 51 | } 52 | } 53 | 54 | #[cfg(test)] 55 | mod tests { 56 | use super::*; 57 | use crate::memfs::MemFs; 58 | 59 | #[test] 60 | fn test_is_void() { 61 | assert!(is_voidfs::(&VoidFs::::new())); 62 | assert!(is_voidfs::<()>(&VoidFs::<()>::new())); 63 | assert!(!is_voidfs::<()>(&MemFs::new())); 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /doc/APPLE-Finder-hints.md: -------------------------------------------------------------------------------- 1 | # APPLE-FINDER-HINTS 2 | 3 | The Apple Finder (and other subsystems) seem to probe for a few 4 | files at the root of the filesystems to get a hint about the 5 | behaviour they should show processing this filesystem. 6 | 7 | It also looks for files with extra localization information in 8 | every directory, and for resource fork data (the `._` files). 9 | 10 | ## FILES 11 | 12 | - `.metadata_never_index` 13 | prevents the system from indexing all of the data 14 | - `.ql_disablethumbnails` 15 | prevent the system from downloading all files that look like an 16 | image or a video to create a thumbnail 17 | - `.ql_disablecache` 18 | not really sure but it sounds useful 19 | 20 | The `.ql_` files are configuration for the "QuickLook" functionality 21 | of the Finder. 22 | 23 | The `.metadata_never_index` file appears to be a hint for the 24 | Spotlight indexing system. 25 | 26 | Additionally, the Finder probes for a `.localized` file in every 27 | directory it encounters, and it does a PROPSTAT for every file 28 | in the directory prefixed with `._`. 29 | 30 | ## OPTIMIZATIONS 31 | 32 | For a macOS client we return the metadata for a zero-sized file if it 33 | does a PROPSTAT of `/.metadata_never_index` or `/.ql_disablethumbnails`. 34 | 35 | We always return a 404 Not Found for a PROPSTAT of any `.localized` file. 36 | 37 | Furthermore, we disallow moving, removing etc of those files. The files 38 | do not show up in a PROPSTAT of the rootdirectory. 39 | 40 | If a PROPFIND with `Depth: 1` is done on a directory, we add the 41 | directory pathname to an LRU cache, and the pathname of each file of 42 | which the name starts with `._`. Since we then know which `._` files 43 | exist, it is easy to return a fast 404 for PROPSTAT request for `._` 44 | files that do not exist. The cache is kept consistent by checking 45 | the timestamp on the parent directory, and a timeout. 
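
A minimal sketch of that cache, assuming the `lru` crate this repository already
depends on; the names used here (`DotUnderscoreCache`, `exists`, and so on) are
illustrative and are not the actual types in `src/localfs_macos.rs`:

```rust
// Illustrative sketch only; not the implementation used by this crate.
use std::num::NonZeroUsize;
use std::time::{Duration, Instant, SystemTime};

use lru::LruCache;

/// What we remember per directory after answering a `Depth: 1` PROPFIND.
struct DotUnderscoreEntry {
    /// Names of the `._` files that were actually present.
    names: Vec<String>,
    /// mtime of the directory at the time it was listed.
    dir_mtime: SystemTime,
    /// When this entry was stored, for the timeout check.
    stored_at: Instant,
}

struct DotUnderscoreCache {
    map: LruCache<String, DotUnderscoreEntry>,
    max_age: Duration,
}

impl DotUnderscoreCache {
    fn new(capacity: usize, max_age: Duration) -> Self {
        DotUnderscoreCache {
            map: LruCache::new(NonZeroUsize::new(capacity).unwrap()),
            max_age,
        }
    }

    /// Record the `._` entries seen while answering a `Depth: 1` PROPFIND.
    fn store(&mut self, dir: &str, dir_mtime: SystemTime, names: Vec<String>) {
        let entry = DotUnderscoreEntry {
            names,
            dir_mtime,
            stored_at: Instant::now(),
        };
        self.map.put(dir.to_string(), entry);
    }

    /// `Some(false)` means "we know this `._` file does not exist": answer 404
    /// without touching the filesystem. `None` means the cache cannot help.
    fn exists(&mut self, dir: &str, name: &str, current_dir_mtime: SystemTime) -> Option<bool> {
        let entry = self.map.get(dir)?;
        let fresh = entry.stored_at.elapsed() < self.max_age
            && entry.dir_mtime == current_dir_mtime;
        if !fresh {
            // Stale entry: fall back to a real filesystem lookup.
            return None;
        }
        Some(entry.names.iter().any(|n| n == name))
    }
}
```

Comparing the stored directory mtime against the current one is what keeps the
cache honest when another client creates or deletes `._` files.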
46 | 47 | -------------------------------------------------------------------------------- /examples/hyper.rs: -------------------------------------------------------------------------------- 1 | use std::{convert::Infallible, net::SocketAddr}; 2 | 3 | use hyper::{server::conn::http1, service::service_fn}; 4 | use hyper_util::rt::TokioIo; 5 | use tokio::net::TcpListener; 6 | 7 | use dav_server::{DavHandler, fakels::FakeLs, localfs::LocalFs}; 8 | 9 | #[tokio::main] 10 | async fn main() { 11 | env_logger::init(); 12 | let dir = "/tmp"; 13 | let addr: SocketAddr = ([127, 0, 0, 1], 4918).into(); 14 | 15 | let dav_server = DavHandler::builder() 16 | .filesystem(LocalFs::new(dir, false, false, false)) 17 | .locksystem(FakeLs::new()) 18 | .build_handler(); 19 | 20 | let listener = TcpListener::bind(addr).await.unwrap(); 21 | 22 | println!("Listening {addr}"); 23 | 24 | // We start a loop to continuously accept incoming connections 25 | loop { 26 | let (stream, _) = listener.accept().await.unwrap(); 27 | let dav_server = dav_server.clone(); 28 | 29 | // Use an adapter to access something implementing `tokio::io` traits as if they implement 30 | // `hyper::rt` IO traits. 31 | let io = TokioIo::new(stream); 32 | 33 | // Spawn a tokio task to serve multiple connections concurrently 34 | tokio::task::spawn(async move { 35 | // Finally, we bind the incoming connection to our `hello` service 36 | if let Err(err) = http1::Builder::new() 37 | // `service_fn` converts our function in a `Service` 38 | .serve_connection( 39 | io, 40 | service_fn({ 41 | move |req| { 42 | let dav_server = dav_server.clone(); 43 | async move { Ok::<_, Infallible>(dav_server.handle(req).await) } 44 | } 45 | }), 46 | ) 47 | .await 48 | { 49 | eprintln!("Failed serving: {err:?}"); 50 | } 51 | }); 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /src/handle_mkcol.rs: -------------------------------------------------------------------------------- 1 | use headers::HeaderMapExt; 2 | use http::{Request, Response, StatusCode}; 3 | 4 | use crate::body::Body; 5 | use crate::conditional::*; 6 | use crate::davheaders; 7 | use crate::fs::*; 8 | use crate::{DavError, DavInner, DavResult}; 9 | 10 | impl DavInner { 11 | pub(crate) async fn handle_mkcol(&self, req: &Request<()>) -> DavResult> { 12 | let mut path = self.path(req); 13 | let meta = self.fs.metadata(&path, &self.credentials).await; 14 | 15 | // check the If and If-* headers. 16 | let res = if_match_get_tokens( 17 | req, 18 | meta.as_ref().map(|v| v.as_ref()).ok(), 19 | self.fs.as_ref(), 20 | &self.ls, 21 | &path, 22 | &self.credentials, 23 | ) 24 | .await; 25 | let tokens = match res { 26 | Ok(t) => t, 27 | Err(s) => return Err(DavError::Status(s)), 28 | }; 29 | 30 | // if locked check if we hold that lock. 31 | if let Some(ref locksystem) = self.ls { 32 | let t = tokens.iter().map(|s| s.as_str()).collect::>(); 33 | let principal = self.principal.as_deref(); 34 | if let Err(_l) = locksystem.check(&path, principal, false, false, t).await { 35 | return Err(DavError::Status(StatusCode::LOCKED)); 36 | } 37 | } 38 | 39 | let mut res = Response::new(Body::empty()); 40 | 41 | match self.fs.create_dir(&path, &self.credentials).await { 42 | // RFC 4918 9.3.1 MKCOL Status Codes. 
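        // Exists            -> 405 Method Not Allowed (something is already there),
        // NotFound          -> 409 Conflict (an intermediate collection is missing),
        // any other error   -> passed through as DavError::FsError,
        // Ok                -> 201 Created.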
43 | Err(FsError::Exists) => return Err(DavError::Status(StatusCode::METHOD_NOT_ALLOWED)), 44 | Err(FsError::NotFound) => return Err(DavError::Status(StatusCode::CONFLICT)), 45 | Err(e) => return Err(DavError::FsError(e)), 46 | Ok(()) => { 47 | if path.is_collection() { 48 | path.add_slash(); 49 | res.headers_mut().typed_insert(davheaders::ContentLocation( 50 | path.with_prefix().as_url_string(), 51 | )); 52 | } 53 | *res.status_mut() = StatusCode::CREATED; 54 | } 55 | } 56 | 57 | Ok(res) 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /doc/APPLE-doubleinfo.md: -------------------------------------------------------------------------------- 1 | 2 | # APPLEDOUBLEINFO 3 | 4 | Normally, after asking for a directory listing (using PROPFIND with Depth: 1) 5 | the macOS Finder will send a PROPFIND request for every file in the 6 | directory, prefixed with ".\_". Even though it just got a complete directory 7 | listing which doesn't list those files. 8 | 9 | An optimization the Apple iDisk service makes, is that it sometimes 10 | synthesizes those info files ahead of time. It then lists those synthesized 11 | files in the PROPFIND response together with the propery, 12 | which is the contents of the ".\_file" (if it would be present) in base64. 13 | It appears to only do this when the appledoubleinfo data is completely 14 | basic and is 82 bytes of size. 15 | 16 | This prevents the webdav clients from launching an additional PROPFIND 17 | request for every file prefixed with ".\_". 18 | 19 | Note that you cannot add an propery to a PROPSTAT 20 | element of a "file" itself, that's ignored, alas. macOS only accepts 21 | it on ".\_" files. 22 | 23 | There is not much information about this, but an Apple engineer mentioned it in 24 | https://lists.apple.com/archives/filesystem-dev/2009/Feb/msg00013.html 25 | 26 | There is a default "empty"-like response for a file that I found at 27 | https://github.com/DanRohde/webdavcgi/blob/master/lib/perl/WebDAV/Properties.pm 28 | 29 | So, what we _could_ do (but don't, yet) to optimize the macOS webdav client, 30 | when we reply to PROPFIND: 31 | 32 | - for each file that does NOT have a ".\_file" present 33 | - we synthesize a virtual response 34 | - for a virtual file with name ".\_file 35 | - with size: 82 bytes 36 | - that contains: 37 | 38 | 39 | AAUWBwACAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAACAAAAJgAAACwAAAAJAAAAMgAAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA== 40 | 41 | 42 | The contents of this base64 string are explained at 43 | https://github.com/DanRohde/webdavcgi/blob/master/lib/perl/WebDAV/Properties.pm 44 | 45 | ... and they are: 46 | 47 | ``` 48 | appledoubleheader: Magic(4) Version(4) Filler(16) EntryCout(2) 49 | EntryDescriptor(id:4(2:resource fork),offset:4,length:4) 50 | EntryDescriptor(id:9 finder)... Finder Info(16+16) 51 | 52 | namespace: http://www.apple.com/webdav\_fs/props/ 53 | content: MIME::Base64(pack('H\*', '00051607'. '00020000' . ( '00' x 16 ) . 54 | '0002'. '00000002'. '00000026' . '0000002C'.'00000009'. '00000032' . '00000020' . 55 | ('00' x 32) )) 56 | ``` 57 | 58 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "dav-server" 3 | version = "0.9.0" 4 | readme = "README.md" 5 | description = "Rust WebDAV server library. A fork of the webdav-handler crate." 
6 | repository = "https://github.com/messense/dav-server-rs" 7 | authors = ["Miquel van Smoorenburg ", "messense "] 8 | edition = "2024" 9 | license = "Apache-2.0" 10 | keywords = ["webdav"] 11 | categories = ["web-programming"] 12 | 13 | [package.metadata.docs.rs] 14 | all-features = true 15 | rustdoc-args = ["--cfg", "docsrs"] 16 | 17 | [package.metadata.playground] 18 | features = ["full"] 19 | 20 | [features] 21 | default = ["localfs", "memfs"] 22 | actix-compat = [ "actix-web" ] 23 | warp-compat = [ "warp", "hyper" ] 24 | caldav = [ "icalendar" ] 25 | all = [ "actix-compat", "warp-compat", "caldav" ] 26 | localfs = ["libc", "lru", "tokio/rt-multi-thread", "parking_lot", "reflink-copy"] 27 | memfs = ["libc"] 28 | 29 | [[example]] 30 | name = "actix" 31 | required-features = [ "actix-compat" ] 32 | 33 | [[example]] 34 | name = "warp" 35 | required-features = [ "warp-compat" ] 36 | 37 | [[example]] 38 | name = "caldav" 39 | required-features = [ "caldav" ] 40 | 41 | [dependencies] 42 | bytes = "1.0.1" 43 | dyn-clone = "1" 44 | futures-util = { version = "0.3.16", default-features = false, features = ["alloc"] } 45 | futures-channel = "0.3.16" 46 | headers = "0.4.0" 47 | htmlescape = "0.3.1" 48 | http = "1.0.0" 49 | http-body = "1.0.0" 50 | http-body-util = "0.1.0" 51 | libc = { version = "0.2.0", optional = true } 52 | log = "0.4.0" 53 | lru = { version = "0.16.0", optional = true } 54 | mime_guess = "2.0.0" 55 | parking_lot = { version = "0.12.0", optional = true } 56 | percent-encoding = "2.1.0" 57 | pin-project-lite = "0.2.16" 58 | tokio = { version = "1.22.0", features = [ "rt" ] } 59 | chrono = { version = "0.4", default-features = false, features = [ "clock" ] } 60 | url = "2.2.0" 61 | uuid = { version = "1.1.2", features = ["v4"] } 62 | xml-rs = "1" 63 | xmltree = "0.12.0" 64 | 65 | hyper = { version = "1.1.0", default-features = false, features = ["server"], optional = true } 66 | warp = { version = "0.3.0", optional = true, default-features = false } 67 | actix-web = { version = "4.0.0-beta.15", default-features = false, optional = true } 68 | reflink-copy = { version = "0.1.14", optional = true } 69 | icalendar = { version = "0.17.1", optional = true } 70 | derive-where = "1.6.0" 71 | 72 | [dev-dependencies] 73 | clap = { version = "4.0.0", features = ["derive"] } 74 | env_logger = "0.11.0" 75 | actix-web = { version = "4.0.0-beta.15", default-features = false, features = ["macros"] } 76 | hyper = { version = "1.1.0", features = ["http1", "server"] } 77 | hyper-util = { version = "0.1.2", features = ["tokio"] } 78 | tokio = { version = "1.3.0", features = ["full"] } 79 | axum = { version = "0.8", features = [] } 80 | -------------------------------------------------------------------------------- /src/ls.rs: -------------------------------------------------------------------------------- 1 | //! Contains the structs and traits that define a `locksystem` backend. 2 | //! 3 | //! Note that the methods DO NOT return futures, they are synchronous. 4 | //! This is because currently only two locksystems exist, `MemLs` and `FakeLs`. 5 | //! Both of them do not do any I/O, all methods return instantly. 6 | //! 7 | //! If ever a locksystem gets built that does I/O (to a filesystem, 8 | //! a database, or over the network) we'll need to revisit this. 9 | //! 
10 | use crate::davpath::DavPath; 11 | use std::fmt::Debug; 12 | use std::future::Future; 13 | use std::pin::Pin; 14 | use std::time::{Duration, SystemTime}; 15 | 16 | use dyn_clone::{DynClone, clone_trait_object}; 17 | use xmltree::Element; 18 | 19 | /// Type of the locks returned by DavLockSystem methods. 20 | #[derive(Debug, Clone)] 21 | pub struct DavLock { 22 | /// Token. 23 | pub token: String, 24 | /// Path/ 25 | pub path: Box, 26 | /// Principal. 27 | pub principal: Option, 28 | /// Owner. 29 | pub owner: Option>, 30 | /// When the lock turns stale (absolute). 31 | pub timeout_at: Option, 32 | /// When the lock turns stale (relative). 33 | pub timeout: Option, 34 | /// Shared. 35 | pub shared: bool, 36 | /// Deep. 37 | pub deep: bool, 38 | } 39 | 40 | pub type LsFuture<'a, T> = Pin + Send + 'a>>; 41 | 42 | /// The trait that defines a locksystem. 43 | pub trait DavLockSystem: Debug + Send + Sync + DynClone { 44 | /// Lock a node. Returns `Ok(new_lock)` if succeeded, 45 | /// or `Err(conflicting_lock)` if failed. 46 | fn lock( 47 | &'_ self, 48 | path: &DavPath, 49 | principal: Option<&str>, 50 | owner: Option<&Element>, 51 | timeout: Option, 52 | shared: bool, 53 | deep: bool, 54 | ) -> LsFuture<'_, Result>; 55 | 56 | /// Unlock a node. Returns `Ok(())` if succeeded, `Err (())` if failed 57 | /// (because lock doesn't exist) 58 | fn unlock(&'_ self, path: &DavPath, token: &str) -> LsFuture<'_, Result<(), ()>>; 59 | 60 | /// Refresh lock. Returns updated lock if succeeded. 61 | fn refresh( 62 | &'_ self, 63 | path: &DavPath, 64 | token: &str, 65 | timeout: Option, 66 | ) -> LsFuture<'_, Result>; 67 | 68 | /// Check if node is locked and if so, if we own all the locks. 69 | /// If not, returns as Err one conflicting lock. 70 | fn check( 71 | &'_ self, 72 | path: &DavPath, 73 | principal: Option<&str>, 74 | ignore_principal: bool, 75 | deep: bool, 76 | submitted_tokens: Vec<&str>, 77 | ) -> LsFuture<'_, Result<(), DavLock>>; 78 | 79 | /// Find and return all locks that cover a given path. 80 | fn discover(&'_ self, path: &DavPath) -> LsFuture<'_, Vec>; 81 | 82 | /// Delete all locks at this path and below (after MOVE or DELETE) 83 | fn delete(&'_ self, path: &DavPath) -> LsFuture<'_, Result<(), ()>>; 84 | } 85 | 86 | clone_trait_object! {DavLockSystem} 87 | -------------------------------------------------------------------------------- /doc/Apache-PUT-with-Content-Range.md: -------------------------------------------------------------------------------- 1 | # HTTP PUT-with-Content-Range support. 2 | 3 | The [mod_dav](https://httpd.apache.org/docs/2.4/mod/mod_dav.html) module of 4 | the [Apache web server](https://httpd.apache.org/) was one of the first 5 | implementations of [Webdav](https://tools.ietf.org/html/rfc4918). Ever since 6 | the first released version, it has had support for partial uploads using 7 | the Content-Range header with PUT requests. 8 | 9 | ## A sample request 10 | 11 | ```text 12 | PUT /file.txt 13 | Content-Length: 4 14 | Content-Range: bytes 3-6/* 15 | 16 | ABCD 17 | ``` 18 | 19 | This request updates 'file.txt', specifically the bytes 3-6 (inclusive) to 20 | `ABCD`. 21 | 22 | There is no explicit support for appending to a file, that is simply done 23 | by writing just past the end of a file. 
For example, if a file has size 24 | 1000, and you want to append 4 bytes: 25 | 26 | ```text 27 | PUT /file.txt 28 | Content-Length: 4 29 | Content-Range: bytes 1000-1003/* 30 | 31 | 1234 32 | ``` 33 | 34 | ## Apache `mod_dav` behaviour: 35 | 36 | - The `Content-Range` header is required, and the syntax is `bytes START-END/LENGTH`. 37 | - END must be greater than or equal to START. 38 | - LENGTH is parsed by Apache mod_dav, and it must either be a valid number 39 | or a `*` (star), but mod_dav otherwise ignores it. Since it is not clearly 40 | defined what LENGTH should be, always use `*`. 41 | - Neither the start, nor the end-byte have to be within the file's current size. 42 | - If the start-byte is beyond the file's current length, the space in between 43 | will be filled with NULL bytes (`0x00`). 44 | 45 | ## Notes 46 | 47 | - `bytes`, _not_ `bytes=`. 48 | - The `Content-Length` header is not required by the original Apache mod_dav 49 | implementation. The body must either have a valid Content-Length, or it must 50 | use the `Chunked` transfer encoding. It is *strongly encouraged* though to 51 | include Content-Length, so that it can be validated against the range before 52 | accepting the PUT request. 53 | - If the `Content-Length` header is present, its value must be equal 54 | to `END - START + 1`. 55 | 56 | ## Status codes 57 | 58 | ### The following status codes are used: 59 | 60 | Status code | Reason 61 | ----------- | ------ 62 | 200 or 204 | When the operation was successful 63 | 400 | Invalid `Content-Range` header 64 | 416 | If there was something wrong with the bytes, such as a `Content-Length` not matching with what was sent as the start and end bytes, or an end byte being lower than the start byte. 65 | 501 | Content-Range header present, but not supported. 66 | 67 | ## RECOGNIZING PUT-with-Content-Range support (client). 68 | 69 | There is no official way to know if PUT-with-content-range is supported by 70 | a webserver. For a client it's probably best to do an OPTIONS request, 71 | and then check two things: 72 | 73 | - the `Server` header must contain the word `Apache` 74 | - the `DAV` header must contain ``. 75 | 76 | In that case, you can be sure you are talking to an Apache webserver with mod_dav enabled. 77 | 78 | ## IMPLEMENTING PUT-with-Content-Range support (server). 79 | 80 | Don't. Implement [sabredav-partialupdate](SABREDAV-partialupdate.md). 81 | 82 | ## MORE INFORMATION.
83 | 84 | https://blog.sphere.chronosempire.org.uk/2012/11/21/webdav-and-the-http-patch-nightmare 85 | 86 | -------------------------------------------------------------------------------- /.github/workflows/CI.yml: -------------------------------------------------------------------------------- 1 | on: [push, pull_request] 2 | 3 | name: CI 4 | 5 | jobs: 6 | check: 7 | name: Check 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v2 11 | - uses: actions-rs/toolchain@v1 12 | with: 13 | profile: minimal 14 | toolchain: stable 15 | override: true 16 | - uses: actions-rs/cargo@v1 17 | with: 18 | command: check 19 | args: --all-features 20 | 21 | test: 22 | name: Test Suite 23 | runs-on: ${{ matrix.os }} 24 | strategy: 25 | matrix: 26 | os: [ubuntu-latest, macos-latest, windows-latest] 27 | steps: 28 | - uses: actions/checkout@v2 29 | - uses: actions-rs/toolchain@v1 30 | with: 31 | profile: minimal 32 | toolchain: stable 33 | override: true 34 | - uses: Swatinem/rust-cache@v1 35 | - name: Check build with --no-default-features 36 | uses: actions-rs/cargo@v1 37 | with: 38 | command: build 39 | args: --no-default-features 40 | - name: Check build with default features 41 | uses: actions-rs/cargo@v1 42 | with: 43 | command: build 44 | - name: Check build with warp 45 | uses: actions-rs/cargo@v1 46 | with: 47 | command: build 48 | args: --features warp-compat 49 | - name: Check build with actix-web 50 | uses: actions-rs/cargo@v1 51 | with: 52 | command: build 53 | args: --features actix-compat 54 | - name: Check build with caldav 55 | uses: actions-rs/cargo@v1 56 | with: 57 | command: build 58 | args: --features caldav 59 | - name: Test 60 | uses: actions-rs/cargo@v1 61 | with: 62 | command: test 63 | args: --all-features --all 64 | 65 | compliance: 66 | name: Compliance Test 67 | runs-on: ubuntu-latest 68 | steps: 69 | - uses: actions/checkout@v2 70 | - uses: actions-rs/toolchain@v1 71 | with: 72 | profile: minimal 73 | toolchain: stable 74 | override: true 75 | - uses: Swatinem/rust-cache@v1 76 | - name: Build sample litmus server 77 | run: cargo build --example sample-litmus-server 78 | - name: Run sample litmus server 79 | run: | 80 | cargo run --example sample-litmus-server -- --memfs --auth & 81 | sleep 5 82 | - name: Build litmus 83 | run: | 84 | curl -O http://www.webdav.org/neon/litmus/litmus-0.13.tar.gz 85 | tar xf litmus-0.13.tar.gz 86 | cd litmus-0.13 87 | ./configure 88 | make 89 | - name: Run litmus protocol compliance test 90 | run: | 91 | cd litmus-0.13 92 | TESTS="http basic copymove locks props" HTDOCS=htdocs TESTROOT=. 
./litmus http://localhost:4918/ someuser somepass 93 | 94 | fmt: 95 | name: Rustfmt 96 | runs-on: ubuntu-latest 97 | steps: 98 | - uses: actions/checkout@v2 99 | - uses: actions-rs/toolchain@v1 100 | with: 101 | profile: minimal 102 | toolchain: stable 103 | override: true 104 | - run: rustup component add rustfmt 105 | - uses: actions-rs/cargo@v1 106 | with: 107 | command: fmt 108 | args: --all -- --check 109 | -------------------------------------------------------------------------------- /src/handle_options.rs: -------------------------------------------------------------------------------- 1 | use headers::HeaderMapExt; 2 | use http::{Request, Response}; 3 | 4 | use crate::body::Body; 5 | use crate::util::{DavMethod, dav_method}; 6 | use crate::{DavInner, DavResult}; 7 | 8 | impl DavInner { 9 | pub(crate) async fn handle_options(&self, req: &Request<()>) -> DavResult> { 10 | let mut res = Response::new(Body::empty()); 11 | 12 | let h = res.headers_mut(); 13 | 14 | // We could simply not report webdav level 2 support if self.allow doesn't 15 | // contain LOCK/UNLOCK. However we do advertise support, since there might 16 | // be LOCK/UNLOCK support in another part of the URL space. 17 | #[cfg(feature = "caldav")] 18 | let dav = "1,2,3,sabredav-partialupdate,calendar-access"; 19 | #[cfg(not(feature = "caldav"))] 20 | let dav = "1,2,3,sabredav-partialupdate"; 21 | h.insert("DAV", dav.parse().unwrap()); 22 | h.insert("MS-Author-Via", "DAV".parse().unwrap()); 23 | h.typed_insert(headers::ContentLength(0)); 24 | 25 | // Helper to add method to array if method is in fact 26 | // allowed. If the current method is not OPTIONS, leave 27 | // out the current method since we're probably called 28 | // for DavMethodNotAllowed. 29 | let method = dav_method(req.method()).unwrap_or(DavMethod::Options); 30 | let islock = |m| m == DavMethod::Lock || m == DavMethod::Unlock; 31 | let mm = |v: &mut Vec, m: &str, y: DavMethod| { 32 | if (y == DavMethod::Options || (y != method || islock(y) != islock(method))) 33 | && (!islock(y) || self.ls.is_some()) 34 | && self.allow.map(|x| x.contains(y)).unwrap_or(true) 35 | { 36 | v.push(m.to_string()); 37 | } 38 | }; 39 | 40 | let path = self.path(req); 41 | let meta = self.fs.metadata(&path, &self.credentials).await; 42 | let is_unmapped = meta.is_err(); 43 | let is_file = meta.map(|m| m.is_file()).unwrap_or_default(); 44 | let is_star = path.is_star() && method == DavMethod::Options; 45 | 46 | let mut v = Vec::new(); 47 | if is_unmapped && !is_star { 48 | mm(&mut v, "OPTIONS", DavMethod::Options); 49 | mm(&mut v, "MKCOL", DavMethod::MkCol); 50 | mm(&mut v, "PUT", DavMethod::Put); 51 | mm(&mut v, "LOCK", DavMethod::Lock); 52 | } else { 53 | if is_file || is_star { 54 | mm(&mut v, "HEAD", DavMethod::Head); 55 | mm(&mut v, "GET", DavMethod::Get); 56 | mm(&mut v, "PATCH", DavMethod::Patch); 57 | mm(&mut v, "PUT", DavMethod::Put); 58 | } 59 | mm(&mut v, "OPTIONS", DavMethod::Options); 60 | mm(&mut v, "PROPFIND", DavMethod::PropFind); 61 | mm(&mut v, "COPY", DavMethod::Copy); 62 | if path.as_url_string() != "/" { 63 | mm(&mut v, "MOVE", DavMethod::Move); 64 | mm(&mut v, "DELETE", DavMethod::Delete); 65 | } 66 | mm(&mut v, "LOCK", DavMethod::Lock); 67 | mm(&mut v, "UNLOCK", DavMethod::Unlock); 68 | #[cfg(feature = "caldav")] 69 | { 70 | mm(&mut v, "REPORT", DavMethod::Report); 71 | if is_unmapped { 72 | mm(&mut v, "MKCALENDAR", DavMethod::MkCalendar); 73 | } 74 | } 75 | } 76 | 77 | let a = v.join(",").parse().unwrap(); 78 | res.headers_mut().insert("allow", a); 79 | 80 | 
Ok(res) 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /src/body.rs: -------------------------------------------------------------------------------- 1 | //! Definitions for the Request and Response bodies. 2 | 3 | use std::error::Error as StdError; 4 | use std::io; 5 | use std::pin::Pin; 6 | use std::task::{Context, Poll}; 7 | 8 | use bytes::{Buf, Bytes}; 9 | use futures_util::stream::Stream; 10 | use http_body::Body as HttpBody; 11 | use pin_project_lite::pin_project; 12 | 13 | use crate::async_stream::AsyncStream; 14 | 15 | /// Body is returned by the webdav handler, and implements both `Stream` 16 | /// and `http_body::Body`. 17 | pub struct Body { 18 | pub inner: BodyType, 19 | } 20 | 21 | pub enum BodyType { 22 | Bytes(Option), 23 | AsyncStream(AsyncStream), 24 | Empty, 25 | } 26 | 27 | impl Body { 28 | /// Return an empty body. 29 | pub fn empty() -> Body { 30 | Body { 31 | inner: BodyType::Empty, 32 | } 33 | } 34 | } 35 | 36 | impl Stream for Body { 37 | type Item = io::Result; 38 | 39 | fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { 40 | match self.inner { 41 | BodyType::Bytes(ref mut strm) => Poll::Ready(strm.take().map(Ok)), 42 | BodyType::AsyncStream(ref mut strm) => { 43 | let strm = Pin::new(strm); 44 | strm.poll_next(cx) 45 | } 46 | BodyType::Empty => Poll::Ready(None), 47 | } 48 | } 49 | } 50 | 51 | impl HttpBody for Body { 52 | type Data = Bytes; 53 | type Error = io::Error; 54 | 55 | fn poll_frame( 56 | self: Pin<&mut Self>, 57 | cx: &mut Context<'_>, 58 | ) -> Poll, Self::Error>>> { 59 | self.poll_next(cx).map_ok(http_body::Frame::data) 60 | } 61 | } 62 | 63 | impl From for Body { 64 | fn from(t: String) -> Body { 65 | Body { 66 | inner: BodyType::Bytes(Some(Bytes::from(t))), 67 | } 68 | } 69 | } 70 | 71 | impl From<&str> for Body { 72 | fn from(t: &str) -> Body { 73 | Body { 74 | inner: BodyType::Bytes(Some(Bytes::from(t.to_string()))), 75 | } 76 | } 77 | } 78 | 79 | impl From for Body { 80 | fn from(t: Bytes) -> Body { 81 | Body { 82 | inner: BodyType::Bytes(Some(t)), 83 | } 84 | } 85 | } 86 | 87 | impl From> for Body { 88 | fn from(s: AsyncStream) -> Body { 89 | Body { 90 | inner: BodyType::AsyncStream(s), 91 | } 92 | } 93 | } 94 | 95 | pin_project! { 96 | // 97 | // A struct that contains a Stream, and implements http_body::Body. 98 | // 99 | pub(crate) struct StreamBody { 100 | #[pin] 101 | body: B, 102 | } 103 | } 104 | 105 | impl HttpBody for StreamBody 106 | where 107 | ReqData: Buf + Send, 108 | ReqError: StdError + Send + Sync + 'static, 109 | ReqBody: Stream>, 110 | { 111 | type Data = ReqData; 112 | type Error = ReqError; 113 | 114 | fn poll_frame( 115 | self: Pin<&mut Self>, 116 | cx: &mut Context<'_>, 117 | ) -> Poll, Self::Error>>> { 118 | let this = self.project(); 119 | this.body.poll_next(cx).map_ok(http_body::Frame::data) 120 | } 121 | } 122 | 123 | impl StreamBody 124 | where 125 | ReqData: Buf + Send, 126 | ReqError: StdError + Send + Sync + 'static, 127 | ReqBody: Stream>, 128 | { 129 | pub fn new(body: ReqBody) -> StreamBody { 130 | StreamBody { body } 131 | } 132 | } 133 | -------------------------------------------------------------------------------- /examples/caldav.rs: -------------------------------------------------------------------------------- 1 | //! CalDAV server example 2 | //! 3 | //! This example demonstrates how to set up a CalDAV server using the dav-server library. 4 | //! CalDAV is an extension of WebDAV for calendar data management. 5 | //! 6 | //! 
Usage: 7 | //! cargo run --example caldav --features caldav 8 | //! 9 | //! The server will be available at http://localhost:8080 10 | //! You can connect to it using CalDAV clients like Thunderbird, Apple Calendar, etc. 11 | 12 | use dav_server::{DavHandler, DavMethodSet, fakels::FakeLs, memfs::MemFs}; 13 | use hyper::{server::conn::http1, service::service_fn}; 14 | use hyper_util::rt::TokioIo; 15 | use std::{convert::Infallible, net::SocketAddr}; 16 | use tokio::net::TcpListener; 17 | 18 | #[tokio::main] 19 | async fn main() -> Result<(), Box> { 20 | env_logger::init(); 21 | 22 | let addr: SocketAddr = ([127, 0, 0, 1], 8080).into(); 23 | 24 | // Set up the DAV handler with CalDAV support 25 | // Note: Using MemFs for this example because it supports the property operations 26 | // needed for CalDAV collections. For production use, you'd want a filesystem 27 | // implementation that persists properties to disk (e.g., using extended attributes 28 | // or sidecar files). 29 | let dav_server = DavHandler::builder() 30 | .filesystem(MemFs::new()) 31 | .locksystem(FakeLs::new()) 32 | .methods(DavMethodSet::all()) 33 | .build_handler(); 34 | 35 | let listener = TcpListener::bind(addr).await?; 36 | 37 | println!("CalDAV server listening on {}", addr); 38 | println!("Calendar collections can be accessed at http://{}", addr); 39 | println!(); 40 | println!("NOTE: This example uses in-memory storage. Data will be lost when the server stops."); 41 | println!(); 42 | println!("To create a calendar collection, use:"); 43 | println!(" curl -i -X MKCALENDAR http://{}/my-calendar/", addr); 44 | println!(); 45 | println!("To add a calendar event, use:"); 46 | println!(" curl -i -X PUT http://{}/my-calendar/event1.ics \\", addr); 47 | println!(" -H 'Content-Type: text/calendar' \\"); 48 | println!(" --data-binary @event.ics"); 49 | println!(); 50 | println!("Example event.ics content:"); 51 | println!("BEGIN:VCALENDAR"); 52 | println!("VERSION:2.0"); 53 | println!("PRODID:-//Example Corp//CalDAV Client//EN"); 54 | println!("BEGIN:VEVENT"); 55 | println!("UID:12345@example.com"); 56 | println!("DTSTART:20240101T120000Z"); 57 | println!("DTEND:20240101T130000Z"); 58 | println!("SUMMARY:New Year Meeting"); 59 | println!("DESCRIPTION:Planning meeting for the new year"); 60 | println!("END:VEVENT"); 61 | println!("END:VCALENDAR"); 62 | 63 | // Start the server loop 64 | loop { 65 | let (stream, _) = listener.accept().await?; 66 | let dav_server = dav_server.clone(); 67 | 68 | let io = TokioIo::new(stream); 69 | 70 | tokio::task::spawn(async move { 71 | if let Err(err) = http1::Builder::new() 72 | .serve_connection( 73 | io, 74 | service_fn({ 75 | move |req| { 76 | let dav_server = dav_server.clone(); 77 | async move { Ok::<_, Infallible>(dav_server.handle(req).await) } 78 | } 79 | }), 80 | ) 81 | .await 82 | { 83 | eprintln!("Failed serving connection: {err:?}"); 84 | } 85 | }); 86 | } 87 | } 88 | 89 | #[cfg(not(feature = "caldav"))] 90 | fn main() { 91 | eprintln!("This example requires the 'caldav' feature to be enabled."); 92 | eprintln!("Run with: cargo run --example caldav --features caldav"); 93 | std::process::exit(1); 94 | } 95 | -------------------------------------------------------------------------------- /TODO.md: -------------------------------------------------------------------------------- 1 | 2 | # TODO list 3 | 4 | ## Protocol compliance 5 | 6 | ### Apply all headers 7 | 8 | The RFC says that for COPY/MOVE/DELETE with Depth: Infinity all headers 9 | must be applied to all resources. 
For example, in RFC 4918 section 9.6.1: 10 | 11 | ``` 12 | Any headers included with DELETE MUST be applied in processing every resource to be deleted. 13 | ``` 14 | 15 | Currently we do not do this: we do apply the If-Match, If-None-Match, If-Modified-Since, 16 | If-Unmodified-Since, and If headers to the request url, but not recursively. 17 | 18 | ### Props on symbolic links 19 | 20 | Should probably disallow that 21 | 22 | ### In MOVE/DELETE test locks separately per resource 23 | 24 | Right now we check if we hold the locks (if any) for the request url, and paths 25 | below it for Depth: Infinity requests. If we don't, the entire request fails. We 26 | should really check that for every resource to be MOVEd/DELETEd separately 27 | and only fail those resources. 28 | 29 | This does mean that we cannot MOVE a collection by doing a simple rename, we must 30 | do it resource-per-resource, like COPY. 31 | 32 | ## Race conditions 33 | 34 | During long-running requests like MOVE/COPY/DELETE we should really LOCK the resource 35 | so that no other request can race us. 36 | 37 | Actually, check if this is true. Isn't the webdav client responsible for this? 38 | 39 | Anyway: 40 | 41 | - if the resource is locked exclusively and we hold the lock: great, nothing to do 42 | - otherwise: 43 | - lock the request URL exclusively (unless already locked exclusively), Depth: infinite, 44 | _without checking if any other locks already exist_. This is a temporary lock. 45 | - now check if we actually can lock the request URL and paths below 46 | - if not, unlock, error 47 | - go ahead and do the work 48 | - unlock 49 | 50 | The temporary lock should probably have a timeout of say 10 seconds, where we 51 | refresh it every 5 seconds or so, so that a stale lock doesn't hang around 52 | too long if something goes catastrophically wrong. Might only happen when 53 | the lock database is separate from the webdav server. 54 | 55 | ## Improvements: 56 | 57 | - Do fake locking only for user-agents: 58 | 59 | - /WebDAVFS/ // Apple 60 | - /Microsoft Office OneNote 2013/' // MS 61 | - /^Microsoft-WebDAV/ // MS 62 | 63 | this is the list that NextCloud uses for fake locking. 64 | probably (WebDAVFS|Microsoft) would do the trick. 65 | 66 | - API: perhaps move filesystem interface to Path/PathBuf or similar and hide WebPath 67 | 68 | - add documentation 69 | - add tests, tests ... 70 | 71 | ## Project ideas: 72 | 73 | - Add support for properties to localfs.rs on XFS. XFS has unlimited and 74 | scalable extended attributes. ext2/3/4 can store max 4KB. On XFS we can 75 | then also store creationdate in an attribute. 76 | 77 | - Add support for changing live props like mtime/atime 78 | - atime could be done with Win32LastAccessTime 79 | - allow setting apache "executable" prop 80 | - it appears that there are webdav implementations that allow 81 | you to set "DAV:getcontentlength". 82 | 83 | - we could support (at least return) some Win32FileAttributes: 84 | - readonly: 00000001 (unix mode) 85 | - hidden: 00000002 (if the file starts with a ".") 86 | - dir: 00000010 87 | - file: 00000020 88 | 89 | readonly on dirs means "all files in the directory" so that 90 | is best not implemented.
91 | 92 | - allow setting of some windows live props: 93 | - readonly (on files, via chmod) 94 | - Win32LastAccessTime, Win32LastModifiedTime 95 | 96 | - implement [RFC4437 Webdav Redirectref](https://tools.ietf.org/html/rfc4437) -- basically support for symbolic links 97 | 98 | - implement [RFC3744 Webdac ACL](https://tools.ietf.org/html/rfc3744) 99 | 100 | ## Things I thought of but aren't going to work: 101 | 102 | ### Compression 103 | 104 | - support for compressing responses, at least PROPFIND. 105 | - support for compressed PUT requests 106 | 107 | Nice, but no webdav client that I know of uses compression. 108 | 109 | -------------------------------------------------------------------------------- /doc/SABREDAV-partialupdate.md: -------------------------------------------------------------------------------- 1 | # HTTP PATCH support 2 | 3 | This is a markdown translation of the document at 4 | [http://sabre.io/dav/http-patch/](http://sabre.io/dav/http-patch/) 5 | [© 2018 fruux GmbH](https://fruux.com/) 6 | 7 | The `Sabre\\DAV\\PartialUpdate\\Plugin` from the Sabre DAV library provides 8 | support for the HTTP PATCH method [RFC5789](http://tools.ietf.org/html/rfc5789). 9 | This allows you to update just a portion of a file, or append to a file. 10 | 11 | This document can be used as a spec for other implementors. There is some 12 | DAV-specific stuff in this document, but only in relation to the OPTIONS 13 | request. 14 | 15 | ## A sample request 16 | 17 | ``` 18 | PATCH /file.txt 19 | Content-Length: 4 20 | Content-Type: application/x-sabredav-partialupdate 21 | X-Update-Range: bytes=3-6 22 | 23 | ABCD 24 | ``` 25 | 26 | This request updates 'file.txt', specifically the bytes 3-6 (inclusive) to 27 | `ABCD`. 28 | 29 | If you just want to append to an existing file, use the following syntax: 30 | 31 | ``` 32 | PATCH /file.txt 33 | Content-Length: 4 34 | Content-Type: application/x-sabredav-partialupdate 35 | X-Update-Range: append 36 | 37 | 1234 38 | ``` 39 | 40 | The last request adds 4 bytes to the bottom of the file. 41 | 42 | ## The rules 43 | 44 | - The `Content-Length` header is required. 45 | - `X-Update-Range` is also required. 46 | - The `bytes` value is the exact same as the HTTP Range header. The two numbers 47 | are inclusive (so `3-6` means that bytes 3,4,5 and 6 will be updated). 48 | - Just like the HTTP Range header, the specified bytes is a 0-based index. 49 | - The `application/x-sabredav-partialupdate` must also be specified. 50 | - The end-byte is optional. 51 | - The start-byte cannot be omitted. 52 | - If the start byte is negative, it's calculated from the end of the file. So 53 | `-1` will update the last byte in the file. 54 | - Use `X-Update-Range: append` to add to the end of the file. 55 | - Neither the start, nor the end-byte have to be within the file's current size. 56 | - If the start-byte is beyond the file's current length, the space in between 57 | will be filled with NULL bytes (`0x00`). 58 | - The specification currently does not support multiple ranges. 59 | - If both start and end offsets are given, than both must be non-negative, and 60 | the end offset must be greater or equal to the start offset. 61 | 62 | ## More examples 63 | 64 | The following table illustrates most types of requests and what the end-result 65 | of them will be. 66 | 67 | It is assumed that the input file contains `1234567890`, and the request body 68 | always contains 4 dashes (`----`). 
69 | 70 | X-Update-Range header | Result 71 | --------------------- | ------- 72 | `bytes=0-3` | `----567890` 73 | `bytes=1-4` | `1----67890` 74 | `bytes=0-` | `----567890` 75 | `bytes=-4` | `123456----` 76 | `bytes=-2` | `12345678----` 77 | `bytes=2-` | `12----7890` 78 | `bytes=12-` | `1234567890..----` 79 | `append` | `1234567890----` 80 | 81 | Please note that in the `bytes=12-` example, we used dots (`.`) to represent 82 | what are actually `NULL` bytes (so `0x00`). The null byte is not printable. 83 | 84 | ## Status codes 85 | 86 | ### The following status codes should be used: 87 | 88 | Status code | Reason 89 | ----------- | ------ 90 | 200 or 204 | When the operation was successful 91 | 400 | Invalid `X-Update-Range` header 92 | 411 | `Content-Length` header was not provided 93 | 415 | Unrecognized content-type, should be `application/x-sabredav-partialupdate` 94 | 416 | If there was something wrong with the bytes, such as a `Content-Length` not matching with what was sent as the start and end bytes, or an end byte being lower than the start byte. 95 | 96 | ## OPTIONS 97 | 98 | If you want to be compliant with SabreDAV's implementation of PATCH, you must 99 | also return 'sabredav-partialupdate' in the 'DAV:' header: 100 | 101 | ``` 102 | HTTP/1.1 204 No Content 103 | DAV: 1, 2, 3, sabredav-partialupdate, extended-mkcol 104 | ``` 105 | 106 | This is only required if you are adding this feature to a DAV server. For 107 | non-webdav implementations such as REST services this is optional. 108 | 109 | -------------------------------------------------------------------------------- /src/fakels.rs: -------------------------------------------------------------------------------- 1 | //! Fake locksystem (to make Windows/macOS work). 2 | //! 3 | //! Several Webdav clients, like the ones on Windows and macOS, require just 4 | //! basic functionality to mount the Webdav server in read-only mode. However 5 | //! to be able to mount the Webdav server in read-write mode, they require the 6 | //! Webdav server to have Webdav class 2 compliance - that means, LOCK/UNLOCK 7 | //! support. 8 | //! 9 | //! In many cases, this is not actually important. A lot of the current Webdav 10 | //! server implementations that are used to serve a filesystem just fake it: 11 | //! LOCK/UNLOCK always succeed, checking for locktokens in 12 | //! If: headers always succeeds, and nothing is every really locked. 13 | //! 14 | //! `FakeLs` implements such a fake locksystem. 15 | use std::future; 16 | use std::time::{Duration, SystemTime}; 17 | 18 | use futures_util::FutureExt; 19 | use uuid::Uuid; 20 | use xmltree::Element; 21 | 22 | use crate::davpath::DavPath; 23 | use crate::ls::*; 24 | 25 | /// Fake locksystem implementation. 26 | #[derive(Debug, Clone)] 27 | pub struct FakeLs {} 28 | 29 | impl FakeLs { 30 | /// Create a new "fakels" locksystem. 
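    /// The returned box can be passed straight to `DavHandler::builder().locksystem(..)`,
    /// as the `hyper.rs` and `actix.rs` examples do.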
31 | pub fn new() -> Box { 32 | Box::new(FakeLs {}) 33 | } 34 | } 35 | 36 | fn tm_limit(d: Option) -> Duration { 37 | match d { 38 | None => Duration::new(120, 0), 39 | Some(d) => { 40 | if d.as_secs() > 120 { 41 | Duration::new(120, 0) 42 | } else { 43 | d 44 | } 45 | } 46 | } 47 | } 48 | 49 | impl DavLockSystem for FakeLs { 50 | fn lock( 51 | &'_ self, 52 | path: &DavPath, 53 | principal: Option<&str>, 54 | owner: Option<&Element>, 55 | timeout: Option, 56 | shared: bool, 57 | deep: bool, 58 | ) -> LsFuture<'_, Result> { 59 | let timeout = tm_limit(timeout); 60 | let timeout_at = SystemTime::now() + timeout; 61 | 62 | let d = if deep { 'I' } else { '0' }; 63 | let s = if shared { 'S' } else { 'E' }; 64 | let token = format!("opaquetoken:{}/{}/{}", Uuid::new_v4().hyphenated(), d, s); 65 | 66 | let lock = DavLock { 67 | token, 68 | path: Box::new(path.clone()), 69 | principal: principal.map(|s| s.to_string()), 70 | owner: owner.map(|o| Box::new(o.clone())), 71 | timeout_at: Some(timeout_at), 72 | timeout: Some(timeout), 73 | shared, 74 | deep, 75 | }; 76 | debug!("lock {} created", &lock.token); 77 | future::ready(Ok(lock)).boxed() 78 | } 79 | 80 | fn unlock(&'_ self, _path: &DavPath, _token: &str) -> LsFuture<'_, Result<(), ()>> { 81 | future::ready(Ok(())).boxed() 82 | } 83 | 84 | fn refresh( 85 | &'_ self, 86 | path: &DavPath, 87 | token: &str, 88 | timeout: Option, 89 | ) -> LsFuture<'_, Result> { 90 | debug!("refresh lock {token}"); 91 | let v: Vec<&str> = token.split('/').collect(); 92 | let deep = v.len() > 1 && v[1] == "I"; 93 | let shared = v.len() > 2 && v[2] == "S"; 94 | 95 | let timeout = tm_limit(timeout); 96 | let timeout_at = SystemTime::now() + timeout; 97 | 98 | let lock = DavLock { 99 | token: token.to_string(), 100 | path: Box::new(path.clone()), 101 | principal: None, 102 | owner: None, 103 | timeout_at: Some(timeout_at), 104 | timeout: Some(timeout), 105 | shared, 106 | deep, 107 | }; 108 | future::ready(Ok(lock)).boxed() 109 | } 110 | 111 | fn check( 112 | &'_ self, 113 | _path: &DavPath, 114 | _principal: Option<&str>, 115 | _ignore_principal: bool, 116 | _deep: bool, 117 | _submitted_tokens: Vec<&str>, 118 | ) -> LsFuture<'_, Result<(), DavLock>> { 119 | future::ready(Ok(())).boxed() 120 | } 121 | 122 | fn discover(&'_ self, _path: &DavPath) -> LsFuture<'_, Vec> { 123 | future::ready(Vec::new()).boxed() 124 | } 125 | 126 | fn delete(&'_ self, _path: &DavPath) -> LsFuture<'_, Result<(), ()>> { 127 | future::ready(Ok(())).boxed() 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /examples/sample-litmus-server.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Sample application. 3 | // 4 | // Listens on localhost:4918, plain http, no ssl. 
5 | // Connect to http://localhost:4918/ 6 | // 7 | 8 | use std::{convert::Infallible, error::Error, net::SocketAddr}; 9 | 10 | use clap::Parser; 11 | use headers::{Authorization, HeaderMapExt, authorization::Basic}; 12 | use hyper::{server::conn::http1, service::service_fn}; 13 | use hyper_util::rt::TokioIo; 14 | use tokio::net::TcpListener; 15 | 16 | use dav_server::{DavConfig, DavHandler, body::Body, fakels, localfs, memfs, memls}; 17 | 18 | #[derive(Clone)] 19 | struct Server { 20 | dh: DavHandler, 21 | auth: bool, 22 | } 23 | 24 | impl Server { 25 | pub fn new(directory: String, memls: bool, fakels: bool, auth: bool) -> Self { 26 | let mut config = DavHandler::builder(); 27 | if !directory.is_empty() { 28 | config = config.filesystem(localfs::LocalFs::new(directory, true, true, true)); 29 | } else { 30 | config = config.filesystem(memfs::MemFs::new()); 31 | }; 32 | if fakels { 33 | config = config.locksystem(fakels::FakeLs::new()); 34 | } 35 | if memls { 36 | config = config.locksystem(memls::MemLs::new()); 37 | } 38 | 39 | Server { 40 | dh: config.build_handler(), 41 | auth, 42 | } 43 | } 44 | 45 | async fn handle( 46 | &self, 47 | req: hyper::Request, 48 | ) -> Result, Infallible> { 49 | let user = if self.auth { 50 | // we want the client to authenticate. 51 | match req.headers().typed_get::>() { 52 | Some(Authorization(basic)) => Some(basic.username().to_string()), 53 | None => { 54 | // return a 401 reply. 55 | let response = hyper::Response::builder() 56 | .status(401) 57 | .header("WWW-Authenticate", "Basic realm=\"foo\"") 58 | .body(Body::from("please auth".to_string())) 59 | .unwrap(); 60 | return Ok(response); 61 | } 62 | } 63 | } else { 64 | None 65 | }; 66 | 67 | if let Some(user) = user { 68 | let config = DavConfig::new().principal(user); 69 | Ok(self.dh.handle_with(config, req).await) 70 | } else { 71 | Ok(self.dh.handle(req).await) 72 | } 73 | } 74 | } 75 | 76 | #[derive(Debug, clap::Parser)] 77 | #[command(about, version)] 78 | struct Cli { 79 | /// port to listen on 80 | #[arg(short = 'p', long, default_value = "4918")] 81 | port: u16, 82 | /// local directory to serve 83 | #[arg(short = 'd', long)] 84 | dir: Option, 85 | /// serve from ephemeral memory filesystem 86 | #[arg(short = 'm', long)] 87 | memfs: bool, 88 | /// use ephemeral memory locksystem 89 | #[arg(short = 'l', long)] 90 | memls: bool, 91 | /// use fake memory locksystem 92 | #[arg(short = 'f', long)] 93 | fakels: bool, 94 | /// require basic authentication 95 | #[arg(short = 'a', long)] 96 | auth: bool, 97 | } 98 | 99 | #[tokio::main] 100 | async fn main() -> Result<(), Box> { 101 | env_logger::init(); 102 | 103 | let args = Cli::parse(); 104 | 105 | let (dir, name) = match args.dir.as_ref() { 106 | Some(dir) => (dir.as_str(), dir.as_str()), 107 | None => ("", "memory filesystem"), 108 | }; 109 | let auth = args.auth; 110 | let memls = args.memfs || args.memls; 111 | let fakels = args.fakels; 112 | 113 | let dav_server = Server::new(dir.to_string(), memls, fakels, auth); 114 | 115 | let port = args.port; 116 | let addr: SocketAddr = ([0, 0, 0, 0], port).into(); 117 | let listener = TcpListener::bind(addr).await?; 118 | 119 | println!("Serving {} on {}", name, port); 120 | 121 | // We start a loop to continuously accept incoming connections 122 | loop { 123 | let (stream, _) = listener.accept().await?; 124 | let dav_server = dav_server.clone(); 125 | 126 | // Use an adapter to access something implementing `tokio::io` traits as if they implement 127 | // `hyper::rt` IO traits. 
128 | let io = TokioIo::new(stream); 129 | 130 | // Spawn a tokio task to serve multiple connections concurrently 131 | tokio::task::spawn(async move { 132 | // Finally, we bind the incoming connection to our `hello` service 133 | if let Err(err) = http1::Builder::new() 134 | // `service_fn` converts our function in a `Service` 135 | .serve_connection( 136 | io, 137 | service_fn({ 138 | move |req| { 139 | let dav_server = dav_server.clone(); 140 | async move { dav_server.clone().handle(req).await } 141 | } 142 | }), 143 | ) 144 | .await 145 | { 146 | eprintln!("Error serving connection: {:?}", err); 147 | } 148 | }); 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /examples/auth.rs: -------------------------------------------------------------------------------- 1 | use std::{convert::Infallible, fmt::Display, net::SocketAddr, path::Path}; 2 | 3 | use futures_util::{StreamExt, stream}; 4 | use http::{Request, Response, StatusCode}; 5 | use hyper::{body::Incoming, server::conn::http1, service::service_fn}; 6 | use hyper_util::rt::TokioIo; 7 | use tokio::{net::TcpListener, task::spawn}; 8 | 9 | use dav_server::{ 10 | DavHandler, 11 | body::Body, 12 | davpath::DavPath, 13 | fakels::FakeLs, 14 | fs::{ 15 | DavDirEntry, DavFile, DavMetaData, FsFuture, FsResult, FsStream, GuardedFileSystem, 16 | OpenOptions, ReadDirMeta, 17 | }, 18 | localfs::LocalFs, 19 | }; 20 | 21 | /// The server example demonstrates a limited scope policy for access to the file system. 22 | /// Depending on the filter specified by the user in the request, one will receive only files or directories. 23 | /// For example, try this URLs: 24 | /// - dav://dirs:-@127.0.0.1:4918 — responds only with directories. 25 | /// - dav://files:-@127.0.0.1:4918 — responds only with files. 
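/// - dav://all:-@127.0.0.1:4918 responds with both files and directories (no filtering).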
26 | #[tokio::main] 27 | async fn main() { 28 | env_logger::init(); 29 | let dir = "/tmp"; 30 | let addr: SocketAddr = ([127, 0, 0, 1], 4918).into(); 31 | let fs = FilteredFs::new(dir); 32 | let dav_server = DavHandler::builder() 33 | .filesystem(Box::new(fs) as _) 34 | .locksystem(FakeLs::new()) 35 | .build_handler(); 36 | let listener = TcpListener::bind(addr).await.unwrap(); 37 | println!("Listening {addr}"); 38 | loop { 39 | let (stream, _client_addr) = listener.accept().await.unwrap(); 40 | let dav_server = dav_server.clone(); 41 | let io = TokioIo::new(stream); 42 | spawn(async move { 43 | let service = service_fn(move |request| handle(request, dav_server.clone())); 44 | if let Err(err) = http1::Builder::new().serve_connection(io, service).await { 45 | eprintln!("Failed serving: {err:?}"); 46 | } 47 | }); 48 | } 49 | } 50 | 51 | async fn handle( 52 | request: Request, 53 | handler: DavHandler, 54 | ) -> Result, Infallible> { 55 | /// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/WWW-Authenticate 56 | static AUTH_CHALLENGE: &str = "Basic realm=\"Specify the directory entries' filter \ 57 | as a username: `dirs`, `files` or `all`; password — any string\""; 58 | let filter = match Filter::from_request(&request) { 59 | Ok(f) => f, 60 | Err(err) => { 61 | let response = Response::builder() 62 | .status(StatusCode::UNAUTHORIZED) 63 | .header("WWW-Authenticate", AUTH_CHALLENGE) 64 | .body(err.to_string().into()) 65 | .expect("Auth error response must be built fine"); 66 | return Ok(response); 67 | } 68 | }; 69 | Ok(handler.handle_guarded(request, filter).await) 70 | } 71 | 72 | #[derive(Clone)] 73 | struct FilteredFs { 74 | inner: Box, 75 | } 76 | 77 | impl FilteredFs { 78 | fn new(dir: impl AsRef) -> Self { 79 | Self { 80 | inner: LocalFs::new(dir, false, false, false), 81 | } 82 | } 83 | } 84 | 85 | impl GuardedFileSystem for FilteredFs { 86 | fn open<'a>( 87 | &'a self, 88 | path: &'a DavPath, 89 | options: OpenOptions, 90 | _credentials: &'a Filter, 91 | ) -> FsFuture<'a, Box> { 92 | self.inner.open(path, options, &()) 93 | } 94 | 95 | fn read_dir<'a>( 96 | &'a self, 97 | path: &'a DavPath, 98 | meta: ReadDirMeta, 99 | filter: &'a Filter, 100 | ) -> FsFuture<'a, FsStream>> { 101 | Box::pin(async move { 102 | let mut stream = self.inner.read_dir(path, meta, &()).await?; 103 | let mut entries = Vec::default(); 104 | while let Some(entry) = stream.next().await { 105 | let entry = entry?; 106 | if filter.matches(entry.as_ref()).await? { 107 | entries.push(Ok(entry)); 108 | } 109 | } 110 | Ok(Box::pin(stream::iter(entries)) as _) 111 | }) 112 | } 113 | 114 | fn metadata<'a>( 115 | &'a self, 116 | path: &'a DavPath, 117 | _credentials: &'a Filter, 118 | ) -> FsFuture<'a, Box> { 119 | self.inner.metadata(path, &()) 120 | } 121 | } 122 | 123 | #[derive(Clone)] 124 | enum Filter { 125 | All, 126 | Files, 127 | Dirs, 128 | } 129 | 130 | impl Filter { 131 | fn from_request(request: &Request) -> Result> { 132 | use headers::{Authorization, HeaderMapExt, authorization::Basic}; 133 | 134 | let auth = request 135 | .headers() 136 | .typed_get::>() 137 | .ok_or(Box::new("please auth") as _)?; 138 | match auth.username() { 139 | "all" => Ok(Filter::All), 140 | "files" => Ok(Filter::Files), 141 | "dirs" => Ok(Filter::Dirs), 142 | _ => Err(Box::new("unexpected filter value") as _), 143 | } 144 | } 145 | 146 | async fn matches(&self, entry: &dyn DavDirEntry) -> FsResult { 147 | if let Filter::All = self { 148 | return Ok(true); 149 | } 150 | Ok(entry.is_dir().await? 
== matches!(self, Filter::Dirs)) 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /src/multierror.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | 3 | use futures_util::{Stream, StreamExt}; 4 | 5 | use http::{Response, StatusCode}; 6 | use xml::EmitterConfig; 7 | use xml::common::XmlVersion; 8 | use xml::writer::EventWriter; 9 | use xml::writer::XmlEvent as XmlWEvent; 10 | 11 | use crate::DavError; 12 | use crate::async_stream::AsyncStream; 13 | use crate::body::Body; 14 | use crate::davpath::DavPath; 15 | use crate::util::MemBuffer; 16 | 17 | type Sender = crate::async_stream::Sender<(DavPath, StatusCode), DavError>; 18 | 19 | pub(crate) struct MultiError(Sender); 20 | 21 | impl MultiError { 22 | pub fn new(sender: Sender) -> MultiError { 23 | MultiError(sender) 24 | } 25 | 26 | pub async fn add_status<'a>( 27 | &'a mut self, 28 | path: &'a DavPath, 29 | status: impl Into + 'static, 30 | ) -> Result<(), futures_channel::mpsc::SendError> { 31 | let status = status.into().statuscode(); 32 | self.0.send((path.clone(), status)).await; 33 | Ok(()) 34 | } 35 | } 36 | 37 | type XmlWriter<'a> = EventWriter; 38 | 39 | fn write_elem<'b, S>(xw: &mut XmlWriter, name: S, text: &str) -> Result<(), DavError> 40 | where 41 | S: Into>, 42 | { 43 | let n = name.into(); 44 | xw.write(XmlWEvent::start_element(n))?; 45 | if !text.is_empty() { 46 | xw.write(XmlWEvent::characters(text))?; 47 | } 48 | xw.write(XmlWEvent::end_element())?; 49 | Ok(()) 50 | } 51 | 52 | fn write_response(w: &mut XmlWriter, path: &DavPath, sc: StatusCode) -> Result<(), DavError> { 53 | w.write(XmlWEvent::start_element("D:response"))?; 54 | let p = path.with_prefix().as_url_string(); 55 | write_elem(w, "D:href", &p)?; 56 | write_elem(w, "D:status", &format!("HTTP/1.1 {sc}"))?; 57 | w.write(XmlWEvent::end_element())?; 58 | Ok(()) 59 | } 60 | 61 | pub(crate) async fn multi_error( 62 | req_path: DavPath, 63 | status_stream: S, 64 | ) -> Result, DavError> 65 | where 66 | S: Stream> + Send + 'static, 67 | { 68 | // read the first path/status item 69 | let mut status_stream = Box::pin(status_stream); 70 | let (path, status) = match status_stream.next().await { 71 | None => { 72 | debug!("multi_error: empty status_stream"); 73 | return Err(DavError::ChanError); 74 | } 75 | Some(Err(e)) => return Err(e), 76 | Some(Ok(item)) => item, 77 | }; 78 | 79 | let mut items = Vec::new(); 80 | 81 | if path == req_path { 82 | // the first path/status item was for the request path. 83 | // see if there is a next item. 84 | match status_stream.next().await { 85 | None => { 86 | // No, this was the first and only item. 87 | let resp = Response::builder() 88 | .status(status) 89 | .body(Body::empty()) 90 | .unwrap(); 91 | return Ok(resp); 92 | } 93 | Some(Err(e)) => return Err(e), 94 | Some(Ok(item)) => { 95 | // Yes, more than one response. 96 | items.push(Ok((path, status))); 97 | items.push(Ok(item)); 98 | } 99 | } 100 | } else { 101 | items.push(Ok((path, status))); 102 | } 103 | 104 | // Transform path/status items to XML. 105 | let body = AsyncStream::new(|mut tx| { 106 | async move { 107 | // Write initial header. 
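// Illustration only (not verbatim output): the streamed body built below has
// roughly this shape, with one D:response element per path/status pair.
//
//   <?xml version="1.0" encoding="utf-8"?>
//   <D:multistatus xmlns:D="DAV:">
//     <D:response>
//       <D:href>/some/path</D:href>
//       <D:status>HTTP/1.1 423 Locked</D:status>
//     </D:response>
//     ...
//   </D:multistatus>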
108 | let mut xw = EventWriter::new_with_config( 109 | MemBuffer::new(), 110 | EmitterConfig { 111 | perform_indent: true, 112 | ..EmitterConfig::default() 113 | }, 114 | ); 115 | xw.write(XmlWEvent::StartDocument { 116 | version: XmlVersion::Version10, 117 | encoding: Some("utf-8"), 118 | standalone: None, 119 | }) 120 | .map_err(DavError::from)?; 121 | xw.write(XmlWEvent::start_element("D:multistatus").ns("D", "DAV:")) 122 | .map_err(DavError::from)?; 123 | let data = xw.inner_mut().take(); 124 | tx.send(data).await; 125 | 126 | // now write the items. 127 | let mut status_stream = futures_util::stream::iter(items).chain(status_stream); 128 | while let Some(res) = status_stream.next().await { 129 | let (path, status) = res?; 130 | let status = if status == StatusCode::NO_CONTENT { 131 | StatusCode::OK 132 | } else { 133 | status 134 | }; 135 | write_response(&mut xw, &path, status)?; 136 | let data = xw.inner_mut().take(); 137 | tx.send(data).await; 138 | } 139 | 140 | // and finally write the trailer. 141 | xw.write(XmlWEvent::end_element()).map_err(DavError::from)?; 142 | let data = xw.inner_mut().take(); 143 | tx.send(data).await; 144 | 145 | Ok::<_, io::Error>(()) 146 | } 147 | }); 148 | 149 | // return response. 150 | let resp = Response::builder() 151 | .header("content-type", "application/xml; charset=utf-8") 152 | .status(StatusCode::MULTI_STATUS) 153 | .body(Body::from(body)) 154 | .unwrap(); 155 | Ok(resp) 156 | } 157 | -------------------------------------------------------------------------------- /README.litmus-test.md: -------------------------------------------------------------------------------- 1 | 2 | # Webdav protocol compliance. 3 | 4 | The standard for webdav compliance testing is [`litmus`](http://www.webdav.org/neon/litmus/), 5 | which is available at [http://www.webdav.org/neon/litmus/](http://www.webdav.org/neon/litmus/). 6 | 7 | Building it: 8 | ``` 9 | curl -O http://www.webdav.org/neon/litmus/litmus-0.13.tar.gz 10 | tar xf litmus-0.13.tar.gz 11 | cd litmus-0.13 12 | ./configure 13 | make 14 | ``` 15 | 16 | Then run the test server (`sample-litmus-server`). For some tests, `litmus` 17 | assumes that it is using basic authentication, so you must run the server 18 | with the `--auth` flag. 19 | ``` 20 | cd webdav-handler-rs 21 | cargo run --example sample-litmus-server -- --memfs --auth 22 | ``` 23 | 24 | You do not have to install the litmus binary, it's possible to run the tests 25 | straight from the unpacked & compiled litmus directory (`someuser` and 26 | `somepass` are literal, you do not have to put a real username/password there): 27 | 28 | ``` 29 | $ cd litmus-0.13 30 | $ TESTS="http basic copymove locks props" HTDOCS=htdocs TESTROOT=. ./litmus http://localhost:4918/ someuser somepass 31 | 32 | -> running `http': 33 | 0. init.................. pass 34 | 1. begin................. pass 35 | 2. expect100............. pass 36 | 3. finish................ pass 37 | <- summary for `http': of 4 tests run: 4 passed, 0 failed. 100.0% 38 | -> running `basic': 39 | 0. init.................. pass 40 | 1. begin................. pass 41 | 2. options............... pass 42 | 3. put_get............... pass 43 | 4. put_get_utf8_segment.. pass 44 | 5. put_no_parent......... pass 45 | 6. mkcol_over_plain...... pass 46 | 7. delete................ pass 47 | 8. delete_null........... pass 48 | 9. delete_fragment....... WARNING: DELETE removed collection resource with Request-URI including fragment; unsafe 49 | ...................... pass (with 1 warning) 50 | 10. 
mkcol................. pass 51 | 11. mkcol_again........... pass 52 | 12. delete_coll........... pass 53 | 13. mkcol_no_parent....... pass 54 | 14. mkcol_with_body....... pass 55 | 15. finish................ pass 56 | <- summary for `basic': of 16 tests run: 16 passed, 0 failed. 100.0% 57 | -> 1 warning was issued. 58 | -> running `copymove': 59 | 0. init.................. pass 60 | 1. begin................. pass 61 | 2. copy_init............. pass 62 | 3. copy_simple........... pass 63 | 4. copy_overwrite........ pass 64 | 5. copy_nodestcoll....... pass 65 | 6. copy_cleanup.......... pass 66 | 7. copy_coll............. pass 67 | 8. copy_shallow.......... pass 68 | 9. move.................. pass 69 | 10. move_coll............. pass 70 | 11. move_cleanup.......... pass 71 | 12. finish................ pass 72 | <- summary for `copymove': of 13 tests run: 13 passed, 0 failed. 100.0% 73 | -> running `locks': 74 | 0. init.................. pass 75 | 1. begin................. pass 76 | 2. options............... pass 77 | 3. precond............... pass 78 | 4. init_locks............ pass 79 | 5. put................... pass 80 | 6. lock_excl............. pass 81 | 7. discover.............. pass 82 | 8. refresh............... pass 83 | 9. notowner_modify....... pass 84 | 10. notowner_lock......... pass 85 | 11. owner_modify.......... pass 86 | 12. notowner_modify....... pass 87 | 13. notowner_lock......... pass 88 | 14. copy.................. pass 89 | 15. cond_put.............. pass 90 | 16. fail_cond_put......... pass 91 | 17. cond_put_with_not..... pass 92 | 18. cond_put_corrupt_token pass 93 | 19. complex_cond_put...... pass 94 | 20. fail_complex_cond_put. pass 95 | 21. unlock................ pass 96 | 22. fail_cond_put_unlocked pass 97 | 23. lock_shared........... pass 98 | 24. notowner_modify....... pass 99 | 25. notowner_lock......... pass 100 | 26. owner_modify.......... pass 101 | 27. double_sharedlock..... pass 102 | 28. notowner_modify....... pass 103 | 29. notowner_lock......... pass 104 | 30. unlock................ pass 105 | 31. prep_collection....... pass 106 | 32. lock_collection....... pass 107 | 33. owner_modify.......... pass 108 | 34. notowner_modify....... pass 109 | 35. refresh............... pass 110 | 36. indirect_refresh...... pass 111 | 37. unlock................ pass 112 | 38. unmapped_lock......... pass 113 | 39. unlock................ pass 114 | 40. finish................ pass 115 | <- summary for `locks': of 41 tests run: 41 passed, 0 failed. 100.0% 116 | -> running `props': 117 | 0. init.................. pass 118 | 1. begin................. pass 119 | 2. propfind_invalid...... pass 120 | 3. propfind_invalid2..... pass 121 | 4. propfind_d0........... pass 122 | 5. propinit.............. pass 123 | 6. propset............... pass 124 | 7. propget............... pass 125 | 8. propextended.......... pass 126 | 9. propmove.............. pass 127 | 10. propget............... pass 128 | 11. propdeletes........... pass 129 | 12. propget............... pass 130 | 13. propreplace........... pass 131 | 14. propget............... pass 132 | 15. propnullns............ pass 133 | 16. propget............... pass 134 | 17. prophighunicode....... pass 135 | 18. propget............... pass 136 | 19. propremoveset......... pass 137 | 20. propget............... pass 138 | 21. propsetremove......... pass 139 | 22. propget............... pass 140 | 23. propvalnspace......... pass 141 | 24. propwformed........... pass 142 | 25. propinit.............. pass 143 | 26. propmanyns............ 
pass 144 | 27. propget............... pass 145 | 28. propcleanup........... pass 146 | 29. finish................ pass 147 | <- summary for `props': of 30 tests run: 30 passed, 0 failed. 100.0% 148 | ``` 149 | 150 | -------------------------------------------------------------------------------- /src/actix.rs: -------------------------------------------------------------------------------- 1 | //! Adapters to use the standard `http` types with Actix. 2 | //! 3 | //! Using the adapters in this crate, it's easy to build a webdav 4 | //! handler for actix: 5 | //! 6 | //! ```no_run 7 | //! use dav_server::{DavHandler, actix::DavRequest, actix::DavResponse}; 8 | //! use actix_web::web; 9 | //! 10 | //! pub async fn dav_handler(req: DavRequest, davhandler: web::Data) -> DavResponse { 11 | //! davhandler.handle(req.request).await.into() 12 | //! } 13 | //! ``` 14 | //! 15 | use std::{ 16 | convert::TryInto, 17 | future, io, 18 | pin::Pin, 19 | task::{Context, Poll}, 20 | }; 21 | 22 | use actix_web::body::BoxBody; 23 | use actix_web::error::PayloadError; 24 | use actix_web::{Error, FromRequest, HttpRequest, HttpResponse, dev}; 25 | use bytes::Bytes; 26 | use futures_util::Stream; 27 | use pin_project_lite::pin_project; 28 | 29 | /// http::Request compatibility. 30 | /// 31 | /// Wraps `http::Request` and implements `actix_web::FromRequest`. 32 | pub struct DavRequest { 33 | pub request: http::Request, 34 | prefix: Option, 35 | } 36 | 37 | impl DavRequest { 38 | /// Returns the request path minus the tail. 39 | pub fn prefix(&self) -> Option<&str> { 40 | self.prefix.as_deref() 41 | } 42 | } 43 | 44 | impl FromRequest for DavRequest { 45 | type Error = Error; 46 | type Future = future::Ready>; 47 | 48 | fn from_request(req: &HttpRequest, payload: &mut dev::Payload) -> Self::Future { 49 | let mut builder = http::Request::builder() 50 | .method(req.method().as_ref()) 51 | .uri(req.uri().to_string()) 52 | .version(from_actix_http_version(req.version())); 53 | for (name, value) in req.headers().iter() { 54 | builder = builder.header(name.as_str(), value.as_ref()); 55 | } 56 | let path = req.path(); 57 | let tail = req.match_info().unprocessed(); 58 | let prefix = match &path[..path.len() - tail.len()] { 59 | "" | "/" => None, 60 | x => Some(x.to_string()), 61 | }; 62 | 63 | let body = DavBody { 64 | body: payload.take(), 65 | }; 66 | let stdreq = DavRequest { 67 | request: builder.body(body).unwrap(), 68 | prefix, 69 | }; 70 | future::ready(Ok(stdreq)) 71 | } 72 | } 73 | 74 | pin_project! { 75 | /// Body type for `DavRequest`. 76 | /// 77 | /// It wraps actix's `PayLoad` and implements `http_body::Body`. 78 | pub struct DavBody { 79 | #[pin] 80 | body: dev::Payload, 81 | } 82 | } 83 | 84 | impl http_body::Body for DavBody { 85 | type Data = Bytes; 86 | type Error = io::Error; 87 | 88 | fn poll_frame( 89 | self: Pin<&mut Self>, 90 | cx: &mut Context<'_>, 91 | ) -> Poll, Self::Error>>> { 92 | self.project() 93 | .body 94 | .poll_next(cx) 95 | .map_ok(http_body::Frame::data) 96 | .map_err(|err| match err { 97 | PayloadError::Incomplete(Some(err)) => err, 98 | PayloadError::Incomplete(None) => io::ErrorKind::BrokenPipe.into(), 99 | PayloadError::Io(err) => err, 100 | err => io::Error::other(format!("{err:?}")), 101 | }) 102 | } 103 | } 104 | 105 | /// `http::Response` compatibility. 106 | /// 107 | /// Wraps `http::Response` and implements actix_web::Responder. 
108 | pub struct DavResponse(pub http::Response); 109 | 110 | impl From> for DavResponse { 111 | fn from(resp: http::Response) -> DavResponse { 112 | DavResponse(resp) 113 | } 114 | } 115 | 116 | impl actix_web::Responder for DavResponse { 117 | type Body = BoxBody; 118 | 119 | fn respond_to(self, _req: &HttpRequest) -> HttpResponse { 120 | use crate::body::{Body, BodyType}; 121 | 122 | let (parts, body) = self.0.into_parts(); 123 | let mut builder = HttpResponse::build(parts.status.as_u16().try_into().unwrap()); 124 | for (name, value) in parts.headers.into_iter() { 125 | builder.append_header((name.unwrap().as_str(), value.as_ref())); 126 | } 127 | // I noticed that actix-web returns an empty chunked body 128 | // (\r\n0\r\n\r\n) and _no_ Transfer-Encoding header on 129 | // a 204 statuscode. It's probably because of 130 | // builder.streaming(). So only use builder.streaming() 131 | // on actual streaming replies. 132 | match body.inner { 133 | BodyType::Bytes(None) => builder.body(""), 134 | BodyType::Bytes(Some(b)) => builder.body(b), 135 | BodyType::Empty => builder.body(""), 136 | b @ BodyType::AsyncStream(..) => builder.streaming(Body { inner: b }), 137 | } 138 | } 139 | } 140 | 141 | /// Converts HTTP version from `actix_web` version of `http` crate while `actix_web` remains on old version. 142 | /// https://github.com/actix/actix-web/issues/3384 143 | fn from_actix_http_version(v: actix_web::http::Version) -> http::Version { 144 | match v { 145 | actix_web::http::Version::HTTP_3 => http::Version::HTTP_3, 146 | actix_web::http::Version::HTTP_2 => http::Version::HTTP_2, 147 | actix_web::http::Version::HTTP_11 => http::Version::HTTP_11, 148 | actix_web::http::Version::HTTP_10 => http::Version::HTTP_10, 149 | actix_web::http::Version::HTTP_09 => http::Version::HTTP_09, 150 | v => unreachable!("unexpected HTTP version {:?}", v), 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /src/async_stream.rs: -------------------------------------------------------------------------------- 1 | //! Use an [async block][async] to produce items for a stream. 2 | //! 3 | //! Example: 4 | //! 5 | //! ```rust ignore 6 | //! use futures_util::StreamExt; 7 | //! use futures_executor::block_on; 8 | //! # use dav_server::async_stream; 9 | //! use async_stream::AsyncStream; 10 | //! 11 | //! let mut strm = AsyncStream::::new(|mut tx| async move { 12 | //! for i in 0u8..10 { 13 | //! tx.send(i).await; 14 | //! } 15 | //! Ok(()) 16 | //! }); 17 | //! 18 | //! let fut = async { 19 | //! let mut count = 0; 20 | //! while let Some(item) = strm.next().await { 21 | //! println!("{:?}", item); 22 | //! count += 1; 23 | //! } 24 | //! assert!(count == 10); 25 | //! }; 26 | //! block_on(fut); 27 | //! 28 | //! ``` 29 | //! 30 | //! The stream will produce a `Result` where the `Item` 31 | //! is an item sent with [tx.send(item)][send]. Any errors returned by 32 | //! the async closure will be returned as an error value on 33 | //! the stream. 34 | //! 35 | //! On success the async closure should return `Ok(())`. 36 | //! 37 | //! [async]: https://rust-lang.github.io/async-book/getting_started/async_await_primer.html 38 | //! [send]: async_stream/struct.Sender.html#method.send 39 | //! 40 | use std::cell::Cell; 41 | use std::future::Future; 42 | use std::marker::PhantomData; 43 | use std::pin::Pin; 44 | use std::rc::Rc; 45 | use std::task::{Context, Poll}; 46 | 47 | use futures_util::Stream; 48 | 49 | /// Future returned by the Sender.send() method. 
50 | /// 51 | /// Completes when the item is sent. 52 | #[must_use] 53 | pub struct SenderFuture { 54 | is_ready: bool, 55 | } 56 | 57 | impl SenderFuture { 58 | fn new() -> SenderFuture { 59 | SenderFuture { is_ready: false } 60 | } 61 | } 62 | 63 | impl Future for SenderFuture { 64 | type Output = (); 65 | 66 | fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { 67 | if self.is_ready { 68 | Poll::Ready(()) 69 | } else { 70 | self.is_ready = true; 71 | Poll::Pending 72 | } 73 | } 74 | } 75 | 76 | // Only internally used by one AsyncStream and never shared 77 | // in any other way, so we don't have to use Arc>. 78 | /// Type of the sender passed as first argument into the async closure. 79 | pub struct Sender(Rc>>, PhantomData); 80 | unsafe impl Sync for Sender {} 81 | unsafe impl Send for Sender {} 82 | 83 | impl Sender { 84 | fn new(item_opt: Option) -> Sender { 85 | Sender(Rc::new(Cell::new(item_opt)), PhantomData::) 86 | } 87 | 88 | // note that this is NOT impl Clone for Sender, it's private. 89 | fn clone(&self) -> Sender { 90 | Sender(self.0.clone(), PhantomData::) 91 | } 92 | 93 | /// Send one item to the stream. 94 | pub fn send(&mut self, item: T) -> SenderFuture 95 | where 96 | T: Into, 97 | { 98 | self.0.set(Some(item.into())); 99 | SenderFuture::new() 100 | } 101 | } 102 | 103 | /// An abstraction around a future, where the 104 | /// future can internally loop and yield items. 105 | /// 106 | /// AsyncStream::new() takes a [Future][Future] ([async closure][async], usually) 107 | /// and AsyncStream then implements a [futures 0.3 Stream][Stream]. 108 | /// 109 | /// [async]: https://rust-lang.github.io/async-book/getting_started/async_await_primer.html 110 | /// [Future]: https://doc.rust-lang.org/std/future/trait.Future.html 111 | /// [Stream]: https://docs.rs/futures/0.3/futures/stream/trait.Stream.html 112 | #[must_use] 113 | pub struct AsyncStream { 114 | item: Sender, 115 | #[allow(clippy::type_complexity)] 116 | fut: Option> + 'static + Send>>>, 117 | } 118 | 119 | impl AsyncStream { 120 | /// Create a new stream from a closure returning a Future 0.3, 121 | /// or an "async closure" (which is the same). 122 | /// 123 | /// The closure is passed one argument, the sender, which has a 124 | /// method "send" that can be called to send a item to the stream. 125 | pub fn new(f: F) -> Self 126 | where 127 | F: FnOnce(Sender) -> R, 128 | R: Future> + Send + 'static, 129 | Item: 'static, 130 | { 131 | let sender = Sender::new(None); 132 | AsyncStream:: { 133 | item: sender.clone(), 134 | fut: Some(Box::pin(f(sender))), 135 | } 136 | } 137 | } 138 | 139 | /// Stream implementation for Futures 0.3. 140 | impl Stream for AsyncStream { 141 | type Item = Result; 142 | 143 | fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>> { 144 | let pollres = { 145 | let fut = self.fut.as_mut().unwrap(); 146 | fut.as_mut().poll(cx) 147 | }; 148 | match pollres { 149 | // If the future returned Poll::Ready, that signals the end of the stream. 150 | Poll::Ready(Ok(_)) => Poll::Ready(None), 151 | Poll::Ready(Err(e)) => Poll::Ready(Some(Err(e))), 152 | Poll::Pending => { 153 | // Pending means that some sub-future returned pending. That sub-future 154 | // _might_ have been the SenderFuture returned by Sender.send, so 155 | // check if there is an item available in self.item. 
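// Descriptive note: the Sender's Cell holds at most one item, and SenderFuture
// returns Pending exactly once without registering a waker. Progress therefore
// depends on the consumer polling this stream again, which re-polls the inner
// future and lets the async closure continue past its `send().await`.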
156 | let mut item = self.item.0.replace(None); 157 | if item.is_none() { 158 | Poll::Pending 159 | } else { 160 | Poll::Ready(Some(Ok(item.take().unwrap()))) 161 | } 162 | } 163 | } 164 | } 165 | } 166 | -------------------------------------------------------------------------------- /src/xmltree_ext.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Cow; 2 | use std::io::{Read, Write}; 3 | 4 | use xml::EmitterConfig; 5 | use xml::common::XmlVersion; 6 | use xml::writer::EventWriter; 7 | use xml::writer::XmlEvent as XmlWEvent; 8 | 9 | use xmltree::{self, Element, XMLNode}; 10 | 11 | use crate::{DavError, DavResult}; 12 | 13 | pub(crate) trait ElementExt { 14 | /// Builder. 15 | fn new2<'a, E: Into<&'a str>>(e: E) -> Self; 16 | /// Builder. 17 | fn ns>(self, prefix: S, namespace: S) -> Self; 18 | /// Builder. 19 | fn text>(self, t: T) -> Self; 20 | /// Like parse, but returns DavError. 21 | fn parse2(r: R) -> Result; 22 | /// Add a child element. 23 | fn push_element(&mut self, e: Element); 24 | /// Iterator over the children that are Elements. 25 | fn child_elems_into_iter(self) -> Box>; 26 | /// Iterator over the children that are Elements. 27 | fn child_elems_iter<'a>(&'a self) -> Box + 'a>; 28 | /// Vec of the children that are Elements. 29 | fn take_child_elems(self) -> Vec; 30 | /// Does the element have children that are also Elements. 31 | fn has_child_elems(&self) -> bool; 32 | /// Write the element using an EventWriter. 33 | fn write_ev(&self, emitter: &mut EventWriter) -> xml::writer::Result<()>; 34 | } 35 | 36 | impl ElementExt for Element { 37 | fn ns>(mut self, prefix: S, namespace: S) -> Element { 38 | let mut ns = self.namespaces.unwrap_or_else(xmltree::Namespace::empty); 39 | ns.force_put(prefix.into(), namespace.into()); 40 | self.namespaces = Some(ns); 41 | self 42 | } 43 | 44 | fn new2<'a, N: Into<&'a str>>(n: N) -> Element { 45 | let v: Vec<&str> = n.into().splitn(2, ':').collect(); 46 | if v.len() == 1 { 47 | Element::new(v[0]) 48 | } else { 49 | let mut e = Element::new(v[1]); 50 | e.prefix = Some(v[0].to_string()); 51 | e 52 | } 53 | } 54 | 55 | fn text>(mut self, t: S) -> Element { 56 | let nodes = self 57 | .children 58 | .drain(..) 
59 | .filter(|n| n.as_text().is_none()) 60 | .collect(); 61 | self.children = nodes; 62 | self.children.push(XMLNode::Text(t.into())); 63 | self 64 | } 65 | 66 | fn push_element(&mut self, e: Element) { 67 | self.children.push(XMLNode::Element(e)); 68 | } 69 | 70 | fn child_elems_into_iter(self) -> Box> { 71 | let iter = self.children.into_iter().filter_map(|n| match n { 72 | XMLNode::Element(e) => Some(e), 73 | _ => None, 74 | }); 75 | Box::new(iter) 76 | } 77 | 78 | fn child_elems_iter<'a>(&'a self) -> Box + 'a> { 79 | let iter = self.children.iter().filter_map(|n| n.as_element()); 80 | Box::new(iter) 81 | } 82 | 83 | fn take_child_elems(self) -> Vec { 84 | self.children 85 | .into_iter() 86 | .filter_map(|n| match n { 87 | XMLNode::Element(e) => Some(e), 88 | _ => None, 89 | }) 90 | .collect() 91 | } 92 | 93 | fn has_child_elems(&self) -> bool { 94 | self.children.iter().find_map(|n| n.as_element()).is_some() 95 | } 96 | 97 | fn parse2(r: R) -> Result { 98 | let res = Element::parse(r); 99 | match res { 100 | Ok(elems) => Ok(elems), 101 | Err(xmltree::ParseError::MalformedXml(_)) => Err(DavError::XmlParseError), 102 | Err(_) => Err(DavError::XmlReadError), 103 | } 104 | } 105 | 106 | fn write_ev(&self, emitter: &mut EventWriter) -> xml::writer::Result<()> { 107 | use xml::attribute::Attribute; 108 | use xml::name::Name; 109 | use xml::namespace::Namespace; 110 | use xml::writer::events::XmlEvent; 111 | 112 | let mut name = Name::local(&self.name); 113 | if let Some(ref ns) = self.namespace { 114 | name.namespace = Some(ns); 115 | } 116 | if let Some(ref p) = self.prefix { 117 | name.prefix = Some(p); 118 | } 119 | 120 | let mut attributes = Vec::with_capacity(self.attributes.len()); 121 | for (k, v) in &self.attributes { 122 | attributes.push(Attribute { 123 | name: Name::local(k), 124 | value: v, 125 | }); 126 | } 127 | 128 | let empty_ns = Namespace::empty(); 129 | let namespace = if let Some(ref ns) = self.namespaces { 130 | Cow::Borrowed(ns) 131 | } else { 132 | Cow::Borrowed(&empty_ns) 133 | }; 134 | 135 | emitter.write(XmlEvent::StartElement { 136 | name, 137 | attributes: Cow::Owned(attributes), 138 | namespace, 139 | })?; 140 | for node in &self.children { 141 | match node { 142 | XMLNode::Element(elem) => elem.write_ev(emitter)?, 143 | XMLNode::Text(text) => emitter.write(XmlEvent::Characters(text))?, 144 | XMLNode::Comment(comment) => emitter.write(XmlEvent::Comment(comment))?, 145 | XMLNode::CData(comment) => emitter.write(XmlEvent::CData(comment))?, 146 | XMLNode::ProcessingInstruction(name, data) => match data.to_owned() { 147 | Some(string) => emitter.write(XmlEvent::ProcessingInstruction { 148 | name, 149 | data: Some(&string), 150 | })?, 151 | None => emitter.write(XmlEvent::ProcessingInstruction { name, data: None })?, 152 | }, 153 | } 154 | // elem.write_ev(emitter)?; 155 | } 156 | emitter.write(XmlEvent::EndElement { name: Some(name) })?; 157 | 158 | Ok(()) 159 | } 160 | } 161 | 162 | pub(crate) fn emitter(w: W) -> DavResult> { 163 | let mut emitter = EventWriter::new_with_config( 164 | w, 165 | EmitterConfig { 166 | perform_indent: false, 167 | indent_string: Cow::Borrowed(""), 168 | ..Default::default() 169 | }, 170 | ); 171 | emitter.write(XmlWEvent::StartDocument { 172 | version: XmlVersion::Version10, 173 | encoding: Some("utf-8"), 174 | standalone: None, 175 | })?; 176 | Ok(emitter) 177 | } 178 | -------------------------------------------------------------------------------- /src/errors.rs: 
-------------------------------------------------------------------------------- 1 | use std::error::Error; 2 | use std::io::{self, ErrorKind}; 3 | 4 | use http::StatusCode; 5 | 6 | use crate::fs::FsError; 7 | 8 | pub(crate) type DavResult = Result; 9 | 10 | #[derive(Debug)] 11 | pub(crate) enum DavError { 12 | XmlReadError, // error reading/parsing xml 13 | XmlParseError, // error interpreting xml 14 | InvalidPath, // error parsing path 15 | IllegalPath, // path not valid here 16 | ForbiddenPath, // too many dotdots 17 | UnknownDavMethod, 18 | ChanError, 19 | Utf8Error, 20 | Status(StatusCode), 21 | StatusClose(StatusCode), 22 | FsError(FsError), 23 | IoError(io::Error), 24 | XmlReaderError(xml::reader::Error), 25 | XmlWriterError(xml::writer::Error), 26 | } 27 | 28 | impl Error for DavError { 29 | fn description(&self) -> &str { 30 | "DAV error" 31 | } 32 | 33 | fn cause(&self) -> Option<&dyn Error> { 34 | match *self { 35 | DavError::FsError(ref e) => Some(e), 36 | DavError::IoError(ref e) => Some(e), 37 | DavError::XmlReaderError(ref e) => Some(e), 38 | DavError::XmlWriterError(ref e) => Some(e), 39 | _ => None, 40 | } 41 | } 42 | } 43 | 44 | impl std::fmt::Display for DavError { 45 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 46 | match *self { 47 | DavError::XmlReaderError(_) => write!(f, "XML parse error"), 48 | DavError::XmlWriterError(_) => write!(f, "XML generate error"), 49 | DavError::IoError(_) => write!(f, "I/O error"), 50 | _ => write!(f, "{self:?}"), 51 | } 52 | } 53 | } 54 | 55 | impl From for DavError { 56 | fn from(e: FsError) -> Self { 57 | DavError::FsError(e) 58 | } 59 | } 60 | 61 | impl From for io::Error { 62 | fn from(e: DavError) -> Self { 63 | match e { 64 | DavError::IoError(e) => e, 65 | DavError::FsError(e) => e.into(), 66 | _ => io::Error::other(e), 67 | } 68 | } 69 | } 70 | 71 | impl From for io::Error { 72 | fn from(e: FsError) -> Self { 73 | fserror_to_ioerror(e) 74 | } 75 | } 76 | 77 | impl From for DavError { 78 | fn from(e: io::Error) -> Self { 79 | DavError::IoError(e) 80 | } 81 | } 82 | 83 | impl From for DavError { 84 | fn from(e: StatusCode) -> Self { 85 | DavError::Status(e) 86 | } 87 | } 88 | 89 | impl From for DavError { 90 | fn from(e: xml::reader::Error) -> Self { 91 | DavError::XmlReaderError(e) 92 | } 93 | } 94 | 95 | impl From for DavError { 96 | fn from(e: xml::writer::Error) -> Self { 97 | DavError::XmlWriterError(e) 98 | } 99 | } 100 | 101 | impl From for DavError { 102 | fn from(_: std::str::Utf8Error) -> Self { 103 | DavError::Utf8Error 104 | } 105 | } 106 | 107 | impl From for DavError { 108 | fn from(_: std::string::FromUtf8Error) -> Self { 109 | DavError::Utf8Error 110 | } 111 | } 112 | 113 | impl From for DavError { 114 | fn from(_e: futures_channel::mpsc::SendError) -> Self { 115 | DavError::ChanError 116 | } 117 | } 118 | 119 | fn fserror_to_ioerror(e: FsError) -> io::Error { 120 | match e { 121 | FsError::NotImplemented => io::Error::other("NotImplemented"), 122 | FsError::GeneralFailure => io::Error::other("GeneralFailure"), 123 | FsError::Exists => io::Error::new(io::ErrorKind::AlreadyExists, "Exists"), 124 | FsError::NotFound => io::Error::new(io::ErrorKind::NotFound, "Notfound"), 125 | FsError::Forbidden => io::Error::new(io::ErrorKind::PermissionDenied, "Forbidden"), 126 | FsError::InsufficientStorage => io::Error::other("InsufficientStorage"), 127 | FsError::LoopDetected => io::Error::other("LoopDetected"), 128 | FsError::PathTooLong => io::Error::other("PathTooLong"), 129 | FsError::TooLarge => 
io::Error::other("TooLarge"), 130 | FsError::IsRemote => io::Error::other("IsRemote"), 131 | } 132 | } 133 | 134 | fn ioerror_to_status(ioerror: &io::Error) -> StatusCode { 135 | match ioerror.kind() { 136 | ErrorKind::NotFound => StatusCode::NOT_FOUND, 137 | ErrorKind::PermissionDenied => StatusCode::FORBIDDEN, 138 | ErrorKind::AlreadyExists => StatusCode::CONFLICT, 139 | ErrorKind::TimedOut => StatusCode::GATEWAY_TIMEOUT, 140 | _ => StatusCode::BAD_GATEWAY, 141 | } 142 | } 143 | 144 | fn fserror_to_status(e: &FsError) -> StatusCode { 145 | match e { 146 | FsError::NotImplemented => StatusCode::NOT_IMPLEMENTED, 147 | FsError::GeneralFailure => StatusCode::INTERNAL_SERVER_ERROR, 148 | FsError::Exists => StatusCode::METHOD_NOT_ALLOWED, 149 | FsError::NotFound => StatusCode::NOT_FOUND, 150 | FsError::Forbidden => StatusCode::FORBIDDEN, 151 | FsError::InsufficientStorage => StatusCode::INSUFFICIENT_STORAGE, 152 | FsError::LoopDetected => StatusCode::LOOP_DETECTED, 153 | FsError::PathTooLong => StatusCode::URI_TOO_LONG, 154 | FsError::TooLarge => StatusCode::PAYLOAD_TOO_LARGE, 155 | FsError::IsRemote => StatusCode::BAD_GATEWAY, 156 | } 157 | } 158 | 159 | impl DavError { 160 | pub(crate) fn statuscode(&self) -> StatusCode { 161 | match *self { 162 | DavError::XmlReadError => StatusCode::BAD_REQUEST, 163 | DavError::XmlParseError => StatusCode::BAD_REQUEST, 164 | DavError::InvalidPath => StatusCode::BAD_REQUEST, 165 | DavError::IllegalPath => StatusCode::BAD_GATEWAY, 166 | DavError::ForbiddenPath => StatusCode::FORBIDDEN, 167 | DavError::UnknownDavMethod => StatusCode::NOT_IMPLEMENTED, 168 | DavError::ChanError => StatusCode::INTERNAL_SERVER_ERROR, 169 | DavError::Utf8Error => StatusCode::UNSUPPORTED_MEDIA_TYPE, 170 | DavError::IoError(ref e) => ioerror_to_status(e), 171 | DavError::FsError(ref e) => fserror_to_status(e), 172 | DavError::Status(e) => e, 173 | DavError::StatusClose(e) => e, 174 | DavError::XmlReaderError(ref _e) => StatusCode::BAD_REQUEST, 175 | DavError::XmlWriterError(ref _e) => StatusCode::INTERNAL_SERVER_ERROR, 176 | } 177 | } 178 | 179 | pub(crate) fn must_close(&self) -> bool { 180 | !matches!( 181 | self, 182 | &DavError::Status(_) 183 | | &DavError::FsError(FsError::NotFound) 184 | | &DavError::FsError(FsError::Forbidden) 185 | | &DavError::FsError(FsError::Exists) 186 | ) 187 | } 188 | } 189 | -------------------------------------------------------------------------------- /src/warp.rs: -------------------------------------------------------------------------------- 1 | //! Adapter for the `warp` HTTP server framework. 2 | //! 3 | //! The filters in this module will always succeed and never 4 | //! return an error. For example, if a file is not found, the 5 | //! filter will return a 404 reply, and not an internal 6 | //! rejection. 7 | //! 8 | use std::convert::Infallible; 9 | #[cfg(any(docsrs, feature = "localfs"))] 10 | use std::path::Path; 11 | 12 | use warp::{ 13 | Filter, Reply, 14 | filters::BoxedFilter, 15 | http::{HeaderMap, Method}, 16 | }; 17 | 18 | use crate::{DavHandler, body::Body}; 19 | #[cfg(any(docsrs, feature = "localfs"))] 20 | use crate::{fakels::FakeLs, localfs::LocalFs}; 21 | 22 | /// Reply-filter that runs a DavHandler. 23 | /// 24 | /// Just pass in a pre-configured DavHandler. If a prefix was not 25 | /// configured, it will be the request path up to this point. 
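// A minimal usage sketch (hypothetical, not one of the crate's examples),
// assuming the in-memory `memfs`/`memls` backends are enabled:
//
//   let handler = DavHandler::builder()
//       .filesystem(dav_server::memfs::MemFs::new())
//       .locksystem(dav_server::memls::MemLs::new())
//       .build_handler();
//   warp::serve(dav_server::warp::dav_handler(handler))
//       .run(([127, 0, 0, 1], 4918))
//       .await;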
26 | pub fn dav_handler(handler: DavHandler) -> BoxedFilter<(impl Reply,)> { 27 | use http::uri::Uri; 28 | use warp::path::{FullPath, Tail}; 29 | 30 | warp::method() 31 | .and(warp::path::full()) 32 | .and(warp::path::tail()) 33 | .and(warp::header::headers_cloned()) 34 | .and(warp::body::stream()) 35 | .and_then( 36 | move |method: Method, 37 | path_full: FullPath, 38 | path_tail: Tail, 39 | headers: HeaderMap, 40 | body| { 41 | let handler = handler.clone(); 42 | 43 | async move { 44 | // rebuild an http::Request struct. 45 | let path_str = path_full.as_str(); 46 | let uri = path_str.parse::().unwrap(); 47 | let mut builder = http::Request::builder().method(method.as_ref()).uri(uri); 48 | for (k, v) in headers.iter() { 49 | builder = builder.header(k.as_str(), v.as_ref()); 50 | } 51 | let request = builder.body(body).unwrap(); 52 | 53 | let response = if handler.config.prefix.is_some() { 54 | // Run a handler with the configured path prefix. 55 | handler.handle_stream(request).await 56 | } else { 57 | // Run a handler with the current path prefix. 58 | let path_len = path_str.len(); 59 | let tail_len = path_tail.as_str().len(); 60 | let prefix = path_str[..path_len - tail_len].to_string(); 61 | let config = DavHandler::builder().strip_prefix(prefix); 62 | handler.handle_stream_with(config, request).await 63 | }; 64 | 65 | // Need to remap the http_body::Body to a hyper::Body. 66 | let response = warp_response(response).unwrap(); 67 | Ok::<_, Infallible>(response) 68 | } 69 | }, 70 | ) 71 | .boxed() 72 | } 73 | 74 | /// Creates a Filter that serves files and directories at the 75 | /// base path joined with the remainder of the request path, 76 | /// like `warp::filters::fs::dir`. 77 | /// 78 | /// The behaviour for serving a directory depends on the flags: 79 | /// 80 | /// - `index_html`: if an `index.html` file is found, serve it. 81 | /// - `auto_index_over_get`: Create a directory index page when accessing over HTTP `GET` (but NOT 82 | /// affecting WebDAV `PROPFIND` method currently). In the current implementation, this only 83 | /// affects HTTP `GET` method (commonly used for listing the directories when accessing through a 84 | /// `http://` or `https://` URL for a directory in a browser), but NOT WebDAV listing of a 85 | /// directory (HTTP `PROPFIND`). BEWARE: The name and behaviour of this parameter variable may 86 | /// change, and later it may control WebDAV `PROPFIND`, too (but not as of now). 87 | /// 88 | /// In release mode, if `auto_index_over_get` is `true`, then this executes as described above 89 | /// (currently affecting only HTTP `GET`), but beware of this current behaviour. 90 | /// 91 | /// In debug mode, if `auto_index_over_get` is `false`, this _panics_. That is so that it alerts 92 | /// the developers to this current limitation, so they don't accidentally expect 93 | /// `auto_index_over_get` to control WebDAV. 94 | /// - no flags set: 404. 95 | #[cfg(any(docsrs, feature = "localfs"))] 96 | pub fn dav_dir( 97 | base: impl AsRef, 98 | index_html: bool, 99 | auto_index_over_get: bool, 100 | ) -> BoxedFilter<(impl Reply,)> { 101 | debug_assert!( 102 | auto_index_over_get, 103 | "See documentation of dav_server::warp::dav_dir(...)." 
104 | ); 105 | let mut builder = DavHandler::builder() 106 | .filesystem(LocalFs::new(base, false, false, false)) 107 | .locksystem(FakeLs::new()) 108 | .autoindex(auto_index_over_get); 109 | if index_html { 110 | builder = builder.indexfile("index.html".to_string()) 111 | } 112 | let handler = builder.build_handler(); 113 | dav_handler(handler) 114 | } 115 | 116 | /// Creates a Filter that serves a single file, ignoring the request path, 117 | /// like `warp::filters::fs::file`. 118 | #[cfg(any(docsrs, feature = "localfs"))] 119 | pub fn dav_file(file: impl AsRef) -> BoxedFilter<(impl Reply,)> { 120 | let handler = DavHandler::builder() 121 | .filesystem(LocalFs::new_file(file, false)) 122 | .locksystem(FakeLs::new()) 123 | .build_handler(); 124 | dav_handler(handler) 125 | } 126 | 127 | /// Adapts the response to the `warp` versions of `hyper` and `http` while `warp` remains on old versions. 128 | /// https://github.com/seanmonstar/warp/issues/1088 129 | fn warp_response( 130 | response: http::Response, 131 | ) -> Result, warp::http::Error> { 132 | let (parts, body) = response.into_parts(); 133 | // Leave response extensions empty. 134 | let mut response = warp::http::Response::builder() 135 | .version(warp_http_version(parts.version)) 136 | .status(parts.status.as_u16()); 137 | // Ignore headers without the name. 138 | let headers = parts.headers.into_iter().filter_map(|(k, v)| Some((k?, v))); 139 | for (k, v) in headers { 140 | response = response.header(k.as_str(), v.as_ref()); 141 | } 142 | response.body(warp::hyper::Body::wrap_stream(body)) 143 | } 144 | 145 | /// Adapts HTTP version to the `warp` version of `http` crate while `warp` remains on old version. 146 | /// https://github.com/seanmonstar/warp/issues/1088 147 | fn warp_http_version(v: http::Version) -> warp::http::Version { 148 | match v { 149 | http::Version::HTTP_3 => warp::http::Version::HTTP_3, 150 | http::Version::HTTP_2 => warp::http::Version::HTTP_2, 151 | http::Version::HTTP_11 => warp::http::Version::HTTP_11, 152 | http::Version::HTTP_10 => warp::http::Version::HTTP_10, 153 | http::Version::HTTP_09 => warp::http::Version::HTTP_09, 154 | v => unreachable!("unexpected HTTP version {:?}", v), 155 | } 156 | } 157 | -------------------------------------------------------------------------------- /src/util.rs: -------------------------------------------------------------------------------- 1 | use std::io::{Cursor, Write}; 2 | use std::time::SystemTime; 3 | 4 | use bytes::Bytes; 5 | use chrono::{DateTime, SecondsFormat, Utc}; 6 | use headers::Header; 7 | use http::method::InvalidMethod; 8 | 9 | use crate::DavResult; 10 | use crate::body::Body; 11 | use crate::errors::DavError; 12 | 13 | /// HTTP Methods supported by DavHandler. 14 | #[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] 15 | #[repr(u32)] 16 | pub enum DavMethod { 17 | Head = 0x0001, 18 | Get = 0x0002, 19 | Put = 0x0004, 20 | Patch = 0x0008, 21 | Options = 0x0010, 22 | PropFind = 0x0020, 23 | PropPatch = 0x0040, 24 | MkCol = 0x0080, 25 | Copy = 0x0100, 26 | Move = 0x0200, 27 | Delete = 0x0400, 28 | Lock = 0x0800, 29 | Unlock = 0x1000, 30 | Report = 0x2000, 31 | MkCalendar = 0x4000, 32 | } 33 | 34 | // translate method into our own enum that has webdav methods as well. 
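// For example (hypothetical sketch), both of these yield DavMethod::PropFind,
// using only items defined in this module plus `http::Method`:
//
//   let m = http::Method::from_bytes(b"PROPFIND").unwrap();
//   assert_eq!(dav_method(&m).unwrap(), DavMethod::PropFind);
//   assert_eq!(DavMethod::try_from(&m).unwrap(), DavMethod::PropFind);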
35 | pub(crate) fn dav_method(m: &http::Method) -> DavResult { 36 | let m = match *m { 37 | http::Method::HEAD => DavMethod::Head, 38 | http::Method::GET => DavMethod::Get, 39 | http::Method::PUT => DavMethod::Put, 40 | http::Method::PATCH => DavMethod::Patch, 41 | http::Method::DELETE => DavMethod::Delete, 42 | http::Method::OPTIONS => DavMethod::Options, 43 | _ => match m.as_str() { 44 | "PROPFIND" => DavMethod::PropFind, 45 | "PROPPATCH" => DavMethod::PropPatch, 46 | "MKCOL" => DavMethod::MkCol, 47 | "COPY" => DavMethod::Copy, 48 | "MOVE" => DavMethod::Move, 49 | "LOCK" => DavMethod::Lock, 50 | "UNLOCK" => DavMethod::Unlock, 51 | "REPORT" => DavMethod::Report, 52 | "MKCALENDAR" => DavMethod::MkCalendar, 53 | _ => { 54 | return Err(DavError::UnknownDavMethod); 55 | } 56 | }, 57 | }; 58 | Ok(m) 59 | } 60 | 61 | // for external use. 62 | impl std::convert::TryFrom<&http::Method> for DavMethod { 63 | type Error = InvalidMethod; 64 | 65 | fn try_from(value: &http::Method) -> Result { 66 | dav_method(value).map_err(|_| { 67 | // A trick to get at the value of http::method::InvalidMethod. 68 | http::method::Method::from_bytes(b"").unwrap_err() 69 | }) 70 | } 71 | } 72 | 73 | /// A set of allowed [`DavMethod`]s. 74 | /// 75 | /// [`DavMethod`]: enum.DavMethod.html 76 | #[derive(Clone, Copy, Debug)] 77 | pub struct DavMethodSet(u32); 78 | 79 | impl DavMethodSet { 80 | pub const HTTP_RO: DavMethodSet = 81 | DavMethodSet(DavMethod::Get as u32 | DavMethod::Head as u32 | DavMethod::Options as u32); 82 | pub const HTTP_RW: DavMethodSet = DavMethodSet(Self::HTTP_RO.0 | DavMethod::Put as u32); 83 | pub const WEBDAV_RO: DavMethodSet = DavMethodSet(Self::HTTP_RO.0 | DavMethod::PropFind as u32); 84 | pub const WEBDAV_RW: DavMethodSet = DavMethodSet(0xffffffff); 85 | 86 | /// New set, all methods allowed. 87 | pub fn all() -> DavMethodSet { 88 | DavMethodSet(0xffffffff) 89 | } 90 | 91 | /// New empty set. 92 | pub fn none() -> DavMethodSet { 93 | DavMethodSet(0) 94 | } 95 | 96 | /// Add a method. 97 | pub fn add(&mut self, m: DavMethod) -> &Self { 98 | self.0 |= m as u32; 99 | self 100 | } 101 | 102 | /// Remove a method. 103 | pub fn remove(&mut self, m: DavMethod) -> &Self { 104 | self.0 &= !(m as u32); 105 | self 106 | } 107 | 108 | /// Check if a method is in the set. 109 | pub fn contains(&self, m: DavMethod) -> bool { 110 | self.0 & (m as u32) > 0 111 | } 112 | 113 | /// Generate an DavMethodSet from a list of words. 114 | pub fn from_vec(v: Vec>) -> Result { 115 | let mut m: u32 = 0; 116 | for w in &v { 117 | m |= match w.as_ref().to_lowercase().as_str() { 118 | "head" => DavMethod::Head as u32, 119 | "get" => DavMethod::Get as u32, 120 | "put" => DavMethod::Put as u32, 121 | "patch" => DavMethod::Patch as u32, 122 | "delete" => DavMethod::Delete as u32, 123 | "options" => DavMethod::Options as u32, 124 | "propfind" => DavMethod::PropFind as u32, 125 | "proppatch" => DavMethod::PropPatch as u32, 126 | "mkcol" => DavMethod::MkCol as u32, 127 | "copy" => DavMethod::Copy as u32, 128 | "move" => DavMethod::Move as u32, 129 | "lock" => DavMethod::Lock as u32, 130 | "unlock" => DavMethod::Unlock as u32, 131 | "report" => DavMethod::Report as u32, 132 | "mkcalendar" => DavMethod::MkCalendar as u32, 133 | "http-ro" => Self::HTTP_RO.0, 134 | "http-rw" => Self::HTTP_RW.0, 135 | "webdav-ro" => Self::WEBDAV_RO.0, 136 | "webdav-rw" => Self::WEBDAV_RW.0, 137 | _ => { 138 | // A trick to get at the value of http::method::InvalidMethod. 
139 | let invalid_method = http::method::Method::from_bytes(b"").unwrap_err(); 140 | return Err(invalid_method); 141 | } 142 | }; 143 | } 144 | Ok(DavMethodSet(m)) 145 | } 146 | } 147 | 148 | pub(crate) fn dav_xml_error(body: &str) -> Body { 149 | let xml = format!( 150 | "{}\n{}\n{}\n{}\n", 151 | r#""#, 152 | r#""#, 153 | body, 154 | r#""# 155 | ); 156 | Body::from(xml) 157 | } 158 | 159 | pub(crate) fn systemtime_to_httpdate(t: SystemTime) -> String { 160 | let d = headers::Date::from(t); 161 | let mut v = Vec::new(); 162 | d.encode(&mut v); 163 | v[0].to_str().unwrap().to_owned() 164 | } 165 | 166 | pub(crate) fn systemtime_to_rfc3339_without_nanosecond(t: SystemTime) -> String { 167 | // 1996-12-19T16:39:57Z 168 | DateTime::::from(t).to_rfc3339_opts(SecondsFormat::Secs, true) 169 | } 170 | 171 | // A buffer that implements "Write". 172 | #[derive(Clone)] 173 | pub(crate) struct MemBuffer(Cursor>); 174 | 175 | impl MemBuffer { 176 | pub fn new() -> MemBuffer { 177 | MemBuffer(Cursor::new(Vec::new())) 178 | } 179 | 180 | pub fn take(&mut self) -> Bytes { 181 | let buf = std::mem::take(self.0.get_mut()); 182 | self.0.set_position(0); 183 | Bytes::from(buf) 184 | } 185 | } 186 | 187 | impl Write for MemBuffer { 188 | fn write(&mut self, buf: &[u8]) -> std::io::Result { 189 | self.0.write(buf) 190 | } 191 | 192 | fn flush(&mut self) -> std::io::Result<()> { 193 | Ok(()) 194 | } 195 | } 196 | 197 | #[cfg(test)] 198 | mod tests { 199 | use super::*; 200 | use std::time::UNIX_EPOCH; 201 | 202 | #[test] 203 | fn test_rfc3339_no_nanosecond() { 204 | let t = UNIX_EPOCH + std::time::Duration::new(1, 5); 205 | assert!(systemtime_to_rfc3339_without_nanosecond(t) == "1970-01-01T00:00:01Z"); 206 | } 207 | } 208 | -------------------------------------------------------------------------------- /src/tree.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Borrow; 2 | use std::collections::HashMap; 3 | use std::fmt::Debug; 4 | use std::hash::Hash; 5 | 6 | use crate::FsError; 7 | use crate::FsResult; 8 | 9 | #[derive(Debug)] 10 | /// A tree contains a bunch of nodes. 11 | pub struct Tree { 12 | nodes: HashMap>, 13 | node_id: u64, 14 | } 15 | 16 | /// id of the root node of the tree. 17 | pub const ROOT_ID: u64 = 1; 18 | 19 | #[derive(Debug)] 20 | /// Node itself. "data" contains user-modifiable data. 21 | pub struct Node { 22 | pub data: D, 23 | #[allow(dead_code)] 24 | id: u64, 25 | parent_id: u64, 26 | children: HashMap, 27 | } 28 | 29 | #[derive(Debug)] 30 | // Iterator over the children of a node. 31 | pub struct Children(std::vec::IntoIter<(K, u64)>); 32 | 33 | impl Tree { 34 | /// Get new tree and initialize the root with 'data'. 35 | pub fn new(data: D) -> Tree { 36 | let mut t = Tree { 37 | nodes: HashMap::new(), 38 | node_id: ROOT_ID, 39 | }; 40 | t.new_node(99999999, data); 41 | t 42 | } 43 | 44 | fn new_node(&mut self, parent: u64, data: D) -> u64 { 45 | let id = self.node_id; 46 | self.node_id += 1; 47 | let node = Node { 48 | id, 49 | parent_id: parent, 50 | data, 51 | children: HashMap::new(), 52 | }; 53 | self.nodes.insert(id, node); 54 | id 55 | } 56 | 57 | /// add a child node to an existing node. 
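// Hypothetical usage sketch of this tree (here with String keys and u32 data);
// error handling via `?` assumes an FsResult-returning context:
//
//   let mut tree: Tree<String, u32> = Tree::new(0);
//   let etc = tree.add_child(ROOT_ID, "etc".to_string(), 42, false)?;
//   assert_eq!(tree.get_child(ROOT_ID, "etc")?, etc);
//   assert_eq!(*tree.get_node(etc)?, 42);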
58 | pub fn add_child(&mut self, parent: u64, key: K, data: D, overwrite: bool) -> FsResult { 59 | { 60 | let pnode = self.nodes.get(&parent).ok_or(FsError::NotFound)?; 61 | if !overwrite && pnode.children.contains_key(&key) { 62 | return Err(FsError::Exists); 63 | } 64 | } 65 | let id = self.new_node(parent, data); 66 | let pnode = self.nodes.get_mut(&parent).unwrap(); 67 | 68 | pnode.children.insert(key, id); 69 | Ok(id) 70 | } 71 | 72 | /* 73 | * unused ... 74 | pub fn remove_child(&mut self, parent: u64, key: &K) -> FsResult<()> { 75 | let id = { 76 | let pnode = self.nodes.get(&parent).ok_or(FsError::NotFound)?; 77 | let id = *pnode.children.get(key).ok_or(FsError::NotFound)?; 78 | let node = self.nodes.get(&id).unwrap(); 79 | if node.children.len() > 0 { 80 | return Err(FsError::Forbidden); 81 | } 82 | id 83 | }; 84 | { 85 | let pnode = self.nodes.get_mut(&parent).unwrap(); 86 | pnode.children.remove(key); 87 | } 88 | self.nodes.remove(&id); 89 | Ok(()) 90 | }*/ 91 | 92 | /// Get a child node by key K. 93 | pub fn get_child(&self, parent: u64, key: &Q) -> FsResult 94 | where 95 | K: Borrow, 96 | Q: Hash + Eq + ?Sized, 97 | { 98 | let pnode = self.nodes.get(&parent).ok_or(FsError::NotFound)?; 99 | let id = pnode.children.get(key).ok_or(FsError::NotFound)?; 100 | Ok(*id) 101 | } 102 | 103 | /// Get all children of this node. Returns an iterator over . 104 | pub fn get_children(&self, parent: u64) -> FsResult> { 105 | let pnode = self.nodes.get(&parent).ok_or(FsError::NotFound)?; 106 | let mut v = Vec::new(); 107 | for (k, i) in &pnode.children { 108 | v.push(((*k).clone(), *i)); 109 | } 110 | Ok(Children(v.into_iter())) 111 | } 112 | 113 | /// Get reference to a node. 114 | pub fn get_node(&self, id: u64) -> FsResult<&D> { 115 | let n = self.nodes.get(&id).ok_or(FsError::NotFound)?; 116 | Ok(&n.data) 117 | } 118 | 119 | /// Get mutable reference to a node. 120 | pub fn get_node_mut(&mut self, id: u64) -> FsResult<&mut D> { 121 | let n = self.nodes.get_mut(&id).ok_or(FsError::NotFound)?; 122 | Ok(&mut n.data) 123 | } 124 | 125 | fn delete_node_from_parent(&mut self, id: u64) -> FsResult<()> { 126 | let parent_id = self.nodes.get(&id).ok_or(FsError::NotFound)?.parent_id; 127 | let key = { 128 | let pnode = self.nodes.get(&parent_id).unwrap(); 129 | let mut key = None; 130 | for (k, i) in &pnode.children { 131 | if i == &id { 132 | key = Some((*k).clone()); 133 | break; 134 | } 135 | } 136 | key 137 | }; 138 | let key = key.unwrap(); 139 | let pnode = self.nodes.get_mut(&parent_id).unwrap(); 140 | pnode.children.remove(&key); 141 | Ok(()) 142 | } 143 | 144 | /// Delete a node. Fails if node has children. Returns node itself. 145 | pub fn delete_node(&mut self, id: u64) -> FsResult> { 146 | { 147 | let n = self.nodes.get(&id).ok_or(FsError::NotFound)?; 148 | if !n.children.is_empty() { 149 | return Err(FsError::Forbidden); 150 | } 151 | } 152 | self.delete_node_from_parent(id)?; 153 | Ok(self.nodes.remove(&id).unwrap()) 154 | } 155 | 156 | /// Delete a subtree. 157 | pub fn delete_subtree(&mut self, id: u64) -> FsResult<()> { 158 | let children = { 159 | let n = self.nodes.get(&id).ok_or(FsError::NotFound)?; 160 | n.children.iter().map(|(_, &v)| v).collect::>() 161 | }; 162 | for c in children.into_iter() { 163 | self.delete_subtree(c)?; 164 | } 165 | self.delete_node_from_parent(id) 166 | } 167 | 168 | /// Move a node to a new position and new name in the tree. 169 | /// If "overwrite" is true, will replace an existing 170 | /// node, but only if it doesn't have any children. 
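// For instance (hypothetical): move node `id` under the root as "new-name",
// replacing an existing node of that name only if it has no children:
//
//   tree.move_node(id, ROOT_ID, "new-name".to_string(), true)?;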
171 | #[cfg(feature = "memfs")] 172 | pub fn move_node( 173 | &mut self, 174 | id: u64, 175 | new_parent: u64, 176 | new_name: K, 177 | overwrite: bool, 178 | ) -> FsResult<()> { 179 | let dest = { 180 | let pnode = self.nodes.get(&new_parent).ok_or(FsError::NotFound)?; 181 | if let Some(cid) = pnode.children.get(&new_name) { 182 | let cnode = self.nodes.get(cid).unwrap(); 183 | if !overwrite || !cnode.children.is_empty() { 184 | return Err(FsError::Exists); 185 | } 186 | Some(*cid) 187 | } else { 188 | None 189 | } 190 | }; 191 | self.delete_node_from_parent(id)?; 192 | self.nodes.get_mut(&id).unwrap().parent_id = new_parent; 193 | if let Some(dest) = dest { 194 | self.nodes.remove(&dest); 195 | } 196 | let pnode = self.nodes.get_mut(&new_parent).unwrap(); 197 | pnode.children.insert(new_name, id); 198 | Ok(()) 199 | } 200 | } 201 | 202 | impl Iterator for Children { 203 | type Item = (K, u64); 204 | fn next(&mut self) -> Option { 205 | self.0.next() 206 | } 207 | } 208 | -------------------------------------------------------------------------------- /src/handle_delete.rs: -------------------------------------------------------------------------------- 1 | use futures_util::{FutureExt, StreamExt, future::BoxFuture}; 2 | use headers::HeaderMapExt; 3 | use http::{Request, Response, StatusCode}; 4 | 5 | use crate::async_stream::AsyncStream; 6 | use crate::body::Body; 7 | use crate::conditional::if_match_get_tokens; 8 | use crate::davheaders::Depth; 9 | use crate::davpath::DavPath; 10 | use crate::errors::*; 11 | use crate::fs::*; 12 | use crate::multierror::{MultiError, multi_error}; 13 | use crate::{DavInner, DavResult}; 14 | 15 | // map_err helper. 16 | async fn add_status<'a>(m_err: &'a mut MultiError, path: &'a DavPath, e: FsError) -> DavError { 17 | let status = DavError::FsError(e).statuscode(); 18 | if let Err(x) = m_err.add_status(path, status).await { 19 | return x.into(); 20 | } 21 | DavError::Status(status) 22 | } 23 | 24 | // map_err helper for directories, the result statuscode 25 | // mappings are not 100% the same. 26 | async fn dir_status<'a>(res: &'a mut MultiError, path: &'a DavPath, e: FsError) -> DavError { 27 | let status = match e { 28 | FsError::Exists => StatusCode::CONFLICT, 29 | e => DavError::FsError(e).statuscode(), 30 | }; 31 | if let Err(x) = res.add_status(path, status).await { 32 | return x.into(); 33 | } 34 | DavError::Status(status) 35 | } 36 | 37 | impl DavInner { 38 | pub(crate) fn delete_items<'a>( 39 | &'a self, 40 | res: &'a mut MultiError, 41 | depth: Depth, 42 | meta: Box, 43 | path: &'a DavPath, 44 | ) -> BoxFuture<'a, DavResult<()>> { 45 | async move { 46 | if !meta.is_dir() { 47 | trace!("delete_items (file) {path} {depth:?}"); 48 | return match self.fs.remove_file(path, &self.credentials).await { 49 | Ok(x) => Ok(x), 50 | Err(e) => Err(add_status(res, path, e).await), 51 | }; 52 | } 53 | if depth == Depth::Zero { 54 | trace!("delete_items (dir) {path} {depth:?}"); 55 | return match self.fs.remove_dir(path, &self.credentials).await { 56 | Ok(x) => Ok(x), 57 | Err(e) => Err(add_status(res, path, e).await), 58 | }; 59 | } 60 | 61 | // walk over all entries. 
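// Descriptive note: per-entry failures below are recorded through the
// MultiError sender and show up as individual D:response elements in the
// final 207 Multi-Status reply; only errors that are not plain status errors
// abort the whole walk early.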
62 | let mut entries = match self 63 | .fs 64 | .read_dir(path, ReadDirMeta::DataSymlink, &self.credentials) 65 | .await 66 | { 67 | Ok(x) => Ok(x), 68 | Err(e) => Err(add_status(res, path, e).await), 69 | }?; 70 | 71 | let mut result = Ok(()); 72 | while let Some(dirent) = entries.next().await { 73 | let dirent = match dirent { 74 | Ok(dirent) => dirent, 75 | Err(e) => { 76 | result = Err(add_status(res, path, e).await); 77 | continue; 78 | } 79 | }; 80 | 81 | // if metadata() fails, skip to next entry. 82 | // NOTE: dirent.metadata == symlink_metadata (!) 83 | let meta = match dirent.metadata().await { 84 | Ok(m) => m, 85 | Err(e) => { 86 | result = Err(add_status(res, path, e).await); 87 | continue; 88 | } 89 | }; 90 | 91 | let mut npath = path.clone(); 92 | npath.push_segment(&dirent.name()); 93 | npath.add_slash_if(meta.is_dir()); 94 | 95 | // do the actual work. If this fails with a non-fs related error, 96 | // return immediately. 97 | if let Err(e) = self.delete_items(res, depth, meta, &npath).await { 98 | match e { 99 | DavError::Status(_) => { 100 | result = Err(e); 101 | continue; 102 | } 103 | _ => return Err(e), 104 | } 105 | } 106 | } 107 | 108 | // if we got any error, return with the error, 109 | // and do not try to remove the directory. 110 | result?; 111 | 112 | match self.fs.remove_dir(path, &self.credentials).await { 113 | Ok(x) => Ok(x), 114 | Err(e) => Err(dir_status(res, path, e).await), 115 | } 116 | } 117 | .boxed() 118 | } 119 | 120 | pub(crate) async fn handle_delete(self, req: &Request<()>) -> DavResult> { 121 | // RFC4918 9.6.1 DELETE for Collections. 122 | // Note that allowing Depth: 0 is NOT RFC compliant. 123 | let depth = match req.headers().typed_get::() { 124 | Some(Depth::Infinity) | None => Depth::Infinity, 125 | Some(Depth::Zero) => Depth::Zero, 126 | _ => return Err(DavError::Status(StatusCode::BAD_REQUEST)), 127 | }; 128 | 129 | let mut path = self.path(req); 130 | let meta = self.fs.symlink_metadata(&path, &self.credentials).await?; 131 | if meta.is_symlink() 132 | && let Ok(m2) = self.fs.metadata(&path, &self.credentials).await 133 | { 134 | path.add_slash_if(m2.is_dir()); 135 | } 136 | path.add_slash_if(meta.is_dir()); 137 | 138 | // check the If and If-* headers. 139 | let tokens_res = if_match_get_tokens( 140 | req, 141 | Some(meta.as_ref()), 142 | self.fs.as_ref(), 143 | &self.ls, 144 | &path, 145 | &self.credentials, 146 | ) 147 | .await; 148 | let tokens = match tokens_res { 149 | Ok(t) => t, 150 | Err(s) => return Err(DavError::Status(s)), 151 | }; 152 | 153 | // check locks. since we cancel the entire operation if there is 154 | // a conflicting lock, we do not return a 207 multistatus, but 155 | // just a simple status. 156 | if let Some(ref locksystem) = self.ls { 157 | let t = tokens.iter().map(|s| s.as_str()).collect::>(); 158 | let principal = self.principal.as_deref(); 159 | if let Err(_l) = locksystem.check(&path, principal, false, true, t).await { 160 | return Err(DavError::Status(StatusCode::LOCKED)); 161 | } 162 | } 163 | 164 | let req_path = path.clone(); 165 | 166 | let items = AsyncStream::new(|tx| { 167 | async move { 168 | // turn the Sink into something easier to pass around. 169 | let mut multierror = MultiError::new(tx); 170 | 171 | // now delete the path recursively. 172 | let fut = self.delete_items(&mut multierror, depth, meta, &path); 173 | if let Ok(()) = fut.await { 174 | // Done. Now delete the path in the locksystem as well. 175 | // Should really do this per resource, in case the delete partially fails. 
See TODO.pm 176 | if let Some(ref locksystem) = self.ls { 177 | locksystem.delete(&path).await.ok(); 178 | } 179 | let _ = multierror.add_status(&path, StatusCode::NO_CONTENT).await; 180 | } 181 | Ok(()) 182 | } 183 | }); 184 | 185 | multi_error(req_path, items).await 186 | } 187 | } 188 | -------------------------------------------------------------------------------- /README.CalDAV.md: -------------------------------------------------------------------------------- 1 | # CalDAV Support in dav-server 2 | 3 | This document describes the CalDAV (Calendaring Extensions to WebDAV) support in the dav-server library. 4 | 5 | ## Overview 6 | 7 | CalDAV is an extension of WebDAV that provides a standard way to access and manage calendar data over HTTP. It's defined in [RFC 4791](https://tools.ietf.org/html/rfc4791) and allows calendar clients to: 8 | 9 | - Create and manage calendar collections 10 | - Store and retrieve calendar events, tasks, and journals 11 | - Query calendars with complex filters 12 | - Synchronize calendar data between clients and servers 13 | 14 | ## Features 15 | 16 | The CalDAV implementation in dav-server includes: 17 | 18 | - **Calendar Collections**: Special WebDAV collections that contain calendar data 19 | - **MKCALENDAR Method**: Create new calendar collections 20 | - **REPORT Method**: Query calendar data with filters 21 | - **CalDAV Properties**: Calendar-specific WebDAV properties 22 | - **iCalendar Support**: Parse and validate iCalendar data 23 | - **Time Range Queries**: Filter events by date/time ranges 24 | - **Component Filtering**: Filter by calendar component types (VEVENT, VTODO, etc.) 25 | 26 | ## Enabling CalDAV 27 | 28 | CalDAV support is available as an optional cargo feature: 29 | 30 | ```toml 31 | [dependencies] 32 | dav-server = { version = "0.8", features = ["caldav"] } 33 | ``` 34 | 35 | This adds the following dependencies: 36 | - `icalendar`: For parsing and validating iCalendar data 37 | - `chrono`: For date/time handling 38 | 39 | ## Quick Start 40 | 41 | Here's a basic CalDAV server setup: 42 | 43 | ```rust 44 | use dav_server::{DavHandler, fakels::FakeLs, localfs::LocalFs}; 45 | 46 | let server = DavHandler::builder() 47 | .filesystem(LocalFs::new("/calendars", false, false, false)) 48 | .locksystem(FakeLs::new()) 49 | .build_handler(); 50 | ``` 51 | 52 | ## CalDAV Methods 53 | 54 | ### MKCALENDAR 55 | 56 | Creates a new calendar collection: 57 | 58 | ```bash 59 | curl -X MKCALENDAR http://localhost:8080/my-calendar/ 60 | ``` 61 | 62 | With properties: 63 | 64 | ```bash 65 | curl -X MKCALENDAR http://localhost:8080/my-calendar/ \ 66 | -H "Content-Type: application/xml" \ 67 | --data ' 68 | 69 | 70 | 71 | My Calendar 72 | Personal calendar 73 | 74 | 75 | ' 76 | ``` 77 | 78 | ### REPORT 79 | 80 | Query calendar data: 81 | 82 | #### Calendar Query 83 | 84 | ```bash 85 | curl -X REPORT http://localhost:8080/my-calendar/ \ 86 | -H "Content-Type: application/xml" \ 87 | -H "Depth: 1" \ 88 | --data ' 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | ' 101 | ``` 102 | 103 | #### Calendar Multiget 104 | 105 | ```bash 106 | curl -X REPORT http://localhost:8080/my-calendar/ \ 107 | -H "Content-Type: application/xml" \ 108 | --data ' 109 | 110 | 111 | 112 | 113 | /my-calendar/event1.ics 114 | /my-calendar/event2.ics 115 | ' 116 | ``` 117 | 118 | ## CalDAV Properties 119 | 120 | The implementation supports standard CalDAV properties: 121 | 122 | ### Collection Properties 123 | 124 | - `calendar-description`: Human-readable description 125 | - 
`calendar-timezone`: Default timezone for the calendar 126 | - `supported-calendar-component-set`: Supported component types (VEVENT, VTODO, etc.) 127 | - `supported-calendar-data`: Supported calendar data formats 128 | - `max-resource-size`: Maximum size for calendar resources 129 | 130 | ### Principal Properties 131 | 132 | - `calendar-home-set`: URL of the user's calendar home collection 133 | - `calendar-user-address-set`: Calendar user's addresses 134 | - `schedule-inbox-URL`: URL for scheduling messages 135 | - `schedule-outbox-URL`: URL for outgoing scheduling messages 136 | 137 | ## Working with Calendar Data 138 | 139 | ### Adding Events 140 | 141 | Store iCalendar data using PUT: 142 | 143 | ```bash 144 | curl -X PUT http://localhost:8080/my-calendar/event.ics \ 145 | -H "Content-Type: text/calendar" \ 146 | --data 'BEGIN:VCALENDAR 147 | VERSION:2.0 148 | PRODID:-//Example Corp//CalDAV Client//EN 149 | BEGIN:VEVENT 150 | UID:12345@example.com 151 | DTSTART:20240101T120000Z 152 | DTEND:20240101T130000Z 153 | SUMMARY:New Year Meeting 154 | DESCRIPTION:Planning meeting for the new year 155 | END:VEVENT 156 | END:VCALENDAR' 157 | ``` 158 | 159 | ### Retrieving Events 160 | 161 | Use GET to retrieve individual calendar resources: 162 | 163 | ```bash 164 | curl http://localhost:8080/my-calendar/event.ics 165 | ``` 166 | 167 | ## Client Compatibility 168 | 169 | The CalDAV implementation has been tested with: 170 | 171 | - **Thunderbird**: Full support for calendar sync 172 | - **Apple Calendar**: Compatible with basic operations 173 | - **CalDAV-Sync (Android)**: Works with standard CalDAV features 174 | - **Evolution**: Support for calendar collections and events 175 | 176 | ## Limitations 177 | 178 | Current limitations include: 179 | 180 | - No scheduling support (iTIP/iMIP) 181 | - Limited calendar-user-principal support 182 | - No calendar sharing or ACL support 183 | - Basic time zone handling 184 | - No recurring event expansion in queries 185 | 186 | ## Example Applications 187 | These calendar server examples lack authentication and do not support user-specific access. 188 | For a production environment, you should implement the `GuardedFileSystem` trait for better security and user management. 189 | 190 | ### Calendar Server 191 | 192 | ```rust 193 | use dav_server::{DavHandler, fakels::FakeLs, localfs::LocalFs}; 194 | use std::net::SocketAddr; 195 | 196 | #[tokio::main] 197 | async fn main() { 198 | let server = DavHandler::builder() 199 | .filesystem(LocalFs::new("/calendars", false, false, false)) 200 | .locksystem(FakeLs::new()) 201 | // Use .strip_prefix if you want to start the handler with a prefix path like "/calendars". 
202 | // None will start with root ("/") 203 | // .strip_prefix("/calendars") 204 | .build_handler(); 205 | 206 | // Serve on port 8080 207 | // Calendars accessible at http://localhost:8080/calendars/ 208 | } 209 | ``` 210 | 211 | ### Multi-tenant Calendar Service 212 | 213 | ```rust 214 | use dav_server::{DavHandler, memfs::MemFs, memls::MemLs}; 215 | 216 | // Use in-memory filesystem for demonstration 217 | let server = DavHandler::builder() 218 | .filesystem(MemFs::new()) 219 | .locksystem(MemLs::new()) 220 | .principal("/principals/user1/") 221 | .build_handler(); 222 | ``` 223 | 224 | ## Testing 225 | 226 | Run CalDAV tests with: 227 | 228 | ```bash 229 | cargo test --features caldav caldav_tests 230 | ``` 231 | 232 | Run the CalDAV example: 233 | 234 | ```bash 235 | cargo run --example caldav --features caldav 236 | ``` 237 | 238 | ## Standards Compliance 239 | 240 | This implementation follows: 241 | 242 | - [RFC 4791](https://tools.ietf.org/html/rfc4791) - Calendaring Extensions to WebDAV (CalDAV) 243 | - [RFC 5545](https://tools.ietf.org/html/rfc5545) - Internet Calendaring and Scheduling Core Object Specification (iCalendar) 244 | - [RFC 4918](https://tools.ietf.org/html/rfc4918) - HTTP Extensions for Web Distributed Authoring and Versioning (WebDAV) 245 | 246 | ## Contributing 247 | 248 | Contributions to improve CalDAV support are welcome. Areas for enhancement include: 249 | 250 | - Scheduling support (iTIP) 251 | - Additional client compatibility testing -------------------------------------------------------------------------------- /src/localfs_windows.rs: -------------------------------------------------------------------------------- 1 | // Optimizations for windows and the windows webdav mini-redirector. 2 | // 3 | // The main thing here is case-insensitive path lookups, 4 | // and caching that. 5 | // 6 | use std::ffi::{OsStr, OsString}; 7 | use std::fs; 8 | use std::io::ErrorKind; 9 | use std::num::NonZeroUsize; 10 | use std::path::{Path, PathBuf}; 11 | use std::sync::LazyLock; 12 | use std::thread; 13 | use std::time::{Duration, SystemTime, UNIX_EPOCH}; 14 | 15 | use lru::LruCache; 16 | use parking_lot::Mutex; 17 | 18 | use crate::davpath::DavPath; 19 | 20 | const CACHE_ENTRIES: usize = 4096; 21 | const CACHE_MAX_AGE: u64 = 15 * 60; 22 | const CACHE_SLEEP_MS: u64 = 30059; 23 | 24 | static CACHE: LazyLock = LazyLock::new(|| Cache::new(CACHE_ENTRIES)); 25 | 26 | // Do a case-insensitive path lookup. 27 | pub(crate) fn resolve(base: impl Into, path: &DavPath) -> PathBuf { 28 | let base = base.into(); 29 | let path = path.as_rel_ospath(); 30 | 31 | // must be rooted, and valid UTF-8. 32 | let mut fullpath = base.clone(); 33 | fullpath.push(path); 34 | if !fullpath.has_root() || fullpath.to_str().is_none() { 35 | return fullpath; 36 | } 37 | 38 | // must have a parent. 39 | let parent = match fullpath.parent() { 40 | Some(p) => p, 41 | None => return fullpath, 42 | }; 43 | 44 | // deref in advance: first LazyLock, then Arc. 45 | let cache = &*CACHE; 46 | 47 | // In the cache? 48 | if let Some((path, _)) = cache.get(&fullpath) { 49 | return path; 50 | } 51 | 52 | // if the file exists, fine. 53 | if fullpath.metadata().is_ok() { 54 | return fullpath; 55 | } 56 | 57 | // we need the path as a list of segments. 58 | let segs = path.iter().collect::>(); 59 | if segs.is_empty() { 60 | return fullpath; 61 | } 62 | 63 | // if the parent exists, do a lookup there straight away 64 | // instead of starting from the root. 
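// (This shortcut only applies when the path has more than one segment; for a
// single-segment path the parent is the base directory itself and is assumed to exist.)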
65 | let (parent, parent_exists) = if segs.len() > 1 { 66 | match cache.get(parent) { 67 | Some((path, _)) => (path, true), 68 | None => { 69 | let exists = parent.exists(); 70 | if exists { 71 | cache.insert(parent); 72 | } 73 | (parent.to_path_buf(), exists) 74 | } 75 | } 76 | } else { 77 | (parent.to_path_buf(), true) 78 | }; 79 | if parent_exists { 80 | let (newpath, stop) = lookup(parent, segs[segs.len() - 1], true); 81 | if !stop { 82 | cache.insert(&newpath); 83 | } 84 | return newpath; 85 | } 86 | 87 | // start from the root, then add segments one by one. 88 | let mut stop = false; 89 | let mut newpath = base; 90 | let lastseg = segs.len() - 1; 91 | for (idx, seg) in segs.into_iter().enumerate() { 92 | if !stop { 93 | if idx == lastseg { 94 | // Save the path leading up to this file or dir. 95 | cache.insert(&newpath); 96 | } 97 | let (n, s) = lookup(newpath, seg, false); 98 | newpath = n; 99 | stop = s; 100 | } else { 101 | newpath.push(seg); 102 | } 103 | } 104 | if !stop { 105 | // resolved succesfully. save in cache. 106 | cache.insert(&newpath); 107 | } 108 | newpath 109 | } 110 | 111 | // lookup a filename in a directory in a case insensitive way. 112 | fn lookup(mut path: PathBuf, seg: &OsStr, no_init_check: bool) -> (PathBuf, bool) { 113 | // does it exist as-is? 114 | let mut path2 = path.clone(); 115 | path2.push(seg); 116 | if !no_init_check { 117 | match path2.metadata() { 118 | Ok(_) => return (path2, false), 119 | Err(ref e) if e.kind() != ErrorKind::NotFound => { 120 | // stop on errors other than "NotFound". 121 | return (path2, true); 122 | } 123 | Err(_) => {} 124 | } 125 | } 126 | 127 | // first, lowercase filename. 128 | let filename = match seg.to_str() { 129 | Some(s) => s.to_lowercase(), 130 | None => return (path2, true), 131 | }; 132 | 133 | // we have to read the entire directory. 134 | let dir = match path.read_dir() { 135 | Ok(dir) => dir, 136 | Err(_) => return (path2, true), 137 | }; 138 | for entry in dir.into_iter() { 139 | let entry = match entry { 140 | Ok(e) => e, 141 | Err(_) => continue, 142 | }; 143 | let entry_name = entry.file_name(); 144 | let name = match entry_name.to_str() { 145 | Some(n) => n, 146 | None => continue, 147 | }; 148 | if name.to_lowercase() == filename { 149 | path.push(name); 150 | return (path, false); 151 | } 152 | } 153 | (path2, true) 154 | } 155 | 156 | // The cache stores a mapping of lowercased path -> actual path. 157 | pub struct Cache { 158 | cache: Mutex>, 159 | } 160 | 161 | #[derive(Clone)] 162 | struct Entry { 163 | // Full case-sensitive pathname. 164 | path: PathBuf, 165 | // Unix timestamp. 166 | time: u64, 167 | } 168 | 169 | // helper 170 | fn pathbuf_to_lowercase(path: PathBuf) -> PathBuf { 171 | let s = match OsString::from(path).into_string() { 172 | Ok(s) => OsString::from(s.to_lowercase()), 173 | Err(s) => s, 174 | }; 175 | PathBuf::from(s) 176 | } 177 | 178 | impl Cache { 179 | pub fn new(size: usize) -> Cache { 180 | thread::spawn(move || { 181 | // House keeping. Every 30 seconds, remove entries older than 182 | // CACHE_MAX_AGE seconds from the LRU cache. 
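// The sweep walks from the least-recently-used end and stops at the first entry
// that is still younger than CACHE_MAX_AGE, so it only ever pops expired entries.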
183 | loop { 184 | thread::sleep(Duration::from_millis(CACHE_SLEEP_MS)); 185 | if let Ok(d) = SystemTime::now().duration_since(UNIX_EPOCH) { 186 | let now = d.as_secs(); 187 | let mut cache = CACHE.cache.lock(); 188 | while let Some((_k, e)) = cache.peek_lru() { 189 | trace!(target: "webdav_cache", "Cache: purge check: {_k:?}"); 190 | if e.time + CACHE_MAX_AGE > now { 191 | break; 192 | } 193 | let _age = now - e.time; 194 | if let Some((_k, _)) = cache.pop_lru() { 195 | trace!(target: "webdav_cache", "Cache: purging {_k:?} (age {_age})"); 196 | } else { 197 | break; 198 | } 199 | } 200 | drop(cache); 201 | } 202 | } 203 | }); 204 | Cache { 205 | cache: Mutex::new(LruCache::new(NonZeroUsize::new(size).unwrap())), 206 | } 207 | } 208 | 209 | // Insert an entry into the cache. 210 | pub fn insert(&self, path: &Path) { 211 | let lc_path = pathbuf_to_lowercase(PathBuf::from(path)); 212 | if let Ok(d) = SystemTime::now().duration_since(UNIX_EPOCH) { 213 | let e = Entry { 214 | path: PathBuf::from(path), 215 | time: d.as_secs(), 216 | }; 217 | let mut cache = self.cache.lock(); 218 | cache.put(lc_path, e); 219 | } 220 | } 221 | 222 | // Get an entry from the cache, and validate it. If it's valid 223 | // return the actual pathname and metadata. If it's invalid remove 224 | // it from the cache and return None. 225 | pub fn get(&self, path: &Path) -> Option<(PathBuf, fs::Metadata)> { 226 | // First lowercase the entire path. 227 | let lc_path = pathbuf_to_lowercase(PathBuf::from(path)); 228 | // Lookup. 229 | let e = { 230 | let mut cache = self.cache.lock(); 231 | cache.get(&lc_path)?.clone() 232 | }; 233 | // Found, validate. 234 | match fs::metadata(&e.path) { 235 | Err(_) => { 236 | let mut cache = self.cache.lock(); 237 | cache.pop(&lc_path); 238 | None 239 | } 240 | Ok(m) => Some((e.path, m)), 241 | } 242 | } 243 | } 244 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # dav-server-rs 2 | 3 | [![Apache-2.0 licensed](https://img.shields.io/badge/license-Apache2.0-blue.svg)](https://www.apache.org/licenses/LICENSE-2.0.txt) 4 | [![Crates.io](https://img.shields.io/crates/v/dav-server.svg)](https://crates.io/crates/dav-server) 5 | [![docs.rs](https://docs.rs/dav-server/badge.svg)](https://docs.rs/dav-server) 6 | 7 | A fork of the [webdav-handler-rs](https://github.com/miquels/webdav-handler-rs) project. 8 | 9 | ### Generic async HTTP/Webdav handler 10 | 11 | [`Webdav`] (RFC4918) is defined as 12 | HTTP (GET/HEAD/PUT/DELETE) plus a bunch of extension methods (PROPFIND, etc). 13 | These extension methods are used to manage collections (like unix directories), 14 | get information on collections (like unix `ls` or `readdir`), rename and 15 | copy items, lock/unlock items, etc. 16 | 17 | A `handler` is a piece of code that takes a `http::Request`, processes it in some 18 | way, and then generates a `http::Response`. This library is a `handler` that maps 19 | the HTTP/Webdav protocol to the filesystem. Or actually, "a" filesystem. Included 20 | is an adapter for the local filesystem (`localfs`), and an adapter for an 21 | in-memory filesystem (`memfs`). 22 | 23 | So this library can be used as a handler with HTTP servers like [hyper], 24 | [warp], [actix-web], etc. Either as a correct and complete HTTP handler for 25 | files (GET/HEAD) or as a handler for the entire Webdav protocol. 
In the latter case, you can 26 | mount it as a remote filesystem: Linux, Windows, macOS can all mount Webdav filesystems. 27 | 28 | ### Backend interfaces. 29 | 30 | The backend interfaces are similar to the ones from the Go `x/net/webdav package`: 31 | 32 | - the library contains a [HTTP handler][DavHandler]. 33 | - you supply a [filesystem][DavFileSystem] for backend storage, which can optionally 34 | implement reading/writing [DAV properties][DavProp]. If the file system requires 35 | authorization, implement a [special trait][GuardedFileSystem]. 36 | - you can supply a [locksystem][DavLockSystem] that handles webdav locks. 37 | 38 | The handler in this library works with the standard http types 39 | from the `http` and `http_body` crates. That means that you can use it 40 | straight away with http libraries / frameworks that also work with 41 | those types, like hyper. Compatibility modules for [actix-web][actix-compat] 42 | and [warp][warp-compat] are also provided. 43 | 44 | ### Implemented standards. 45 | 46 | Currently [passes the "basic", "copymove", "props", "locks" and "http" 47 | checks][README_litmus] of the Webdav Litmus Test testsuite. That's all of the base 48 | [RFC4918] webdav specification. 49 | 50 | The litmus test suite also has tests for RFC3744 "acl" and "principal", 51 | RFC5842 "bind", and RFC3253 "versioning". Those we do not support right now. 52 | 53 | The relevant parts of the HTTP RFCs are also implemented, such as the 54 | preconditions (If-Match, If-None-Match, If-Modified-Since, If-Unmodified-Since, 55 | If-Range), partial transfers (Range). 56 | 57 | Also implemented is `partial PUT`, for which there are currently two 58 | non-standard ways to do it: [`PUT` with the `Content-Range` header][PUT], 59 | which is what Apache's `mod_dav` implements, and [`PATCH` with the `X-Update-Range` 60 | header][PATCH] from `SabreDav`. 61 | 62 | ### Backends. 63 | 64 | Included are two filesystems: 65 | 66 | - [`LocalFs`]: serves a directory on the local filesystem 67 | - [`MemFs`]: ephemeral in-memory filesystem. supports DAV properties. 68 | 69 | You're able to implement custom filesystem adapter: 70 | 71 | - [`DavFileSystem`][DavFileSystem]: without authorization. 72 | - [`GuardedFileSystem`][GuardedFileSystem]: when access control is required. 73 | 74 | Also included are two locksystems: 75 | 76 | - [`MemLs`]: ephemeral in-memory locksystem. 77 | - [`FakeLs`]: fake locksystem. just enough LOCK/UNLOCK support for macOS/Windows. 78 | 79 | External filesystems: 80 | 81 | - [`OpendalFs`](https://github.com/apache/opendal/tree/main/integrations/dav-server): serves different storage services via [opendal](https://github.com/apache/opendal) 82 | 83 | ### Example. 84 | 85 | Example server using [hyper] that serves the /tmp directory in r/w mode. You should be 86 | able to mount this network share from Linux, macOS and Windows. [Examples][examples] 87 | for other frameworks are also available. 
88 | 89 | ```rust 90 | use std::{convert::Infallible, net::SocketAddr}; 91 | use hyper::{server::conn::http1, service::service_fn}; 92 | use hyper_util::rt::TokioIo; 93 | use tokio::net::TcpListener; 94 | use dav_server::{fakels::FakeLs, localfs::LocalFs, DavHandler}; 95 | 96 | #[tokio::main] 97 | async fn main() { 98 | let dir = "/tmp"; 99 | let addr: SocketAddr = ([127, 0, 0, 1], 4918).into(); 100 | 101 | let dav_server = DavHandler::builder() 102 | .filesystem(LocalFs::new(dir, false, false, false)) 103 | .locksystem(FakeLs::new()) 104 | .build_handler(); 105 | 106 | let listener = TcpListener::bind(addr).await.unwrap(); 107 | 108 | println!("Listening {addr}"); 109 | 110 | // We start a loop to continuously accept incoming connections 111 | loop { 112 | let (stream, _) = listener.accept().await.unwrap(); 113 | let dav_server = dav_server.clone(); 114 | 115 | // Use an adapter to access something implementing `tokio::io` traits as if they implement 116 | // `hyper::rt` IO traits. 117 | let io = TokioIo::new(stream); 118 | 119 | // Spawn a tokio task to serve multiple connections concurrently 120 | tokio::task::spawn(async move { 121 | // Finally, we bind the incoming connection to our `hello` service 122 | if let Err(err) = http1::Builder::new() 123 | // `service_fn` converts our function in a `Service` 124 | .serve_connection( 125 | io, 126 | service_fn({ 127 | move |req| { 128 | let dav_server = dav_server.clone(); 129 | async move { Ok::<_, Infallible>(dav_server.handle(req).await) } 130 | } 131 | }), 132 | ) 133 | .await 134 | { 135 | eprintln!("Failed serving: {err:?}"); 136 | } 137 | }); 138 | } 139 | } 140 | ``` 141 | 142 | [DavHandler]: https://docs.rs/dav-server/latest/dav_server/struct.DavHandler.html 143 | [DavFileSystem]: https://docs.rs/dav-server/latest/dav_server/fs/index.html 144 | [GuardedFileSystem]: https://docs.rs/dav-server/latest/dav_server/fs/trait.GuardedFileSystem.html 145 | [DavLockSystem]: https://docs.rs/dav-server/latest/dav_server/ls/index.html 146 | [DavProp]: https://docs.rs/dav-server/latest/dav_server/fs/struct.DavProp.html 147 | [`WebDav`]: https://tools.ietf.org/html/rfc4918 148 | [RFC4918]: https://tools.ietf.org/html/rfc4918 149 | [`MemLs`]: https://docs.rs/dav-server/latest/dav_server/memls/index.html 150 | [`MemFs`]: https://docs.rs/dav-server/latest/dav_server/memfs/index.html 151 | [`LocalFs`]: https://docs.rs/dav-server/latest/dav_server/localfs/index.html 152 | [`FakeLs`]: https://docs.rs/dav-server/latest/dav_server/fakels/index.html 153 | [actix-compat]: https://docs.rs/dav-server/latest/dav_server/actix/index.html 154 | [warp-compat]: https://docs.rs/dav-server/latest/dav_server/warp/index.html 155 | [README_litmus]: https://github.com/messense/dav-server-rs/blob/main/README.litmus-test.md 156 | [examples]: https://github.com/messense/dav-server-rs/tree/main/examples/ 157 | [PUT]: https://github.com/messense/dav-server-rs/tree/main/doc/Apache-PUT-with-Content-Range.md 158 | [PATCH]: https://github.com/messense/dav-server-rs/tree/main/doc/SABREDAV-partialupdate.md 159 | [hyper]: https://hyper.rs/ 160 | [warp]: https://crates.io/crates/warp 161 | [actix-web]: https://actix.rs/ 162 | 163 | ### Building. 164 | 165 | This crate uses std::future::Future and async/await, so it only works with Rust 1.39 and up. 166 | 167 | ### Testing. 168 | 169 | ``` 170 | RUST_LOG=dav_server=debug cargo run --example sample-litmus-server 171 | ``` 172 | 173 | This will start a server on port 4918, serving an in-memory filesystem. 
174 | For other options, run `cargo run --example sample-litmus-server -- --help` 175 | 176 | ### Copyright and License. 177 | 178 | * © 2018, 2019, 2020 XS4ALL Internet bv 179 | * © 2018, 2019, 2020 Miquel van Smoorenburg 180 | * © 2021 - 2023 Messense Lv 181 | * [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) 182 | -------------------------------------------------------------------------------- /src/caldav.rs: -------------------------------------------------------------------------------- 1 | //! CalDAV (Calendaring Extensions to WebDAV) support 2 | //! 3 | //! This module provides CalDAV functionality on top of the base WebDAV implementation. 4 | //! CalDAV is defined in RFC 4791 and provides standardized access to calendar data 5 | //! using the iCalendar format. 6 | 7 | #[cfg(feature = "caldav")] 8 | use icalendar::Calendar; 9 | use xmltree::Element; 10 | 11 | // CalDAV XML namespaces 12 | pub const NS_CALDAV_URI: &str = "urn:ietf:params:xml:ns:caldav"; 13 | pub const NS_CALENDARSERVER_URI: &str = "http://calendarserver.org/ns/"; 14 | 15 | // CalDAV property names 16 | pub const CALDAV_PROPERTIES: &[&str] = &[ 17 | "C:calendar-description", 18 | "C:calendar-timezone", 19 | "C:supported-calendar-component-set", 20 | "C:supported-calendar-data", 21 | "C:max-resource-size", 22 | "C:min-date-time", 23 | "C:max-date-time", 24 | "C:max-instances", 25 | "C:max-attendees-per-instance", 26 | "C:calendar-home-set", 27 | "C:calendar-user-address-set", 28 | "C:schedule-inbox-URL", 29 | "C:schedule-outbox-URL", 30 | ]; 31 | 32 | /// CalDAV resource types 33 | #[derive(Debug, Clone, PartialEq)] 34 | pub enum CalDavResourceType { 35 | Calendar, 36 | ScheduleInbox, 37 | ScheduleOutbox, 38 | CalendarObject, 39 | Regular, 40 | } 41 | 42 | /// CalDAV component types supported in a calendar collection 43 | #[derive(Debug, Clone, PartialEq)] 44 | pub enum CalendarComponentType { 45 | VEvent, 46 | VTodo, 47 | VJournal, 48 | VFreeBusy, 49 | VTimezone, 50 | VAlarm, 51 | } 52 | 53 | impl CalendarComponentType { 54 | pub fn as_str(&self) -> &'static str { 55 | match self { 56 | CalendarComponentType::VEvent => "VEVENT", 57 | CalendarComponentType::VTodo => "VTODO", 58 | CalendarComponentType::VJournal => "VJOURNAL", 59 | CalendarComponentType::VFreeBusy => "VFREEBUSY", 60 | CalendarComponentType::VTimezone => "VTIMEZONE", 61 | CalendarComponentType::VAlarm => "VALARM", 62 | } 63 | } 64 | } 65 | 66 | /// CalDAV calendar collection properties 67 | #[derive(Debug, Clone)] 68 | pub struct CalendarProperties { 69 | pub description: Option, 70 | pub timezone: Option, 71 | pub supported_components: Vec, 72 | pub max_resource_size: Option, 73 | pub color: Option, 74 | pub display_name: Option, 75 | } 76 | 77 | impl Default for CalendarProperties { 78 | fn default() -> Self { 79 | Self { 80 | description: None, 81 | timezone: None, 82 | supported_components: vec![ 83 | CalendarComponentType::VEvent, 84 | CalendarComponentType::VTodo, 85 | CalendarComponentType::VJournal, 86 | CalendarComponentType::VFreeBusy, 87 | ], 88 | max_resource_size: Some(1024 * 1024), // 1MB default 89 | color: None, 90 | display_name: None, 91 | } 92 | } 93 | } 94 | 95 | /// Calendar query filters for REPORT requests 96 | #[derive(Debug, Clone)] 97 | pub struct CalendarQuery { 98 | pub comp_filter: Option, 99 | pub time_range: Option, 100 | pub properties: Vec, 101 | } 102 | 103 | #[derive(Debug, Clone)] 104 | pub struct ComponentFilter { 105 | pub name: String, 106 | pub is_not_defined: bool, 107 | pub time_range: 
Option, 108 | pub prop_filters: Vec, 109 | pub comp_filters: Vec, 110 | } 111 | 112 | #[derive(Debug, Clone)] 113 | pub struct PropertyFilter { 114 | pub name: String, 115 | pub is_not_defined: bool, 116 | pub text_match: Option, 117 | pub time_range: Option, 118 | pub param_filters: Vec, 119 | } 120 | 121 | #[derive(Debug, Clone)] 122 | pub struct ParameterFilter { 123 | pub name: String, 124 | pub is_not_defined: bool, 125 | pub text_match: Option, 126 | } 127 | 128 | #[derive(Debug, Clone)] 129 | pub struct TextMatch { 130 | pub text: String, 131 | pub collation: Option, 132 | pub negate_condition: bool, 133 | } 134 | 135 | #[derive(Debug, Clone)] 136 | pub struct TimeRange { 137 | pub start: Option, // ISO 8601 format 138 | pub end: Option, // ISO 8601 format 139 | } 140 | 141 | /// CalDAV REPORT request types 142 | #[derive(Debug, Clone)] 143 | pub enum CalDavReportType { 144 | CalendarQuery(CalendarQuery), 145 | CalendarMultiget { hrefs: Vec }, 146 | FreeBusyQuery { time_range: TimeRange }, 147 | } 148 | 149 | /// Helper functions for CalDAV XML generation 150 | pub fn create_supported_calendar_component_set(components: &[CalendarComponentType]) -> Element { 151 | let mut elem = Element::new("supported-calendar-component-set"); 152 | elem.namespace = Some(NS_CALDAV_URI.to_string()); 153 | 154 | for comp in components { 155 | let mut comp_elem = Element::new("comp"); 156 | comp_elem.namespace = Some(NS_CALDAV_URI.to_string()); 157 | comp_elem 158 | .attributes 159 | .insert("name".to_string(), comp.as_str().to_string()); 160 | elem.children.push(xmltree::XMLNode::Element(comp_elem)); 161 | } 162 | 163 | elem 164 | } 165 | 166 | pub fn create_supported_calendar_data() -> Element { 167 | let mut elem = Element::new("supported-calendar-data"); 168 | elem.namespace = Some(NS_CALDAV_URI.to_string()); 169 | 170 | let mut calendar_data = Element::new("calendar-data"); 171 | calendar_data.namespace = Some(NS_CALDAV_URI.to_string()); 172 | calendar_data 173 | .attributes 174 | .insert("content-type".to_string(), "text/calendar".to_string()); 175 | calendar_data 176 | .attributes 177 | .insert("version".to_string(), "2.0".to_string()); 178 | 179 | elem.children.push(xmltree::XMLNode::Element(calendar_data)); 180 | elem 181 | } 182 | 183 | pub fn create_calendar_home_set(path: &str) -> Element { 184 | let mut elem = Element::new("calendar-home-set"); 185 | elem.namespace = Some(NS_CALDAV_URI.to_string()); 186 | 187 | let mut href = Element::new("href"); 188 | href.namespace = Some("DAV:".to_string()); 189 | href.children.push(xmltree::XMLNode::Text(path.to_string())); 190 | 191 | elem.children.push(xmltree::XMLNode::Element(href)); 192 | elem 193 | } 194 | 195 | /// Check if a resource is a calendar collection based on resource type 196 | pub fn is_calendar_collection(resource_type: &[Element]) -> bool { 197 | resource_type 198 | .iter() 199 | .any(|elem| elem.name == "calendar" && elem.namespace.as_deref() == Some(NS_CALDAV_URI)) 200 | } 201 | 202 | /// Check if content appears to be iCalendar data 203 | pub fn is_calendar_data(content: &[u8]) -> bool { 204 | content.starts_with(b"BEGIN:VCALENDAR") 205 | && (content.ends_with(b"END:VCALENDAR") || content.ends_with(b"END:VCALENDAR\n")) 206 | } 207 | 208 | #[cfg(feature = "caldav")] 209 | /// Validate iCalendar data using the icalendar crate 210 | pub fn validate_calendar_data(content: &str) -> Result { 211 | content 212 | .parse::() 213 | .map_err(|e| format!("Invalid iCalendar data: {}", e)) 214 | } 215 | 216 | #[cfg(not(feature = "caldav"))] 
217 | /// Stub implementation when caldav feature is disabled 218 | pub fn validate_calendar_data(_content: &str) -> Result<(), String> { 219 | Err("CalDAV feature not enabled".to_string()) 220 | } 221 | 222 | /// Extract the UID from calendar data 223 | pub fn extract_calendar_uid(content: &str) -> Option { 224 | for line in content.lines() { 225 | let line = line.trim(); 226 | if let Some(uid) = line.strip_prefix("UID:") { 227 | return Some(uid.to_string()); 228 | } 229 | } 230 | None 231 | } 232 | 233 | /// Generate a simple calendar collection resource type XML 234 | pub fn calendar_resource_type() -> Vec { 235 | let mut collection = Element::new("collection"); 236 | collection.namespace = Some("DAV:".to_string()); 237 | 238 | let mut calendar = Element::new("calendar"); 239 | calendar.namespace = Some(NS_CALDAV_URI.to_string()); 240 | 241 | vec![collection, calendar] 242 | } 243 | 244 | /// Generate schedule inbox resource type XML 245 | pub fn schedule_inbox_resource_type() -> Vec { 246 | let mut collection = Element::new("collection"); 247 | collection.namespace = Some("DAV:".to_string()); 248 | 249 | let mut schedule_inbox = Element::new("schedule-inbox"); 250 | schedule_inbox.namespace = Some(NS_CALDAV_URI.to_string()); 251 | 252 | vec![collection, schedule_inbox] 253 | } 254 | 255 | /// Generate schedule outbox resource type XML 256 | pub fn schedule_outbox_resource_type() -> Vec { 257 | let mut collection = Element::new("collection"); 258 | collection.namespace = Some("DAV:".to_string()); 259 | 260 | let mut schedule_outbox = Element::new("schedule-outbox"); 261 | schedule_outbox.namespace = Some(NS_CALDAV_URI.to_string()); 262 | 263 | vec![collection, schedule_outbox] 264 | } 265 | -------------------------------------------------------------------------------- /src/conditional.rs: -------------------------------------------------------------------------------- 1 | use std::time::{Duration, SystemTime, UNIX_EPOCH}; 2 | 3 | use headers::HeaderMapExt; 4 | use http::{Method, StatusCode}; 5 | 6 | use crate::davheaders::{self, ETag}; 7 | use crate::davpath::DavPath; 8 | use crate::fs::{DavMetaData, GuardedFileSystem}; 9 | use crate::ls::DavLockSystem; 10 | 11 | type Request = http::Request<()>; 12 | 13 | // SystemTime has nanosecond precision. Round it down to the 14 | // nearest second, because an HttpDate has second precision. 15 | fn round_time(tm: impl Into) -> SystemTime { 16 | let tm = tm.into(); 17 | match tm.duration_since(UNIX_EPOCH) { 18 | Ok(d) => UNIX_EPOCH + Duration::from_secs(d.as_secs()), 19 | Err(_) => tm, 20 | } 21 | } 22 | 23 | pub(crate) fn ifrange_match( 24 | hdr: &davheaders::IfRange, 25 | tag: Option<&davheaders::ETag>, 26 | date: Option, 27 | ) -> bool { 28 | match *hdr { 29 | davheaders::IfRange::Date(ref d) => match date { 30 | Some(date) => round_time(date) == round_time(*d), 31 | None => false, 32 | }, 33 | davheaders::IfRange::ETag(ref t) => match tag { 34 | Some(tag) => t == tag, 35 | None => false, 36 | }, 37 | } 38 | } 39 | 40 | pub(crate) fn etaglist_match( 41 | tags: &davheaders::ETagList, 42 | exists: bool, 43 | tag: Option<&davheaders::ETag>, 44 | ) -> bool { 45 | match *tags { 46 | davheaders::ETagList::Star => exists, 47 | davheaders::ETagList::Tags(ref t) => match tag { 48 | Some(tag) => t.iter().any(|x| x == tag), 49 | None => false, 50 | }, 51 | } 52 | } 53 | 54 | // Handle the if-headers: RFC 7232, HTTP/1.1 Conditional Requests. 
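// Evaluation order follows RFC 7232: If-Match and If-Unmodified-Since are checked
// first and fail with 412 Precondition Failed; If-None-Match and If-Modified-Since
// are checked next and answer 304 Not Modified for GET/HEAD requests.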
55 | pub(crate) fn http_if_match(req: &Request, meta: Option<&dyn DavMetaData>) -> Option { 56 | let file_modified = meta.and_then(|m| m.modified().ok()); 57 | 58 | if let Some(r) = req.headers().typed_get::() { 59 | let etag = meta.and_then(ETag::from_meta); 60 | if !etaglist_match(&r.0, meta.is_some(), etag.as_ref()) { 61 | trace!("precondition fail: If-Match {r:?}"); 62 | return Some(StatusCode::PRECONDITION_FAILED); 63 | } 64 | } else if let Some(r) = req.headers().typed_get::() { 65 | match file_modified { 66 | None => return Some(StatusCode::PRECONDITION_FAILED), 67 | Some(file_modified) => { 68 | if round_time(file_modified) > round_time(r) { 69 | trace!("precondition fail: If-Unmodified-Since {r:?}"); 70 | return Some(StatusCode::PRECONDITION_FAILED); 71 | } 72 | } 73 | } 74 | } 75 | 76 | if let Some(r) = req.headers().typed_get::() { 77 | let etag = meta.and_then(ETag::from_meta); 78 | if etaglist_match(&r.0, meta.is_some(), etag.as_ref()) { 79 | trace!("precondition fail: If-None-Match {r:?}"); 80 | if req.method() == Method::GET || req.method() == Method::HEAD { 81 | return Some(StatusCode::NOT_MODIFIED); 82 | } else { 83 | return Some(StatusCode::PRECONDITION_FAILED); 84 | } 85 | } 86 | } else if let Some(r) = req.headers().typed_get::() 87 | && (req.method() == Method::GET || req.method() == Method::HEAD) 88 | && let Some(file_modified) = file_modified 89 | && round_time(file_modified) <= round_time(r) 90 | { 91 | trace!("not-modified If-Modified-Since {r:?}"); 92 | return Some(StatusCode::NOT_MODIFIED); 93 | } 94 | None 95 | } 96 | 97 | // handle the If header: RFC4918, 10.4. If Header 98 | // 99 | // returns true if the header was not present, or if any of the iflists 100 | // evaluated to true. Also returns a Vec of StateTokens that we encountered. 101 | // 102 | // caller should set the http status to 412 PreconditionFailed if 103 | // the return value from this function is false. 104 | // 105 | pub(crate) async fn dav_if_match<'a, C>( 106 | req: &'a Request, 107 | fs: &'a (dyn GuardedFileSystem + 'static), 108 | ls: &'a Option>, 109 | path: &'a DavPath, 110 | credentials: &C, 111 | ) -> (bool, Vec) 112 | where 113 | C: Clone + Send + Sync + 'static, 114 | { 115 | let mut tokens: Vec = Vec::new(); 116 | let mut any_list_ok = false; 117 | 118 | let r = match req.headers().typed_get::() { 119 | Some(r) => r, 120 | None => return (true, tokens), 121 | }; 122 | 123 | for iflist in r.0.iter() { 124 | // save and return all statetokens that we encountered. 125 | let toks = iflist.conditions.iter().filter_map(|c| match c.item { 126 | davheaders::IfItem::StateToken(ref t) => Some(t.to_owned()), 127 | _ => None, 128 | }); 129 | tokens.extend(toks); 130 | 131 | // skip over if a previous list already evaluated to true. 132 | if any_list_ok { 133 | continue; 134 | } 135 | 136 | // find the resource that this list is about. 137 | let mut pa: Option = None; 138 | let (p, valid) = match iflist.resource_tag { 139 | Some(ref url) => { 140 | match DavPath::from_str_and_prefix(url.path(), path.prefix()) { 141 | Ok(p) => { 142 | // anchor davpath in pa. 143 | let p: &DavPath = pa.get_or_insert(p); 144 | (p, true) 145 | } 146 | Err(_) => (path, false), 147 | } 148 | } 149 | None => (path, true), 150 | }; 151 | 152 | // now process the conditions. they must all be true. 
153 | let mut list_ok = false; 154 | for cond in iflist.conditions.iter() { 155 | let cond_ok = match cond.item { 156 | davheaders::IfItem::StateToken(ref s) => { 157 | // tokens in DAV: namespace always evaluate to false (10.4.8) 158 | if !valid || s.starts_with("DAV:") { 159 | false 160 | } else { 161 | match *ls { 162 | Some(ref ls) => ls.check(p, None, true, false, vec![s]).await.is_ok(), 163 | None => false, 164 | } 165 | } 166 | } 167 | davheaders::IfItem::ETag(ref tag) => { 168 | if !valid { 169 | // invalid location, so always false. 170 | false 171 | } else { 172 | match fs.metadata(p, credentials).await { 173 | Ok(meta) => { 174 | // exists and may have metadata .. 175 | if let Some(mtag) = ETag::from_meta(meta.as_ref()) { 176 | tag == &mtag 177 | } else { 178 | false 179 | } 180 | } 181 | Err(_) => { 182 | // metadata error, fail. 183 | false 184 | } 185 | } 186 | } 187 | } 188 | }; 189 | if cond_ok == cond.not { 190 | list_ok = false; 191 | break; 192 | } 193 | list_ok = true; 194 | } 195 | if list_ok { 196 | any_list_ok = true; 197 | } 198 | } 199 | if !any_list_ok { 200 | trace!("precondition fail: If {:?}", r.0); 201 | } 202 | (any_list_ok, tokens) 203 | } 204 | 205 | // Handle both the HTTP conditional If: headers, and the webdav If: header. 206 | pub(crate) async fn if_match<'a, C>( 207 | req: &'a Request, 208 | meta: Option<&'a (dyn DavMetaData + 'static)>, 209 | fs: &'a (dyn GuardedFileSystem + 'static), 210 | ls: &'a Option>, 211 | path: &'a DavPath, 212 | credentials: &C, 213 | ) -> Option 214 | where 215 | C: Clone + Send + Sync + 'static, 216 | { 217 | match dav_if_match(req, fs, ls, path, credentials).await { 218 | (true, _) => {} 219 | (false, _) => return Some(StatusCode::PRECONDITION_FAILED), 220 | } 221 | http_if_match(req, meta) 222 | } 223 | 224 | // Like if_match, but also returns all "associated state-tokens" 225 | pub(crate) async fn if_match_get_tokens<'a, C>( 226 | req: &'a Request, 227 | meta: Option<&'a (dyn DavMetaData + 'static)>, 228 | fs: &'a (dyn GuardedFileSystem + 'static), 229 | ls: &'a Option>, 230 | path: &'a DavPath, 231 | credentials: &C, 232 | ) -> Result, StatusCode> 233 | where 234 | C: Clone + Send + Sync + 'static, 235 | { 236 | if let Some(code) = http_if_match(req, meta) { 237 | return Err(code); 238 | } 239 | match dav_if_match(req, fs, ls, path, credentials).await { 240 | (true, v) => Ok(v), 241 | (false, _) => Err(StatusCode::PRECONDITION_FAILED), 242 | } 243 | } 244 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! ## Generic async HTTP/Webdav handler with CalDAV support 2 | //! 3 | //! [`Webdav`] (RFC4918) is defined as 4 | //! HTTP (GET/HEAD/PUT/DELETE) plus a bunch of extension methods (PROPFIND, etc). 5 | //! These extension methods are used to manage collections (like unix directories), 6 | //! get information on collections (like unix `ls` or `readdir`), rename and 7 | //! copy items, lock/unlock items, etc. 8 | //! 9 | //! [`CalDAV`] (RFC4791) extends WebDAV to provide calendar functionality, 10 | //! including calendar collections, calendar resources (iCalendar data), 11 | //! and calendar-specific queries. CalDAV support is available with the 12 | //! `caldav` feature. 13 | //! 14 | //! A `handler` is a piece of code that takes a `http::Request`, processes it in some 15 | //! way, and then generates a `http::Response`. This library is a `handler` that maps 16 | //! 
the HTTP/Webdav protocol to the filesystem. Or actually, "a" filesystem. Included 17 | //! is an adapter for the local filesystem (`localfs`), and an adapter for an 18 | //! in-memory filesystem (`memfs`). 19 | //! 20 | //! So this library can be used as a handler with HTTP servers like [hyper], 21 | //! [warp], [actix-web], etc. Either as a correct and complete HTTP handler for 22 | //! files (GET/HEAD) or as a handler for the entire Webdav protocol. In the latter case, you can 23 | //! mount it as a remote filesystem: Linux, Windows, macOS can all mount Webdav filesystems. 24 | //! 25 | //! With CalDAV support enabled, it can also serve as a calendar server compatible 26 | //! with CalDAV clients like Thunderbird, Apple Calendar, and other calendar applications. 27 | //! 28 | //! ## Backend interfaces. 29 | //! 30 | //! The backend interfaces are similar to the ones from the Go `x/net/webdav package`: 31 | //! 32 | //! - the library contains a [HTTP handler][DavHandler]. 33 | //! - you supply a [filesystem][DavFileSystem] for backend storage, which can optionally 34 | //! implement reading/writing [DAV properties][DavProp]. If the file system requires 35 | //! authorization, implement a [special trait][GuardedFileSystem]. 36 | //! - you can supply a [locksystem][DavLockSystem] that handles webdav locks. 37 | //! 38 | //! The handler in this library works with the standard http types 39 | //! from the `http` and `http_body` crates. That means that you can use it 40 | //! straight away with http libraries / frameworks that also work with 41 | //! those types, like hyper. Compatibility modules for [actix-web][actix-compat] 42 | //! and [warp][warp-compat] are also provided. 43 | //! 44 | //! ## Implemented standards. 45 | //! 46 | //! Currently [passes the "basic", "copymove", "props", "locks" and "http" 47 | //! checks][README_litmus] of the Webdav Litmus Test testsuite. That's all of the base 48 | //! [RFC4918] webdav specification. 49 | //! 50 | //! CalDAV support implements the core CalDAV specification from [RFC4791], including: 51 | //! - Calendar collections (MKCALENDAR method) 52 | //! - Calendar queries (REPORT method with calendar-query) 53 | //! - Calendar multiget (REPORT method with calendar-multiget) 54 | //! - CalDAV properties (supported-calendar-component-set, etc.) 55 | //! - iCalendar data validation and processing 56 | //! 57 | //! The litmus test suite also has tests for RFC3744 "acl" and "principal", 58 | //! RFC5842 "bind", and RFC3253 "versioning". Those we do not support right now. 59 | //! 60 | //! The relevant parts of the HTTP RFCs are also implemented, such as the 61 | //! preconditions (If-Match, If-None-Match, If-Modified-Since, If-Unmodified-Since, 62 | //! If-Range), partial transfers (Range). 63 | //! 64 | //! Also implemented is `partial PUT`, for which there are currently two 65 | //! non-standard ways to do it: [`PUT` with the `Content-Range` header][PUT], 66 | //! which is what Apache's `mod_dav` implements, and [`PATCH` with the `X-Update-Range` 67 | //! header][PATCH] from `SabreDav`. 68 | //! 69 | //! ## Backends. 70 | //! 71 | //! Included are two filesystems: 72 | //! 73 | //! - [`LocalFs`]: serves a directory on the local filesystem 74 | //! - [`MemFs`]: ephemeral in-memory filesystem. supports DAV properties. 75 | //! 76 | //! You're able to implement custom filesystem adapter: 77 | //! 78 | //! - [`DavFileSystem`]: without authorization. 79 | //! - [`GuardedFileSystem`]: when access control is required. 80 | //! 81 | //! 
Also included are two locksystems: 82 | //! 83 | //! - [`MemLs`]: ephemeral in-memory locksystem. 84 | //! - [`FakeLs`]: fake locksystem. just enough LOCK/UNLOCK support for macOS/Windows. 85 | //! 86 | //! External filesystem adapter implementations: 87 | //! 88 | //! - [`OpendalFs`](https://github.com/apache/opendal/tree/main/integrations/dav-server): 89 | //! connects various storage protocols via [OpenDAL](https://github.com/apache/opendal). 90 | //! 91 | //! ## CalDAV Support 92 | //! 93 | //! CalDAV functionality is available when the `caldav` feature is enabled: 94 | //! 95 | //! ```toml 96 | //! [dependencies] 97 | //! dav-server = { version = "0.9", features = ["caldav"] } 98 | //! ``` 99 | //! 100 | //! This adds support for: 101 | //! - `MKCALENDAR` method for creating calendar collections 102 | //! - `REPORT` method for calendar queries 103 | //! - CalDAV-specific properties and resource types 104 | //! - iCalendar data validation 105 | //! - Calendar-specific WebDAV extensions 106 | //! 107 | //! ## Example. 108 | //! 109 | //! Example server using [hyper] that serves the /tmp directory in r/w mode. You should be 110 | //! able to mount this network share from Linux, macOS and Windows. [Examples][examples] 111 | //! for other frameworks are also available. 112 | //! 113 | //! ```no_run 114 | //! use std::{convert::Infallible, net::SocketAddr}; 115 | //! use hyper::{server::conn::http1, service::service_fn}; 116 | //! use hyper_util::rt::TokioIo; 117 | //! use tokio::net::TcpListener; 118 | //! use dav_server::{fakels::FakeLs, localfs::LocalFs, DavHandler}; 119 | //! 120 | //! #[tokio::main] 121 | //! async fn main() { 122 | //! let dir = "/tmp"; 123 | //! let addr: SocketAddr = ([127, 0, 0, 1], 4918).into(); 124 | //! 125 | //! let dav_server = DavHandler::builder() 126 | //! .filesystem(LocalFs::new(dir, false, false, false)) 127 | //! .locksystem(FakeLs::new()) 128 | //! .build_handler(); 129 | //! 130 | //! let listener = TcpListener::bind(addr).await.unwrap(); 131 | //! 132 | //! println!("Listening {addr}"); 133 | //! 134 | //! // We start a loop to continuously accept incoming connections 135 | //! loop { 136 | //! let (stream, _) = listener.accept().await.unwrap(); 137 | //! let dav_server = dav_server.clone(); 138 | //! 139 | //! // Use an adapter to access something implementing `tokio::io` traits as if they implement 140 | //! // `hyper::rt` IO traits. 141 | //! let io = TokioIo::new(stream); 142 | //! 143 | //! // Spawn a tokio task to serve multiple connections concurrently 144 | //! tokio::task::spawn(async move { 145 | //! // Finally, we bind the incoming connection to our `hello` service 146 | //! if let Err(err) = http1::Builder::new() 147 | //! // `service_fn` converts our function in a `Service` 148 | //! .serve_connection( 149 | //! io, 150 | //! service_fn({ 151 | //! move |req| { 152 | //! let dav_server = dav_server.clone(); 153 | //! async move { Ok::<_, Infallible>(dav_server.handle(req).await) } 154 | //! } 155 | //! }), 156 | //! ) 157 | //! .await 158 | //! { 159 | //! eprintln!("Failed serving: {err:?}"); 160 | //! } 161 | //! }); 162 | //! } 163 | //! } 164 | //! ``` 165 | //! [DavHandler]: struct.DavHandler.html 166 | //! [DavFileSystem]: fs/index.html 167 | //! [DavLockSystem]: ls/index.html 168 | //! [DavProp]: fs/struct.DavProp.html 169 | //! [`WebDav`]: https://tools.ietf.org/html/rfc4918 170 | //! [RFC4918]: https://tools.ietf.org/html/rfc4918 171 | //! [`CalDAV`]: https://tools.ietf.org/html/rfc4791 172 | //! 
[RFC4791]: https://tools.ietf.org/html/rfc4791 173 | //! [`MemLs`]: memls/index.html 174 | //! [`MemFs`]: memfs/index.html 175 | //! [`LocalFs`]: localfs/index.html 176 | //! [`FakeLs`]: fakels/index.html 177 | //! [actix-compat]: actix/index.html 178 | //! [warp-compat]: warp/index.html 179 | //! [README_litmus]: https://github.com/messense/dav-server-rs/blob/main/README.litmus-test.md 180 | //! [examples]: https://github.com/messense/dav-server-rs/tree/main/examples/ 181 | //! [PUT]: https://github.com/messense/dav-server-rs/tree/main/doc/Apache-PUT-with-Content-Range.md 182 | //! [PATCH]: https://github.com/messense/dav-server-rs/tree/main/doc/SABREDAV-partialupdate.md 183 | //! [hyper]: https://hyper.rs/ 184 | //! [warp]: https://crates.io/crates/warp 185 | //! [actix-web]: https://actix.rs/ 186 | 187 | #![cfg_attr(docsrs, feature(doc_cfg))] 188 | 189 | #[macro_use] 190 | extern crate log; 191 | 192 | mod async_stream; 193 | mod conditional; 194 | mod davhandler; 195 | mod davheaders; 196 | mod errors; 197 | #[cfg(any(docsrs, feature = "caldav"))] 198 | #[cfg_attr(docsrs, doc(cfg(feature = "caldav")))] 199 | mod handle_caldav; 200 | mod handle_copymove; 201 | mod handle_delete; 202 | mod handle_gethead; 203 | mod handle_lock; 204 | mod handle_mkcol; 205 | mod handle_options; 206 | mod handle_props; 207 | mod handle_put; 208 | #[cfg(any(docsrs, feature = "localfs"))] 209 | #[cfg_attr(docsrs, doc(cfg(feature = "localfs")))] 210 | mod localfs_macos; 211 | #[cfg(any(docsrs, feature = "localfs"))] 212 | #[cfg_attr(docsrs, doc(cfg(feature = "localfs")))] 213 | mod localfs_windows; 214 | mod multierror; 215 | mod tree; 216 | mod util; 217 | mod voidfs; 218 | mod xmltree_ext; 219 | 220 | pub mod body; 221 | #[cfg(any(docsrs, feature = "caldav"))] 222 | #[cfg_attr(docsrs, doc(cfg(feature = "caldav")))] 223 | pub mod caldav; 224 | pub mod davpath; 225 | pub mod fakels; 226 | pub mod fs; 227 | #[cfg(any(docsrs, feature = "localfs"))] 228 | #[cfg_attr(docsrs, doc(cfg(feature = "localfs")))] 229 | pub mod localfs; 230 | pub mod ls; 231 | #[cfg(any(docsrs, feature = "memfs"))] 232 | #[cfg_attr(docsrs, doc(cfg(feature = "memfs")))] 233 | pub mod memfs; 234 | pub mod memls; 235 | 236 | #[cfg(any(docsrs, feature = "actix-compat"))] 237 | #[cfg_attr(docsrs, doc(cfg(feature = "actix-compat")))] 238 | pub mod actix; 239 | 240 | #[cfg(any(docsrs, feature = "warp-compat"))] 241 | #[cfg_attr(docsrs, doc(cfg(feature = "warp-compat")))] 242 | pub mod warp; 243 | 244 | pub(crate) use crate::davhandler::DavInner; 245 | pub(crate) use crate::errors::{DavError, DavResult}; 246 | pub(crate) use crate::fs::*; 247 | 248 | pub use crate::davhandler::{DavConfig, DavHandler}; 249 | pub use crate::util::{DavMethod, DavMethodSet}; 250 | -------------------------------------------------------------------------------- /src/localfs_macos.rs: -------------------------------------------------------------------------------- 1 | // Optimizations for macOS and the macOS finder. 2 | // 3 | // - after it reads a directory, macOS likes to do a PROPSTAT of all 4 | // files in the directory with "._" prefixed. so after each PROPSTAT 5 | // with Depth: 1 we keep a cache of "._" files we've seen, so that 6 | // we can easily tell which ones did _not_ exist. 7 | // - deny existence of ".localized" files 8 | // - fake a ".metadata_never_index" in the root 9 | // - fake a ".ql_disablethumbnails" file in the root. 
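// All of this is gated on the `macos` flag of LocalFs; it is an in-memory
// optimization only and never changes what is stored on disk.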
10 | // 11 | use std::ffi::OsString; 12 | use std::num::NonZeroUsize; 13 | #[cfg(unix)] 14 | use std::os::unix::ffi::OsStrExt; 15 | use std::path::{Path, PathBuf}; 16 | use std::sync::LazyLock; 17 | use std::sync::atomic::{AtomicUsize, Ordering}; 18 | use std::thread; 19 | use std::time::{Duration, SystemTime, UNIX_EPOCH}; 20 | 21 | use lru::LruCache; 22 | use parking_lot::Mutex; 23 | 24 | use crate::davpath::DavPath; 25 | use crate::fs::*; 26 | use crate::localfs::LocalFs; 27 | 28 | const DU_CACHE_ENTRIES: usize = 4096; 29 | const DU_CACHE_MAX_AGE: u64 = 60; 30 | const DU_CACHE_SLEEP_MS: u64 = 10037; 31 | 32 | static DU_CACHE: LazyLock = LazyLock::new(|| DUCache::new(DU_CACHE_ENTRIES)); 33 | 34 | static DIR_ID: AtomicUsize = AtomicUsize::new(1); 35 | 36 | // Dot underscore cache entry. 37 | struct Entry { 38 | // Time the entry in the cache was created. 39 | time: SystemTime, 40 | // Modification time of the parent directory. 41 | dir_modtime: SystemTime, 42 | // Unique ID of the parent entry. 43 | dir_id: usize, 44 | } 45 | 46 | // Dot underscore cache. 47 | struct DUCache { 48 | cache: Mutex>, 49 | } 50 | 51 | impl DUCache { 52 | // return a new instance. 53 | fn new(size: usize) -> DUCache { 54 | thread::spawn(move || { 55 | loop { 56 | // House keeping. Every 10 seconds, remove entries older than 57 | // DU_CACHE_MAX_AGE seconds from the LRU cache. 58 | thread::sleep(Duration::from_millis(DU_CACHE_SLEEP_MS)); 59 | { 60 | let mut cache = DU_CACHE.cache.lock(); 61 | let now = SystemTime::now(); 62 | while let Some((_k, e)) = cache.peek_lru() { 63 | if let Ok(age) = now.duration_since(e.time) { 64 | trace!(target: "webdav_cache", "DUCache: purge check {_k:?}"); 65 | if age.as_secs() <= DU_CACHE_MAX_AGE { 66 | break; 67 | } 68 | if let Some((_k, _)) = cache.pop_lru() { 69 | trace!(target: "webdav_cache", "DUCache: purging {:?} (age {})", _k, age.as_secs()); 70 | } else { 71 | break; 72 | } 73 | } else { 74 | break; 75 | } 76 | } 77 | } 78 | } 79 | }); 80 | DUCache { 81 | cache: Mutex::new(LruCache::new(NonZeroUsize::new(size).unwrap())), 82 | } 83 | } 84 | 85 | // Lookup a "._filename" entry in the cache. If we are sure the path 86 | // does _not_ exist, return `true`. 87 | // 88 | // Note that it's assumed the file_name() DOES start with "._". 89 | fn negative(&self, path: &PathBuf) -> bool { 90 | // parent directory must be present in the cache. 91 | let mut dir = match path.parent() { 92 | Some(d) => d.to_path_buf(), 93 | None => return false, 94 | }; 95 | dir.push("."); 96 | let (dir_id, dir_modtime) = { 97 | let cache = self.cache.lock(); 98 | match cache.peek(&dir) { 99 | Some(t) => (t.dir_id, t.dir_modtime), 100 | None => { 101 | trace!(target: "webdav_cache", "DUCache::negative({path:?}): parent not in cache"); 102 | return false; 103 | } 104 | } 105 | }; 106 | 107 | // Get the metadata of the parent to see if it changed. 108 | // This is pretty cheap, since it's most likely in the kernel cache. 109 | let valid = match std::fs::metadata(&dir) { 110 | Ok(m) => m.modified().map(|m| m == dir_modtime).unwrap_or(false), 111 | Err(_) => false, 112 | }; 113 | let mut cache = self.cache.lock(); 114 | if !valid { 115 | trace!(target: "webdav_cache", "DUCache::negative({path:?}): parent in cache but stale"); 116 | cache.pop(&dir); 117 | return false; 118 | } 119 | 120 | // Now if there is _no_ entry in the cache for this file, 121 | // or it is not valid (different timestamp), it did not exist 122 | // the last time we did a readdir(). 
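// Only an entry recorded by the same readdir() pass as the directory entry
// (matching dir_id) proves the file was actually seen; a stale or missing entry
// means it was absent from that listing.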
123 | match cache.peek(path) { 124 | Some(t) => { 125 | trace!(target: "webdav_cache", "DUCache::negative({:?}): in cache, valid: {}", path, t.dir_id != dir_id); 126 | t.dir_id != dir_id 127 | } 128 | None => { 129 | trace!(target: "webdav_cache", "DUCache::negative({path:?}): not in cache"); 130 | true 131 | } 132 | } 133 | } 134 | } 135 | 136 | // Storage for the entries of one dir while we're collecting them. 137 | #[derive(Default)] 138 | pub(crate) struct DUCacheBuilder { 139 | dir: PathBuf, 140 | entries: Vec, 141 | done: bool, 142 | } 143 | 144 | impl DUCacheBuilder { 145 | // return a new instance. 146 | pub fn start(dir: PathBuf) -> DUCacheBuilder { 147 | DUCacheBuilder { 148 | dir, 149 | entries: Vec::new(), 150 | done: false, 151 | } 152 | } 153 | 154 | // add a filename to the list we have 155 | #[cfg(unix)] 156 | pub fn add(&mut self, filename: OsString) { 157 | if let Some(f) = Path::new(&filename).file_name() 158 | && f.as_bytes().starts_with(b"._") 159 | { 160 | self.entries.push(filename); 161 | } 162 | } 163 | 164 | // add a filename to the list we have 165 | #[cfg(windows)] 166 | pub fn add(&mut self, filename: OsString) { 167 | if let Some(f) = Path::new(&filename).file_name() 168 | && f.to_str().unwrap().as_bytes().starts_with(b"._") 169 | { 170 | self.entries.push(filename); 171 | } 172 | } 173 | 174 | // Process the "._" files we collected. 175 | // 176 | // We add all the "._" files we saw in the directory, and the 177 | // directory itself (with "/." added). 178 | pub fn finish(&mut self) { 179 | if self.done { 180 | return; 181 | } 182 | self.done = true; 183 | 184 | // Get parent directory modification time. 185 | let meta = match std::fs::metadata(&self.dir) { 186 | Ok(m) => m, 187 | Err(_) => return, 188 | }; 189 | let dir_modtime = match meta.modified() { 190 | Ok(t) => t, 191 | Err(_) => return, 192 | }; 193 | let dir_id = DIR_ID.fetch_add(1, Ordering::SeqCst); 194 | 195 | let now = SystemTime::now(); 196 | let mut cache = DU_CACHE.cache.lock(); 197 | 198 | // Add "/." to directory and store it. 199 | let mut path = self.dir.clone(); 200 | path.push("."); 201 | let entry = Entry { 202 | time: now, 203 | dir_modtime, 204 | dir_id, 205 | }; 206 | cache.put(path, entry); 207 | 208 | // Now add the "._" files. 209 | for filename in self.entries.drain(..) { 210 | // create full path and add it to the cache. 211 | let mut path = self.dir.clone(); 212 | path.push(filename); 213 | let entry = Entry { 214 | time: now, 215 | dir_modtime, 216 | dir_id, 217 | }; 218 | cache.put(path, entry); 219 | } 220 | } 221 | } 222 | 223 | // Fake metadata for an empty file. 224 | #[derive(Debug, Clone)] 225 | struct EmptyMetaData; 226 | impl DavMetaData for EmptyMetaData { 227 | fn len(&self) -> u64 { 228 | 0 229 | } 230 | fn is_dir(&self) -> bool { 231 | false 232 | } 233 | fn modified(&self) -> FsResult { 234 | // Tue May 30 04:00:00 CEST 2000 235 | Ok(UNIX_EPOCH + Duration::new(959652000, 0)) 236 | } 237 | fn created(&self) -> FsResult { 238 | self.modified() 239 | } 240 | } 241 | 242 | impl LocalFs { 243 | // Is this a virtualfile ? 244 | #[inline] 245 | pub(crate) fn is_virtual(&self, path: &DavPath) -> Option> { 246 | if !self.inner.macos { 247 | return None; 248 | } 249 | match path.as_bytes() { 250 | b"/.metadata_never_index" => {} 251 | b"/.ql_disablethumbnails" => {} 252 | _ => return None, 253 | } 254 | Some(Box::new(EmptyMetaData {})) 255 | } 256 | 257 | // This file can never exist. 
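// Returns true for the reserved macOS helper names: the two virtual files faked
// above, plus ".localized".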
258 | #[inline] 259 | pub(crate) fn is_forbidden(&self, path: &DavPath) -> bool { 260 | if !self.inner.macos { 261 | return false; 262 | } 263 | match path.as_bytes() { 264 | b"/.metadata_never_index" => return true, 265 | b"/.ql_disablethumbnails" => return true, 266 | _ => {} 267 | } 268 | path.file_name_bytes() == b".localized" 269 | } 270 | 271 | // File might not exists because of negative cache entry. 272 | #[cfg(unix)] 273 | #[inline] 274 | pub(crate) fn is_notfound(&self, path: &PathBuf) -> bool { 275 | if !self.inner.macos { 276 | return false; 277 | } 278 | match path.file_name().map(|p| p.as_bytes()) { 279 | Some(b".localized") => true, 280 | Some(name) if name.starts_with(b"._") => DU_CACHE.negative(path), 281 | _ => false, 282 | } 283 | } 284 | 285 | // File might not exists because of negative cache entry. 286 | #[cfg(windows)] 287 | #[inline] 288 | pub(crate) fn is_notfound(&self, path: &PathBuf) -> bool { 289 | if !self.inner.macos { 290 | return false; 291 | } 292 | match path.file_name().map(|p| p.to_str().unwrap().as_bytes()) { 293 | Some(b".localized") => true, 294 | Some(name) if name.starts_with(b"._") => DU_CACHE.negative(path), 295 | _ => false, 296 | } 297 | } 298 | 299 | // Return a "directory cache builder". 300 | #[inline] 301 | pub(crate) fn dir_cache_builder(&self, path: PathBuf) -> Option { 302 | if self.inner.macos { 303 | Some(DUCacheBuilder::start(path)) 304 | } else { 305 | None 306 | } 307 | } 308 | } 309 | -------------------------------------------------------------------------------- /src/handle_put.rs: -------------------------------------------------------------------------------- 1 | use std::any::Any; 2 | use std::error::Error as StdError; 3 | use std::io; 4 | use std::pin::pin; 5 | 6 | use bytes::{Buf, Bytes}; 7 | use headers::HeaderMapExt; 8 | use http::StatusCode as SC; 9 | use http::{self, Request, Response}; 10 | use http_body::Body as HttpBody; 11 | use http_body_util::BodyExt; 12 | 13 | use crate::body::Body; 14 | use crate::conditional::if_match_get_tokens; 15 | use crate::davheaders; 16 | use crate::fs::*; 17 | use crate::{DavError, DavInner, DavResult}; 18 | 19 | const SABRE: &str = "application/x-sabredav-partialupdate"; 20 | 21 | // This is a nice hack. If the type 'E' is actually an io::Error or a Box, 22 | // convert it back into a real io::Error. If it is a DavError or a Box, 23 | // use its Into impl. Otherwise just wrap the error in io::Error::new. 24 | // 25 | // If we had specialization this would look a lot prettier. 26 | // 27 | // Also, this is senseless. It's not as if we _do_ anything with the 28 | // io::Error, other than noticing "oops an error occured". 
29 | fn to_ioerror(err: E) -> io::Error 30 | where 31 | E: StdError + Sync + Send + 'static, 32 | { 33 | let e = &err as &dyn Any; 34 | if e.is::() || e.is::>() { 35 | let err = Box::new(err) as Box; 36 | match err.downcast::() { 37 | Ok(e) => *e, 38 | Err(e) => match e.downcast::>() { 39 | Ok(e) => *(*e), 40 | Err(_) => io::ErrorKind::Other.into(), 41 | }, 42 | } 43 | } else if e.is::() || e.is::>() { 44 | let err = Box::new(err) as Box; 45 | match err.downcast::() { 46 | Ok(e) => (*e).into(), 47 | Err(e) => match e.downcast::>() { 48 | Ok(e) => (*(*e)).into(), 49 | Err(_) => io::ErrorKind::Other.into(), 50 | }, 51 | } 52 | } else { 53 | io::Error::other(err) 54 | } 55 | } 56 | 57 | impl DavInner { 58 | pub(crate) async fn handle_put( 59 | self, 60 | req: &Request<()>, 61 | body: ReqBody, 62 | ) -> DavResult> 63 | where 64 | ReqBody: HttpBody, 65 | ReqData: Buf + Send + 'static, 66 | ReqError: StdError + Send + Sync + 'static, 67 | { 68 | let mut start = 0; 69 | let mut count = 0; 70 | let mut have_count = false; 71 | let mut do_range = false; 72 | 73 | let mut oo = OpenOptions::write(); 74 | oo.create = true; 75 | oo.truncate = true; 76 | 77 | if let Some(n) = req.headers().typed_get::() { 78 | count = n.0; 79 | have_count = true; 80 | oo.size = Some(count); 81 | } else if let Some(n) = req 82 | .headers() 83 | .get("X-Expected-Entity-Length") 84 | .and_then(|v| v.to_str().ok()) 85 | { 86 | // macOS Finder, see https://evertpot.com/260/ 87 | if let Ok(len) = n.parse() { 88 | count = len; 89 | have_count = true; 90 | oo.size = Some(count); 91 | } 92 | } 93 | let checksum = req 94 | .headers() 95 | .get("OC-Checksum") 96 | .and_then(|v| v.to_str().ok().map(|s| s.to_string())); 97 | oo.checksum = checksum; 98 | 99 | let path = self.path(req); 100 | let meta = self.fs.metadata(&path, &self.credentials).await; 101 | 102 | // close connection on error. 103 | let mut res = Response::new(Body::empty()); 104 | res.headers_mut().typed_insert(headers::Connection::close()); 105 | 106 | // SabreDAV style PATCH? 107 | if req.method() == http::Method::PATCH { 108 | if req 109 | .headers() 110 | .typed_get::() 111 | .is_none_or(|ct| ct.0 != SABRE) 112 | { 113 | return Err(DavError::StatusClose(SC::UNSUPPORTED_MEDIA_TYPE)); 114 | } 115 | if !have_count { 116 | return Err(DavError::StatusClose(SC::LENGTH_REQUIRED)); 117 | }; 118 | let r = req 119 | .headers() 120 | .typed_get::() 121 | .ok_or(DavError::StatusClose(SC::BAD_REQUEST))?; 122 | match r { 123 | davheaders::XUpdateRange::FromTo(b, e) => { 124 | if b > e || e - b + 1 != count { 125 | return Err(DavError::StatusClose(SC::RANGE_NOT_SATISFIABLE)); 126 | } 127 | start = b; 128 | } 129 | davheaders::XUpdateRange::AllFrom(b) => { 130 | start = b; 131 | } 132 | davheaders::XUpdateRange::Last(n) => { 133 | if let Ok(ref m) = meta { 134 | if n > m.len() { 135 | return Err(DavError::StatusClose(SC::RANGE_NOT_SATISFIABLE)); 136 | } 137 | start = m.len() - n; 138 | } 139 | } 140 | davheaders::XUpdateRange::Append => { 141 | oo.append = true; 142 | } 143 | } 144 | do_range = true; 145 | oo.truncate = false; 146 | } 147 | 148 | // Apache-style Content-Range header? 
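// Non-standard partial PUT in the style of Apache mod_dav: the Content-Range
// request header selects the byte range to overwrite
// (see doc/Apache-PUT-with-Content-Range.md).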
149 | match req.headers().typed_try_get::() { 150 | Ok(Some(range)) => { 151 | if let Some((b, e)) = range.bytes_range() { 152 | if b > e { 153 | return Err(DavError::StatusClose(SC::RANGE_NOT_SATISFIABLE)); 154 | } 155 | 156 | if have_count { 157 | if e - b + 1 != count { 158 | return Err(DavError::StatusClose(SC::RANGE_NOT_SATISFIABLE)); 159 | } 160 | } else { 161 | count = e - b + 1; 162 | have_count = true; 163 | } 164 | start = b; 165 | do_range = true; 166 | oo.truncate = false; 167 | } 168 | } 169 | Ok(None) => {} 170 | Err(_) => return Err(DavError::StatusClose(SC::BAD_REQUEST)), 171 | } 172 | 173 | // check the If and If-* headers. 174 | let tokens = if_match_get_tokens( 175 | req, 176 | meta.as_ref().map(|v| v.as_ref()).ok(), 177 | self.fs.as_ref(), 178 | &self.ls, 179 | &path, 180 | &self.credentials, 181 | ); 182 | let tokens = match tokens.await { 183 | Ok(t) => t, 184 | Err(s) => return Err(DavError::StatusClose(s)), 185 | }; 186 | 187 | // if locked check if we hold that lock. 188 | if let Some(ref locksystem) = self.ls { 189 | let t = tokens.iter().map(|s| s.as_str()).collect::>(); 190 | let principal = self.principal.as_deref(); 191 | if let Err(_l) = locksystem.check(&path, principal, false, false, t).await { 192 | return Err(DavError::StatusClose(SC::LOCKED)); 193 | } 194 | } 195 | 196 | // tweak open options. 197 | if req 198 | .headers() 199 | .typed_get::() 200 | .is_some_and(|h| h.0 == davheaders::ETagList::Star) 201 | { 202 | oo.create = false; 203 | } 204 | if req 205 | .headers() 206 | .typed_get::() 207 | .is_some_and(|h| h.0 == davheaders::ETagList::Star) 208 | { 209 | oo.create_new = true; 210 | } 211 | 212 | let create = oo.create; 213 | let create_new = oo.create_new; 214 | let mut file = match self.fs.open(&path, oo, &self.credentials).await { 215 | Ok(f) => f, 216 | Err(FsError::NotFound) | Err(FsError::Exists) => { 217 | let s = if !create || create_new { 218 | SC::PRECONDITION_FAILED 219 | } else { 220 | SC::CONFLICT 221 | }; 222 | return Err(DavError::StatusClose(s)); 223 | } 224 | Err(e) => return Err(DavError::FsError(e)), 225 | }; 226 | 227 | if do_range { 228 | // seek to beginning of requested data. 229 | if file.seek(std::io::SeekFrom::Start(start)).await.is_err() { 230 | return Err(DavError::StatusClose(SC::RANGE_NOT_SATISFIABLE)); 231 | } 232 | } 233 | 234 | res.headers_mut() 235 | .typed_insert(headers::AcceptRanges::bytes()); 236 | 237 | let mut body = pin!(body); 238 | 239 | // loop, read body, write to file. 240 | let mut total = 0u64; 241 | 242 | while let Some(data) = body.frame().await { 243 | let data_frame = data.map_err(|e| to_ioerror(e))?; 244 | 245 | let Ok(mut buf) = data_frame.into_data() else { 246 | continue; 247 | }; 248 | 249 | total += buf.remaining() as u64; 250 | // consistency check. 251 | if have_count && total > count { 252 | break; 253 | } 254 | // The `Buf` might actually be a `Bytes`. 
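// The downcast below is a zero-copy fast path: when the frame's data type is
// `bytes::Bytes` (typically the case for hyper-based servers), the buffer can
// be moved into `write_bytes()` without copying; any other `Buf` implementation
// falls back to the boxed `write_buf()` path.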
255 | let b = { 256 | let b: &mut dyn std::any::Any = &mut buf; 257 | b.downcast_mut::() 258 | }; 259 | if let Some(bytes) = b { 260 | let bytes = std::mem::replace(bytes, Bytes::new()); 261 | file.write_bytes(bytes).await?; 262 | } else { 263 | file.write_buf(Box::new(buf)).await?; 264 | } 265 | } 266 | file.flush().await?; 267 | 268 | if have_count && total > count { 269 | error!("PUT file: sender is sending more bytes than expected"); 270 | return Err(DavError::StatusClose(SC::BAD_REQUEST)); 271 | } 272 | 273 | if have_count && total < count { 274 | error!("PUT file: premature EOF on input"); 275 | return Err(DavError::StatusClose(SC::BAD_REQUEST)); 276 | } 277 | 278 | // Report whether we created or updated the file. 279 | *res.status_mut() = match meta { 280 | Ok(_) => SC::NO_CONTENT, 281 | Err(_) => { 282 | res.headers_mut().typed_insert(headers::ContentLength(0)); 283 | SC::CREATED 284 | } 285 | }; 286 | 287 | // no errors, connection may be kept open. 288 | res.headers_mut().remove(http::header::CONNECTION); 289 | 290 | if let Ok(meta) = file.metadata().await { 291 | if let Some(etag) = davheaders::ETag::from_meta(meta.as_ref()) { 292 | res.headers_mut().typed_insert(etag); 293 | } 294 | if let Ok(modified) = meta.modified() { 295 | res.headers_mut() 296 | .typed_insert(headers::LastModified::from(modified)); 297 | } 298 | } 299 | Ok(res) 300 | } 301 | } 302 | -------------------------------------------------------------------------------- /tests/caldav_tests.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "caldav")] 2 | mod caldav_tests { 3 | use dav_server::{DavHandler, body::Body, caldav::*, fakels::FakeLs, memfs::MemFs}; 4 | use http::{Method, Request, StatusCode}; 5 | 6 | fn setup_caldav_server() -> DavHandler { 7 | DavHandler::builder() 8 | .filesystem(MemFs::new()) 9 | .locksystem(FakeLs::new()) 10 | .build_handler() 11 | } 12 | 13 | async fn resp_to_string(mut resp: http::Response) -> String { 14 | use futures_util::StreamExt; 15 | 16 | let mut data = Vec::new(); 17 | let body = resp.body_mut(); 18 | 19 | while let Some(chunk) = body.next().await { 20 | match chunk { 21 | Ok(bytes) => data.extend_from_slice(&bytes), 22 | Err(e) => panic!("Error reading body stream: {}", e), 23 | } 24 | } 25 | 26 | String::from_utf8(data).unwrap_or_else(|_| "".to_string()) 27 | } 28 | 29 | #[tokio::test] 30 | async fn test_caldav_options() { 31 | let server = setup_caldav_server(); 32 | 33 | let req = Request::builder() 34 | .method(Method::OPTIONS) 35 | .uri("/") 36 | .body(Body::empty()) 37 | .unwrap(); 38 | 39 | let resp = server.handle(req).await; 40 | assert_eq!(resp.status(), StatusCode::OK); 41 | 42 | let dav_header = resp.headers().get("DAV").unwrap(); 43 | let dav_str = dav_header.to_str().unwrap(); 44 | assert!(dav_str.contains("calendar-access")); 45 | } 46 | 47 | #[tokio::test] 48 | async fn test_mkcalendar() { 49 | let server = setup_caldav_server(); 50 | 51 | let req = Request::builder() 52 | .method("MKCALENDAR") 53 | .uri("/calendar") 54 | .body(Body::empty()) 55 | .unwrap(); 56 | 57 | let resp = server.handle(req).await; 58 | assert_eq!(resp.status(), StatusCode::CREATED); 59 | } 60 | 61 | #[tokio::test] 62 | async fn test_mkcalendar_already_exists() { 63 | let server = setup_caldav_server(); 64 | 65 | // First create a regular collection 66 | let req = Request::builder() 67 | .method("MKCOL") 68 | .uri("/calendar") 69 | .body(Body::empty()) 70 | .unwrap(); 71 | let _ = server.handle(req).await; 72 | 73 | // Try to 
create calendar collection on existing path 74 | let req = Request::builder() 75 | .method("MKCALENDAR") 76 | .uri("/calendar") 77 | .body(Body::empty()) 78 | .unwrap(); 79 | 80 | let resp = server.handle(req).await; 81 | assert_eq!(resp.status(), StatusCode::METHOD_NOT_ALLOWED); 82 | } 83 | 84 | #[tokio::test] 85 | async fn test_calendar_propfind() { 86 | let server = setup_caldav_server(); 87 | 88 | // Create a calendar collection first 89 | let req = Request::builder() 90 | .method("MKCALENDAR") 91 | .uri("/calendar") 92 | .body(Body::empty()) 93 | .unwrap(); 94 | let _ = server.handle(req).await; 95 | 96 | // PROPFIND request 97 | let propfind_body = r#" 98 | 99 | 100 | 101 | 102 | 103 | 104 | "#; 105 | 106 | let req = Request::builder() 107 | .method("PROPFIND") 108 | .uri("/calendar") 109 | .header("Depth", "0") 110 | .body(Body::from(propfind_body)) 111 | .unwrap(); 112 | 113 | let resp = server.handle(req).await; 114 | assert_eq!(resp.status(), StatusCode::MULTI_STATUS); 115 | 116 | // Check that response contains CalDAV properties 117 | let body_str = resp_to_string(resp).await; 118 | assert!(body_str.contains("supported-calendar-component-set")); 119 | assert!(body_str.contains("supported-calendar-data")); 120 | } 121 | 122 | #[tokio::test] 123 | async fn test_calendar_event_put() { 124 | let server = setup_caldav_server(); 125 | 126 | // Create a calendar collection first 127 | let req = Request::builder() 128 | .method("MKCALENDAR") 129 | .uri("/calendar") 130 | .body(Body::empty()) 131 | .unwrap(); 132 | let _ = server.handle(req).await; 133 | 134 | // PUT a calendar event 135 | let ical_data = r#"BEGIN:VCALENDAR 136 | VERSION:2.0 137 | PRODID:-//Test//Test//EN 138 | BEGIN:VEVENT 139 | UID:test-event-123@example.com 140 | DTSTART:20240101T120000Z 141 | DTEND:20240101T130000Z 142 | SUMMARY:Test Event 143 | DESCRIPTION:This is a test event 144 | END:VEVENT 145 | END:VCALENDAR"#; 146 | 147 | let req = Request::builder() 148 | .method(Method::PUT) 149 | .uri("/calendar/event.ics") 150 | .header("Content-Type", "text/calendar") 151 | .body(Body::from(ical_data)) 152 | .unwrap(); 153 | 154 | let resp = server.handle(req).await; 155 | assert!(resp.status().is_success()); 156 | } 157 | 158 | #[tokio::test] 159 | async fn test_calendar_query_report() { 160 | let server = setup_caldav_server(); 161 | 162 | // Create a calendar collection 163 | let req = Request::builder() 164 | .method("MKCALENDAR") 165 | .uri("/calendar") 166 | .body(Body::empty()) 167 | .unwrap(); 168 | let _ = server.handle(req).await; 169 | 170 | // Add a calendar event 171 | let ical_data = r#"BEGIN:VCALENDAR 172 | VERSION:2.0 173 | PRODID:-//Test//Test//EN 174 | BEGIN:VEVENT 175 | UID:test-event-123@example.com 176 | DTSTART:20240101T120000Z 177 | DTEND:20240101T130000Z 178 | SUMMARY:Test Event 179 | END:VEVENT 180 | END:VCALENDAR"#; 181 | 182 | let req = Request::builder() 183 | .method(Method::PUT) 184 | .uri("/calendar/event.ics") 185 | .header("Content-Type", "text/calendar") 186 | .body(Body::from(ical_data)) 187 | .unwrap(); 188 | let _ = server.handle(req).await; 189 | 190 | // REPORT calendar-query 191 | let report_body = r#" 192 | 193 | 194 | 195 | 196 | 197 | 198 | 199 | 200 | 201 | "#; 202 | 203 | let req = Request::builder() 204 | .method("REPORT") 205 | .uri("/calendar") 206 | .header("Depth", "1") 207 | .body(Body::from(report_body)) 208 | .unwrap(); 209 | 210 | let resp = server.handle(req).await; 211 | assert_eq!(resp.status(), StatusCode::MULTI_STATUS); 212 | 213 | let body_str = 
resp_to_string(resp).await; 214 | assert!(body_str.contains("calendar-data")); 215 | assert!(body_str.contains("Test Event")); 216 | } 217 | 218 | #[tokio::test] 219 | async fn test_calendar_multiget_report() { 220 | let server = setup_caldav_server(); 221 | 222 | // Create a calendar collection 223 | let req = Request::builder() 224 | .method("MKCALENDAR") 225 | .uri("/calendar") 226 | .body(Body::empty()) 227 | .unwrap(); 228 | let _ = server.handle(req).await; 229 | 230 | // Add a calendar event 231 | let ical_data = r#"BEGIN:VCALENDAR 232 | VERSION:2.0 233 | PRODID:-//Test//Test//EN 234 | BEGIN:VEVENT 235 | UID:test-event-123@example.com 236 | DTSTART:20240101T120000Z 237 | DTEND:20240101T130000Z 238 | SUMMARY:Test Event 239 | END:VEVENT 240 | END:VCALENDAR"#; 241 | 242 | let req = Request::builder() 243 | .method(Method::PUT) 244 | .uri("/calendar/event.ics") 245 | .header("Content-Type", "text/calendar") 246 | .body(Body::from(ical_data)) 247 | .unwrap(); 248 | let _ = server.handle(req).await; 249 | 250 | // REPORT calendar-multiget 251 | let report_body = r#" 252 | 253 | 254 | 255 | 256 | /calendar/event.ics 257 | "#; 258 | 259 | let req = Request::builder() 260 | .method("REPORT") 261 | .uri("/calendar") 262 | .body(Body::from(report_body)) 263 | .unwrap(); 264 | 265 | let resp = server.handle(req).await; 266 | assert_eq!(resp.status(), StatusCode::MULTI_STATUS); 267 | 268 | let body_str = resp_to_string(resp).await; 269 | assert!(body_str.contains("calendar-data")); 270 | assert!(body_str.contains("Test Event")); 271 | } 272 | 273 | #[test] 274 | fn test_is_calendar_data() { 275 | let valid_ical = b"BEGIN:VCALENDAR\nVERSION:2.0\nEND:VCALENDAR\n"; 276 | assert!(is_calendar_data(valid_ical)); 277 | 278 | let invalid_data = b"This is not calendar data"; 279 | assert!(!is_calendar_data(invalid_data)); 280 | } 281 | 282 | #[test] 283 | fn test_extract_calendar_uid() { 284 | let ical_with_uid = "BEGIN:VCALENDAR\nUID:test-123@example.com\nEND:VCALENDAR"; 285 | assert_eq!( 286 | extract_calendar_uid(ical_with_uid), 287 | Some("test-123@example.com".to_string()) 288 | ); 289 | 290 | let ical_without_uid = "BEGIN:VCALENDAR\nSUMMARY:Test\nEND:VCALENDAR"; 291 | assert_eq!(extract_calendar_uid(ical_without_uid), None); 292 | } 293 | 294 | #[test] 295 | fn test_calendar_component_types() { 296 | assert_eq!(CalendarComponentType::VEvent.as_str(), "VEVENT"); 297 | assert_eq!(CalendarComponentType::VTodo.as_str(), "VTODO"); 298 | assert_eq!(CalendarComponentType::VJournal.as_str(), "VJOURNAL"); 299 | assert_eq!(CalendarComponentType::VFreeBusy.as_str(), "VFREEBUSY"); 300 | } 301 | 302 | #[test] 303 | fn test_calendar_properties_default() { 304 | let props = CalendarProperties::default(); 305 | assert!( 306 | props 307 | .supported_components 308 | .contains(&CalendarComponentType::VEvent) 309 | ); 310 | assert!( 311 | props 312 | .supported_components 313 | .contains(&CalendarComponentType::VTodo) 314 | ); 315 | assert_eq!(props.max_resource_size, Some(1024 * 1024)); 316 | } 317 | 318 | #[cfg(feature = "caldav")] 319 | #[test] 320 | fn test_validate_calendar_data() { 321 | let valid_ical = r#"BEGIN:VCALENDAR 322 | VERSION:2.0 323 | PRODID:-//Test//Test//EN 324 | BEGIN:VEVENT 325 | UID:test@example.com 326 | DTSTART:20240101T120000Z 327 | DTEND:20240101T130000Z 328 | SUMMARY:Test 329 | END:VEVENT 330 | END:VCALENDAR"#; 331 | 332 | assert!(validate_calendar_data(valid_ical).is_ok()); 333 | } 334 | } 335 | 336 | #[cfg(not(feature = "caldav"))] 337 | mod caldav_disabled_tests { 338 | use 
dav_server::{DavHandler, body::Body, fakels::FakeLs, memfs::MemFs}; 339 | use http::Request; 340 | 341 | #[tokio::test] 342 | async fn test_caldav_methods_return_not_implemented() { 343 | let server = DavHandler::builder() 344 | .filesystem(MemFs::new()) 345 | .locksystem(FakeLs::new()) 346 | .build_handler(); 347 | 348 | // Test REPORT method 349 | let req = Request::builder() 350 | .method("REPORT") 351 | .uri("/") 352 | .body(Body::empty()) 353 | .unwrap(); 354 | let resp = server.handle(req).await; 355 | assert_eq!(resp.status(), http::StatusCode::NOT_IMPLEMENTED); 356 | 357 | // Test MKCALENDAR method 358 | let req = Request::builder() 359 | .method("MKCALENDAR") 360 | .uri("/calendar") 361 | .body(Body::empty()) 362 | .unwrap(); 363 | let resp = server.handle(req).await; 364 | assert_eq!(resp.status(), http::StatusCode::NOT_IMPLEMENTED); 365 | } 366 | } 367 | -------------------------------------------------------------------------------- /src/handle_copymove.rs: -------------------------------------------------------------------------------- 1 | use futures_util::{FutureExt, StreamExt, future::BoxFuture}; 2 | use headers::HeaderMapExt; 3 | use http::{Request, Response, StatusCode}; 4 | 5 | use crate::async_stream::AsyncStream; 6 | use crate::body::Body; 7 | use crate::conditional::*; 8 | use crate::davheaders::{self, Depth}; 9 | use crate::davpath::DavPath; 10 | use crate::errors::*; 11 | use crate::fs::*; 12 | use crate::multierror::{MultiError, multi_error}; 13 | use crate::{DavInner, DavResult, util::DavMethod}; 14 | 15 | // map_err helper. 16 | async fn add_status<'a>( 17 | m_err: &'a mut MultiError, 18 | path: &'a DavPath, 19 | e: impl Into + 'static, 20 | ) -> DavResult<()> { 21 | let daverror = e.into(); 22 | if let Err(x) = m_err.add_status(path, daverror.statuscode()).await { 23 | return Err(x.into()); 24 | } 25 | Err(daverror) 26 | } 27 | 28 | impl DavInner { 29 | pub(crate) fn do_copy<'a>( 30 | &'a self, 31 | source: &'a DavPath, 32 | topdest: &'a DavPath, 33 | dest: &'a DavPath, 34 | depth: Depth, 35 | multierror: &'a mut MultiError, 36 | ) -> BoxFuture<'a, DavResult<()>> { 37 | async move { 38 | // when doing "COPY /a/b /a/b/c make sure we don't recursively 39 | // copy /a/b/c/ into /a/b/c. 40 | if source == topdest { 41 | return Ok(()); 42 | } 43 | 44 | // source must exist. 45 | let meta = match self.fs.metadata(source, &self.credentials).await { 46 | Err(e) => return add_status(multierror, source, e).await, 47 | Ok(m) => m, 48 | }; 49 | 50 | // if it's a file we can overwrite it. 51 | if !meta.is_dir() { 52 | return match self.fs.copy(source, dest, &self.credentials).await { 53 | Ok(_) => Ok(()), 54 | Err(e) => { 55 | debug!("do_copy: self.fs.copy error: {e:?}"); 56 | add_status(multierror, source, e).await 57 | } 58 | }; 59 | } 60 | 61 | // Copying a directory onto an existing directory with Depth 0 62 | // is not an error. It means "only copy properties" (which 63 | // we do not do yet). 64 | if let Err(e) = self.fs.create_dir(dest, &self.credentials).await 65 | && (depth != Depth::Zero || e != FsError::Exists) 66 | { 67 | debug!("do_copy: self.fs.create_dir({dest}) error: {e:?}"); 68 | return add_status(multierror, dest, e).await; 69 | } 70 | 71 | // only recurse when Depth > 0. 
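// (Per RFC 4918 section 9.8.3, "Depth: 0" on a collection COPY copies only the
//  collection itself and its properties, not its members; "Depth: infinity",
//  the default, copies the whole tree.)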
72 |             if depth == Depth::Zero {
73 |                 return Ok(());
74 |             }
75 | 
76 |             let mut entries = match self
77 |                 .fs
78 |                 .read_dir(source, ReadDirMeta::DataSymlink, &self.credentials)
79 |                 .await
80 |             {
81 |                 Ok(entries) => entries,
82 |                 Err(e) => {
83 |                     debug!("do_copy: self.fs.read_dir error: {e:?}");
84 |                     return add_status(multierror, source, e).await;
85 |                 }
86 |             };
87 | 
88 |             // If we encounter errors, just print them, and keep going.
89 |             // Last seen error is returned from function.
90 |             let mut retval = Ok::<_, DavError>(());
91 |             while let Some(dirent) = entries.next().await {
92 |                 let dirent = match dirent {
93 |                     Ok(dirent) => dirent,
94 |                     Err(e) => return add_status(multierror, source, e).await,
95 |                 };
96 | 
97 |                 // NOTE: dirent.metadata() behaves like symlink_metadata()
98 |                 let meta = match dirent.metadata().await {
99 |                     Ok(meta) => meta,
100 |                     Err(e) => return add_status(multierror, source, e).await,
101 |                 };
102 |                 let name = dirent.name();
103 |                 let mut nsrc = source.clone();
104 |                 let mut ndest = dest.clone();
105 |                 nsrc.push_segment(&name);
106 |                 ndest.push_segment(&name);
107 | 
108 |                 if meta.is_dir() {
109 |                     nsrc.add_slash();
110 |                     ndest.add_slash();
111 |                 }
112 |                 // recurse.
113 |                 if let Err(e) = self
114 |                     .do_copy(&nsrc, topdest, &ndest, depth, multierror)
115 |                     .await
116 |                 {
117 |                     retval = Err(e);
118 |                 }
119 |             }
120 | 
121 |             retval
122 |         }
123 |         .boxed()
124 |     }
125 | 
126 |     // Right now we handle MOVE with a simple RENAME. RFC 4918 #9.9.2 talks
127 |     // about "partially failed moves", which means that we might have to
128 |     // try to move directories with increasing granularity to move as much
129 |     // as possible instead of all-or-nothing.
130 |     //
131 |     // Note that this might not be optional, as the RFC says:
132 |     //
133 |     // "Any headers included with MOVE MUST be applied in processing every
134 |     // resource to be moved with the exception of the Destination header."
135 |     //
136 |     // .. so for perfect compliance we might have to process all resources
137 |     // one-by-one anyway. But seriously, who cares.
138 |     //
139 |     pub(crate) async fn do_move<'a>(
140 |         &'a self,
141 |         source: &'a DavPath,
142 |         dest: &'a DavPath,
143 |         multierror: &'a mut MultiError,
144 |     ) -> DavResult<()> {
145 |         if let Err(e) = self.fs.rename(source, dest, &self.credentials).await {
146 |             add_status(multierror, source, e).await
147 |         } else {
148 |             Ok(())
149 |         }
150 |     }
151 | 
152 |     pub(crate) async fn handle_copymove(
153 |         self,
154 |         req: &Request<()>,
155 |         method: DavMethod,
156 |     ) -> DavResult<Response<Body>> {
157 |         // get and check headers.
158 |         let overwrite = req
159 |             .headers()
160 |             .typed_get::<davheaders::Overwrite>()
161 |             .is_none_or(|o| o.0);
162 |         let depth = match req.headers().typed_get::<Depth>() {
163 |             Some(Depth::Infinity) | None => Depth::Infinity,
164 |             Some(Depth::Zero) if method == DavMethod::Copy => Depth::Zero,
165 |             _ => return Err(StatusCode::BAD_REQUEST.into()),
166 |         };
167 | 
168 |         // decode and validate destination.
169 |         let dest = match req.headers().typed_get::<davheaders::Destination>() {
170 |             Some(dest) => DavPath::from_str_and_prefix(&dest.0, &self.prefix)?,
171 |             None => return Err(StatusCode::BAD_REQUEST.into()),
172 |         };
173 | 
174 |         // for MOVE, tread with care: if the path ends in "/" but it actually
175 |         // is a symlink, we want to move the symlink, not what it points to.
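// Example: "MOVE /dir/link/" where "link" is a symlink to a directory should
// rename the symlink itself, not the directory it points to; hence the
// symlink_metadata() lookup below before deciding on a trailing slash.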
176 | let mut path = self.path(req); 177 | let meta = if method == DavMethod::Move { 178 | let meta = self.fs.symlink_metadata(&path, &self.credentials).await?; 179 | if meta.is_symlink() { 180 | let m2 = self.fs.metadata(&path, &self.credentials).await?; 181 | path.add_slash_if(m2.is_dir()); 182 | } 183 | meta 184 | } else { 185 | self.fs.metadata(&path, &self.credentials).await? 186 | }; 187 | path.add_slash_if(meta.is_dir()); 188 | 189 | // parent of the destination must exist. 190 | if !self.has_parent(&dest).await { 191 | return Err(StatusCode::CONFLICT.into()); 192 | } 193 | 194 | // for the destination, also check if it's a symlink. If we are going 195 | // to remove it first, we want to remove the link, not what it points to. 196 | let (dest_is_file, dmeta) = match self.fs.symlink_metadata(&dest, &self.credentials).await { 197 | Ok(meta) => { 198 | let mut is_file = false; 199 | if meta.is_symlink() 200 | && let Ok(m) = self.fs.metadata(&dest, &self.credentials).await 201 | { 202 | is_file = m.is_file(); 203 | } 204 | if meta.is_file() { 205 | is_file = true; 206 | } 207 | (is_file, Ok(meta)) 208 | } 209 | Err(e) => (false, Err(e)), 210 | }; 211 | 212 | // check if overwrite is "F" 213 | let exists = dmeta.is_ok(); 214 | if !overwrite && exists { 215 | return Err(StatusCode::PRECONDITION_FAILED.into()); 216 | } 217 | 218 | // check if source == dest 219 | if path == dest { 220 | return Err(StatusCode::FORBIDDEN.into()); 221 | } 222 | 223 | // check If and If-* headers for source URL 224 | let tokens = match if_match_get_tokens( 225 | req, 226 | Some(meta.as_ref()), 227 | self.fs.as_ref(), 228 | &self.ls, 229 | &path, 230 | &self.credentials, 231 | ) 232 | .await 233 | { 234 | Ok(t) => t, 235 | Err(s) => return Err(s.into()), 236 | }; 237 | 238 | // check locks. since we cancel the entire operation if there is 239 | // a conflicting lock, we do not return a 207 multistatus, but 240 | // just a simple status. 241 | if let Some(ref locksystem) = self.ls { 242 | let t = tokens.iter().map(|s| s.as_str()).collect::>(); 243 | let principal = self.principal.as_deref(); 244 | if method == DavMethod::Move { 245 | // for MOVE check if source path is locked 246 | if let Err(_l) = locksystem 247 | .check(&path, principal, false, true, t.clone()) 248 | .await 249 | { 250 | return Err(StatusCode::LOCKED.into()); 251 | } 252 | } 253 | // for MOVE and COPY check if destination is locked 254 | if let Err(_l) = locksystem.check(&dest, principal, false, true, t).await { 255 | return Err(StatusCode::LOCKED.into()); 256 | } 257 | } 258 | 259 | let req_path = path.clone(); 260 | 261 | let items = AsyncStream::new(|tx| { 262 | async move { 263 | let mut multierror = MultiError::new(tx); 264 | 265 | // see if we need to delete the destination first. 266 | if overwrite && exists && (depth != Depth::Zero || dest_is_file) { 267 | trace!("handle_copymove: deleting destination {dest}"); 268 | if self 269 | .delete_items(&mut multierror, Depth::Infinity, dmeta.unwrap(), &dest) 270 | .await 271 | .is_err() 272 | { 273 | return Ok(()); 274 | } 275 | // should really do this per item, in case the delete partially fails. See TODO.md 276 | if let Some(ref locksystem) = self.ls { 277 | let _ = locksystem.delete(&dest).await; 278 | } 279 | } 280 | 281 | // COPY or MOVE. 
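// (RFC 4918 sections 9.8.5 and 9.9.4: a COPY or MOVE that replaces an existing
//  resource answers 204 No Content, one that creates a new resource answers
//  201 Created; the `exists` check below reports exactly that via
//  `multierror.add_status()`.)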
282 | if method == DavMethod::Copy { 283 | if self 284 | .do_copy(&path, &dest, &dest, depth, &mut multierror) 285 | .await 286 | .is_ok() 287 | { 288 | let s = if exists { 289 | StatusCode::NO_CONTENT 290 | } else { 291 | StatusCode::CREATED 292 | }; 293 | let _ = multierror.add_status(&path, s).await; 294 | } 295 | } else { 296 | // move and if successful, remove locks at old location. 297 | if self.do_move(&path, &dest, &mut multierror).await.is_ok() { 298 | if let Some(ref locksystem) = self.ls { 299 | locksystem.delete(&path).await.ok(); 300 | } 301 | let s = if exists { 302 | StatusCode::NO_CONTENT 303 | } else { 304 | StatusCode::CREATED 305 | }; 306 | let _ = multierror.add_status(&path, s).await; 307 | } 308 | } 309 | Ok::<_, DavError>(()) 310 | } 311 | }); 312 | 313 | multi_error(req_path, items).await 314 | } 315 | } 316 | --------------------------------------------------------------------------------