├── datalith ├── .gitignore ├── src │ ├── rocket_mounts │ │ ├── rocket_utils │ │ │ ├── mod.rs │ │ │ ├── content_length.rs │ │ │ ├── datalith_response.rs │ │ │ └── datalith_response_image.rs │ │ ├── fetch.rs │ │ ├── mod.rs │ │ ├── fetch_image.rs │ │ ├── operate.rs │ │ └── operate_image.rs │ ├── main.rs │ └── cli.rs ├── LICENSE ├── Cargo.toml ├── rustfmt.toml └── README.md ├── datalith-core ├── .gitignore ├── tests │ ├── data │ │ └── image.png │ ├── basic.rs │ ├── global.rs │ ├── convert.rs │ ├── clean_up.rs │ ├── auto_file_extension.rs │ ├── parallel_and_temporary.rs │ └── upload_download_data.rs ├── src │ ├── manager │ │ ├── mod.rs │ │ ├── datalith_manager_errors.rs │ │ └── datalith_manager.rs │ ├── image │ │ ├── sync.rs │ │ ├── datalith_image_errors.rs │ │ └── datalith_image.rs │ ├── resources │ │ ├── datalith_resource.rs │ │ └── mod.rs │ ├── sql │ │ └── schema.sql │ ├── magic_cookie_pool.rs │ ├── datalith_errors.rs │ ├── datalith_file.rs │ ├── guard.rs │ ├── lib.rs │ └── functions.rs ├── LICENSE ├── Cargo.toml ├── rustfmt.toml └── README.md ├── .dockerignore ├── .github ├── dependabot.yml └── workflows │ ├── ci-version.yml │ └── ci.yml ├── Cargo.toml ├── patches ├── config.patch └── configure.patch ├── README.md ├── docker-compose.yml ├── Dockerfile ├── docker-compose.image.yml ├── LICENSE ├── Makefile ├── Dockerfile.image └── .gitignore /datalith/.gitignore: -------------------------------------------------------------------------------- 1 | /tests/db -------------------------------------------------------------------------------- /datalith-core/.gitignore: -------------------------------------------------------------------------------- 1 | /tests/db -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | * 2 | 3 | !/patches 4 | !/Cargo.toml 5 | !/Cargo.lock 6 | 7 | !/*/src 8 | !/*/Cargo.toml 9 | !/*/Cargo.lock 
-------------------------------------------------------------------------------- /datalith-core/tests/data/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/magiclen/datalith/master/datalith-core/tests/data/image.png -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: github-actions 4 | directory: "/" 5 | schedule: 6 | interval: "weekly" -------------------------------------------------------------------------------- /datalith-core/src/manager/mod.rs: -------------------------------------------------------------------------------- 1 | mod datalith_manager; 2 | mod datalith_manager_errors; 3 | 4 | pub use datalith_manager::*; 5 | pub use datalith_manager_errors::*; 6 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [profile.release] 2 | lto = true 3 | codegen-units = 1 4 | panic = "abort" 5 | strip = true 6 | 7 | [workspace] 8 | resolver = "2" 9 | members = [ 10 | "datalith-core", 11 | "datalith", 12 | ] -------------------------------------------------------------------------------- /patches/config.patch: -------------------------------------------------------------------------------- 1 | @@ -1,5 +1,8 @@ 2 | /* config/config.h.in. Generated from configure.ac by autoheader. 
*/ 3 | 4 | +/* Whether hdri is enabled or not */ 5 | +#undef HDRI_ENABLE 6 | + 7 | /* Define if building universal (internal helper macro) */ 8 | #undef AC_APPLE_UNIVERSAL_BUILD 9 | 10 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Datalith 2 | ==================== 3 | 4 | A file management system powered by SQLite for metadata storage and the file system for file storage. 5 | 6 | ## Crates.io 7 | 8 | https://crates.io/crates/datalith 9 | 10 | ## Documentation 11 | 12 | https://docs.rs/datalith 13 | 14 | ## License 15 | 16 | [MIT](LICENSE) -------------------------------------------------------------------------------- /datalith/src/rocket_mounts/rocket_utils/mod.rs: -------------------------------------------------------------------------------- 1 | mod content_length; 2 | mod datalith_response; 3 | #[cfg(feature = "image-convert")] 4 | mod datalith_response_image; 5 | 6 | pub use content_length::*; 7 | pub use datalith_response::*; 8 | #[cfg(feature = "image-convert")] 9 | pub use datalith_response_image::ResolutionType; 10 | -------------------------------------------------------------------------------- /patches/configure.patch: -------------------------------------------------------------------------------- 1 | @@ -23387,6 +23387,7 @@ 2 | else 3 | magick_hdri_enable='0' 4 | fi 5 | +printf "%s\n" "#define HDRI_ENABLE $magick_hdri_enable" >>confdefs.h 6 | MAGICK_PCFLAGS="$MAGICK_PCFLAGS -DMAGICKCORE_HDRI_ENABLE=$magick_hdri_enable" 7 | CFLAGS="$CFLAGS -DMAGICKCORE_HDRI_ENABLE=$magick_hdri_enable" 8 | CPPFLAGS="$CPPFLAGS -DMAGICKCORE_HDRI_ENABLE=$magick_hdri_enable" -------------------------------------------------------------------------------- /datalith-core/tests/basic.rs: -------------------------------------------------------------------------------- 1 | mod global; 2 | 3 | #[cfg(feature = "manager")] 4 | use 
datalith_core::DatalithManager; 5 | use global::*; 6 | 7 | #[tokio::test] 8 | async fn initialize() { 9 | let datalith = datalith_init().await; 10 | 11 | #[cfg(feature = "manager")] 12 | DatalithManager::new(datalith.clone()).await.unwrap(); 13 | 14 | datalith_close(datalith).await; 15 | } 16 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | app: 3 | restart: always 4 | build: 5 | context: . 6 | dockerfile: Dockerfile 7 | image: datalith 8 | container_name: datalith 9 | environment: 10 | TZ: Asia/Taipei 11 | DATALITH_MAX_FILE_SIZE: 2 GiB 12 | DATALITH_TEMPORARY_FILE_LIFESPAN: 60 13 | volumes: 14 | - ~/docker/datalith:/app/shared 15 | ports: 16 | - "1111:1111" 17 | command: --environment /app/shared/db 18 | logging: 19 | driver: "syslog" 20 | options: 21 | tag: "docker/{{.ImageName}}/{{.Name}}" -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rust AS builder 2 | 3 | RUN apt update && apt install -y libmagic-dev 4 | 5 | WORKDIR /build 6 | 7 | COPY . . 
8 | 9 | RUN cargo build --release --no-default-features --features magic 10 | 11 | 12 | FROM debian:bookworm-slim 13 | 14 | RUN adduser --disabled-password \ 15 | --gecos "" \ 16 | --no-create-home \ 17 | user 18 | 19 | WORKDIR /app 20 | 21 | RUN chown user:user /app 22 | 23 | RUN apt update && apt install -y libmagic1 24 | 25 | RUN rm -rf /var/lib/apt/lists/* 26 | 27 | USER user 28 | 29 | COPY --chown=user:user --from=builder /build/target/release/datalith /app/ 30 | 31 | ENTRYPOINT ["/app/datalith"] -------------------------------------------------------------------------------- /docker-compose.image.yml: -------------------------------------------------------------------------------- 1 | services: 2 | app: 3 | restart: always 4 | build: 5 | context: . 6 | dockerfile: Dockerfile.image 7 | image: datalith 8 | container_name: datalith 9 | environment: 10 | TZ: Asia/Taipei 11 | DATALITH_MAX_FILE_SIZE: 2 GiB 12 | DATALITH_TEMPORARY_FILE_LIFESPAN: 60 13 | DATALITH_MAX_IMAGE_RESOLUTION: 50000000 14 | DATALITH_MAX_IMAGE_RESOLUTION_MULTIPLIER: 3 15 | volumes: 16 | - ~/docker/datalith:/app/shared 17 | ports: 18 | - "1111:1111" 19 | command: --environment /app/shared/db 20 | logging: 21 | driver: "syslog" 22 | options: 23 | tag: "docker/{{.ImageName}}/{{.Name}}" -------------------------------------------------------------------------------- /datalith-core/src/image/sync.rs: -------------------------------------------------------------------------------- 1 | use std::{ops::Deref, sync::Arc}; 2 | 3 | use image_convert::ImageResource; 4 | 5 | #[derive(Debug, Clone)] 6 | pub(crate) struct ReadOnlyImageResource { 7 | inner: Arc, 8 | } 9 | 10 | unsafe impl Send for ReadOnlyImageResource {} 11 | unsafe impl Sync for ReadOnlyImageResource {} 12 | 13 | impl From for ReadOnlyImageResource { 14 | #[inline] 15 | fn from(value: ImageResource) -> Self { 16 | Self { 17 | #[allow(clippy::arc_with_non_send_sync)] 18 | inner: Arc::new(value), 19 | } 20 | } 21 | } 22 | 23 | impl Deref for 
ReadOnlyImageResource { 24 | type Target = ImageResource; 25 | 26 | #[inline] 27 | fn deref(&self) -> &Self::Target { 28 | self.inner.as_ref() 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /datalith-core/src/manager/datalith_manager_errors.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | error::Error, 3 | fmt::{self, Display, Formatter}, 4 | }; 5 | 6 | use tokio_cron_scheduler::JobSchedulerError; 7 | 8 | /// Errors occurred during `DatalithManager` creation. 9 | #[derive(Debug)] 10 | pub enum DatalithManagerError { 11 | JobSchedulerError(JobSchedulerError), 12 | } 13 | 14 | impl From for DatalithManagerError { 15 | #[inline] 16 | fn from(error: JobSchedulerError) -> Self { 17 | Self::JobSchedulerError(error) 18 | } 19 | } 20 | 21 | impl Display for DatalithManagerError { 22 | #[inline] 23 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 24 | match self { 25 | Self::JobSchedulerError(error) => Display::fmt(error, f), 26 | } 27 | } 28 | } 29 | 30 | impl Error for DatalithManagerError {} 31 | -------------------------------------------------------------------------------- /datalith/src/main.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate rocket; 3 | 4 | mod cli; 5 | mod rocket_mounts; 6 | 7 | use cli::*; 8 | use datalith_core::{Datalith, DatalithManager}; 9 | use rocket::{Ignite, Rocket}; 10 | 11 | fn main() -> anyhow::Result<()> { 12 | let args = get_args(); 13 | 14 | let rocket = rocket_mounts::create(args.address, args.listen_port, args.max_file_size.as_u64()); 15 | 16 | rocket::execute(async { 17 | let datalith = Datalith::new(args.environment).await?; 18 | 19 | datalith.set_temporary_file_lifespan(args.temporary_file_lifespan); 20 | 21 | #[cfg(feature = "image-convert")] 22 | { 23 | datalith.set_max_image_resolution(args.max_image_resolution); 24 | 
datalith.set_max_image_resolution_multiplier(args.max_image_resolution_multiplier); 25 | } 26 | 27 | let datalith = DatalithManager::new(datalith).await?; 28 | 29 | let rocket = rocket.manage(datalith); 30 | 31 | Ok(rocket.launch().await?) as anyhow::Result> 32 | })?; 33 | 34 | Ok(()) 35 | } 36 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 magiclen.org (Ron Li) 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /datalith/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 magiclen.org (Ron Li) 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /datalith-core/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 magiclen.org (Ron Li) 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /datalith-core/tests/global.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | 3 | use std::{ 4 | path::Path, 5 | time::{SystemTime, UNIX_EPOCH}, 6 | }; 7 | 8 | use datalith_core::{Datalith, DatalithCreateError}; 9 | use lazy_static_include::lazy_static_include_bytes; 10 | 11 | const TEST_FOLDER: &str = slash_formatter::concat_with_file_separator!("tests", "db"); 12 | 13 | pub const IMAGE_PATH: &str = manifest_dir_macros::file_path!("tests", "data", "image.png"); 14 | pub const IMAGE_SIZE: u64 = 11658; 15 | 16 | lazy_static_include_bytes! { 17 | pub IMAGE_DATA => ("tests", "data", "image.png"), 18 | } 19 | 20 | #[inline] 21 | pub async fn datalith_init() -> Datalith { 22 | loop { 23 | let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); 24 | 25 | match Datalith::new(Path::new(TEST_FOLDER).join(timestamp.as_micros().to_string())).await { 26 | Ok(datalith) => break datalith, 27 | Err(DatalithCreateError::AlreadyRun) => continue, 28 | Err(error) => panic!("{error}"), 29 | } 30 | } 31 | } 32 | 33 | #[inline] 34 | pub async fn datalith_close(datalith: Datalith) { 35 | datalith.drop_datalith().await.unwrap(); 36 | } 37 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | EXECUTABLE_NAME := datalith 2 | 3 | all: ./target/x86_64-unknown-linux-musl/release/$(EXECUTABLE_NAME) 4 | 5 | ./target/x86_64-unknown-linux-musl/release/$(EXECUTABLE_NAME): $(shell find . 
-type f -iname '*.rs' -o -name 'Cargo.toml' | sed 's/ /\\ /g') $(shell find ./front-end ./langs ./views ./data -type f | sed 's/ /\\ /g') 6 | PWD=$$(pwd) 7 | cd $$MAGICK_PATH && bash build.sh 8 | cd $$PWD 9 | IMAGE_MAGICK_INCLUDE_DIRS="$$MAGICK_PATH/linux/include/ImageMagick-7" IMAGE_MAGICK_LIB_DIRS="$$MUSL_PATH/x86_64-linux-musl/lib:$$MUSL_PATH/lib/gcc/x86_64-linux-musl/11.4.0:$$MAGICK_PATH/linux/lib" IMAGE_MAGICK_LIBS="z:bz2:lzma:zstd:jpeg:png:tiff:openjp2:jbig:sharpyuv:webpmux:webpdemux:webp:de265:x265:aom:stdc++:heif:iconv:gcc:xml2:freetype:fontconfig:gomp:MagickWand-7.Q16HDRI:MagickCore-7.Q16HDRI" IMAGE_MAGICK_STATIC=1 cargo build --release --target x86_64-unknown-linux-musl 10 | 11 | install: 12 | $(MAKE) 13 | sudo cp ./target/x86_64-unknown-linux-musl/release/$(EXECUTABLE_NAME) /usr/local/bin/$(EXECUTABLE_NAME) 14 | sudo chown root: /usr/local/bin/$(EXECUTABLE_NAME) 15 | sudo chmod 0755 /usr/local/bin/$(EXECUTABLE_NAME) 16 | 17 | test: 18 | cargo test --verbose 19 | 20 | clean: 21 | cargo clean 22 | -------------------------------------------------------------------------------- /datalith/src/rocket_mounts/fetch.rs: -------------------------------------------------------------------------------- 1 | use datalith_core::DatalithManager; 2 | use rocket::{http::Status, serde::uuid::Uuid, Build, Rocket, State}; 3 | use rocket_cache_response::CacheResponse; 4 | use rocket_etag_if_none_match::EtagIfNoneMatch; 5 | 6 | use crate::rocket_mounts::{rocket_utils::DatalithResponse, Boolean}; 7 | 8 | #[get("/?")] 9 | async fn get( 10 | etag_if_none_match: &EtagIfNoneMatch<'_>, 11 | file_center: &State, 12 | id: Uuid, 13 | download: Option, 14 | ) -> Result, Status> { 15 | let download = download.map(|e| e.0).unwrap_or(false); 16 | 17 | match DatalithResponse::from_resource_id(file_center.inner(), etag_if_none_match, id, download) 18 | .await 19 | { 20 | Ok(Some(response)) => { 21 | if response.is_temporary() { 22 | Ok(CacheResponse::NoStore(response)) 23 | } else { 24 | 
Ok(CacheResponse::NoCacheControl(response)) 25 | } 26 | }, 27 | Ok(None) => Err(Status::NotFound), 28 | Err(error) => { 29 | rocket::error!("{error}"); 30 | 31 | Err(Status::InternalServerError) 32 | }, 33 | } 34 | } 35 | 36 | #[inline] 37 | pub fn mounts(rocket: Rocket) -> Rocket { 38 | rocket.mount("/f", routes![get]) 39 | } 40 | -------------------------------------------------------------------------------- /datalith/src/rocket_mounts/mod.rs: -------------------------------------------------------------------------------- 1 | mod fetch; 2 | #[cfg(feature = "image-convert")] 3 | mod fetch_image; 4 | mod operate; 5 | #[cfg(feature = "image-convert")] 6 | mod operate_image; 7 | mod rocket_utils; 8 | 9 | use std::net::IpAddr; 10 | 11 | use rocket::{http::Status, Build, Config, Request, Rocket}; 12 | use validators::prelude::*; 13 | 14 | #[derive(Debug)] 15 | struct ServerConfig { 16 | pub(crate) max_file_size: u64, 17 | } 18 | 19 | #[derive(Debug, Clone, Copy, Validator)] 20 | #[validator(boolean)] 21 | struct Boolean(pub(crate) bool); 22 | 23 | #[catch(default)] 24 | fn default_error_catcher(status: Status, _req: &Request) -> String { 25 | format!("{status}") 26 | } 27 | 28 | pub fn create(address: IpAddr, listen_port: u16, max_file_size: u64) -> Rocket { 29 | let figment = Config::figment() 30 | .merge(("ident", "Datalith")) 31 | .merge(("address", address)) 32 | .merge(("port", listen_port)); 33 | 34 | let rocket = rocket::custom(figment) 35 | .manage(ServerConfig { 36 | max_file_size, 37 | }) 38 | .register("/", catchers![default_error_catcher]); 39 | 40 | #[cfg(feature = "image-convert")] 41 | let rocket = fetch_image::mounts(rocket); 42 | 43 | #[cfg(feature = "image-convert")] 44 | let rocket = operate_image::mounts(rocket); 45 | 46 | let rocket = fetch::mounts(rocket); 47 | 48 | operate::mounts(rocket) 49 | } 50 | -------------------------------------------------------------------------------- /datalith/src/rocket_mounts/fetch_image.rs: 
-------------------------------------------------------------------------------- 1 | use datalith_core::DatalithManager; 2 | use rocket::{http::Status, serde::uuid::Uuid, Build, Rocket, State}; 3 | use rocket_cache_response::CacheResponse; 4 | use rocket_etag_if_none_match::EtagIfNoneMatch; 5 | 6 | use crate::rocket_mounts::{ 7 | rocket_utils::{DatalithResponse, ResolutionType}, 8 | Boolean, 9 | }; 10 | 11 | #[get("/?&&")] 12 | async fn get( 13 | etag_if_none_match: &EtagIfNoneMatch<'_>, 14 | file_center: &State, 15 | id: Uuid, 16 | resolution: Option, 17 | fallback: Option, 18 | download: Option, 19 | ) -> Result, Status> { 20 | let fallback = fallback.map(|e| e.0).unwrap_or(false); 21 | let download = download.map(|e| e.0).unwrap_or(false); 22 | 23 | match DatalithResponse::from_image_id( 24 | file_center.inner(), 25 | etag_if_none_match, 26 | id, 27 | resolution, 28 | fallback, 29 | download, 30 | ) 31 | .await 32 | { 33 | Ok(Some(response)) => Ok(CacheResponse::NoCacheControl(response)), 34 | Ok(None) => Err(Status::NotFound), 35 | Err(error) => { 36 | rocket::error!("{error}"); 37 | 38 | Err(Status::InternalServerError) 39 | }, 40 | } 41 | } 42 | 43 | #[inline] 44 | pub fn mounts(rocket: Rocket) -> Rocket { 45 | rocket.mount("/i/f", routes![get]) 46 | } 47 | -------------------------------------------------------------------------------- /datalith/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "datalith" 3 | version = "0.1.0" 4 | authors = ["Magic Len "] 5 | edition = "2021" 6 | rust-version = "1.83" 7 | repository = "https://github.com/magiclen/datalith" 8 | homepage = "https://magiclen.org/datalith" 9 | keywords = ["datalith", "sqlite", "storage", "file", "oss"] 10 | categories = ["filesystem", "database"] 11 | description = "A file management system powered by SQLite for metadata storage and the file system for file storage." 
12 | license = "MIT" 13 | include = ["src/**/*", "Cargo.toml", "README.md", "LICENSE"] 14 | 15 | [dependencies] 16 | datalith-core = { version = "0.1", path = "../datalith-core", default-features = false, features = ["manager"] } 17 | 18 | clap = { version = "4", features = ["derive", "env"] } 19 | concat-with = "0.2" 20 | terminal_size = "0.4" 21 | 22 | anyhow = "1" 23 | 24 | tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "macros"] } 25 | tokio-util = { version = "0.7", features = ["io"] } 26 | 27 | serde_json = "1" 28 | url-escape = "0.1" 29 | byte-unit = { version = "5", features = ["serde"]} 30 | 31 | rocket = { version = "0.5", features = ["uuid"]} 32 | rocket-multipart-form-data = "0.10" 33 | rocket-cache-response = "0.6" 34 | rocket-etag-if-none-match = "0.4" 35 | 36 | validators = { version = "0.25", default-features = false, features = ["derive", "boolean", "rocket"]} 37 | 38 | [features] 39 | default = ["magic", "image-convert"] 40 | magic = ["datalith-core/magic"] 41 | image-convert = ["datalith-core/image-convert"] -------------------------------------------------------------------------------- /datalith-core/tests/convert.rs: -------------------------------------------------------------------------------- 1 | #![cfg(feature = "image-convert")] 2 | 3 | mod global; 4 | 5 | use global::*; 6 | 7 | #[tokio::test] 8 | pub async fn put_image_by_resource() { 9 | let datalith = datalith_init().await; 10 | 11 | let image_data = IMAGE_DATA.as_ref(); 12 | 13 | let resource = 14 | datalith.put_resource_by_buffer(image_data, Some("image.png"), None).await.unwrap(); 15 | 16 | let image = datalith.put_image_by_resource(&resource, Some(32), None, None).await.unwrap(); 17 | assert_eq!("image", image.image_stem()); 18 | assert_eq!(32, image.image_width()); 19 | assert_eq!(32, image.image_height()); 20 | assert!(image.has_alpha_channel()); 21 | assert_eq!(resource.file(), image.original_file().unwrap()); 22 | 23 | let resource_id = 
resource.id(); 24 | let image_id = image.id(); 25 | 26 | assert!(datalith.check_resource_exist(resource_id).await.unwrap()); 27 | assert!(datalith.check_image_exist(image_id).await.unwrap()); 28 | 29 | datalith_close(datalith).await; 30 | } 31 | 32 | #[tokio::test] 33 | pub async fn convert_resource_to_image() { 34 | let datalith = datalith_init().await; 35 | 36 | let image_data = IMAGE_DATA.as_ref(); 37 | 38 | let resource = 39 | datalith.put_resource_by_buffer(image_data, Some("image.png"), None).await.unwrap(); 40 | let resource_id = resource.id(); 41 | 42 | let image = datalith.convert_resource_to_image(resource, Some(32), None, None).await.unwrap(); 43 | assert_eq!("image", image.image_stem()); 44 | assert_eq!(32, image.image_width()); 45 | assert_eq!(32, image.image_height()); 46 | assert!(image.has_alpha_channel()); 47 | 48 | let image_id = image.id(); 49 | 50 | assert!(!datalith.check_resource_exist(resource_id).await.unwrap()); 51 | assert!(datalith.check_image_exist(image_id).await.unwrap()); 52 | 53 | datalith_close(datalith).await; 54 | } 55 | -------------------------------------------------------------------------------- /datalith-core/src/image/datalith_image_errors.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | error::Error, 3 | fmt, 4 | fmt::{Display, Formatter}, 5 | io, 6 | }; 7 | 8 | use image_convert::MagickError; 9 | 10 | use crate::{DatalithReadError, DatalithWriteError}; 11 | 12 | /// Errors occurred during Datalith image write operations. 
13 | #[derive(Debug)] 14 | pub enum DatalithImageWriteError { 15 | DatalithWriteError(DatalithWriteError), 16 | UnsupportedImageType, 17 | ResolutionTooBig, 18 | MagickError(MagickError), 19 | } 20 | 21 | impl From for DatalithImageWriteError { 22 | #[inline] 23 | fn from(error: DatalithReadError) -> Self { 24 | Self::DatalithWriteError(error.into()) 25 | } 26 | } 27 | 28 | impl From for DatalithImageWriteError { 29 | #[inline] 30 | fn from(error: DatalithWriteError) -> Self { 31 | Self::DatalithWriteError(error) 32 | } 33 | } 34 | 35 | impl From for DatalithImageWriteError { 36 | #[inline] 37 | fn from(error: MagickError) -> Self { 38 | Self::MagickError(error) 39 | } 40 | } 41 | 42 | impl From for DatalithImageWriteError { 43 | #[inline] 44 | fn from(error: io::Error) -> Self { 45 | Self::DatalithWriteError(error.into()) 46 | } 47 | } 48 | 49 | impl From for DatalithImageWriteError { 50 | #[inline] 51 | fn from(error: sqlx::Error) -> Self { 52 | Self::DatalithWriteError(error.into()) 53 | } 54 | } 55 | 56 | impl Display for DatalithImageWriteError { 57 | #[inline] 58 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 59 | match self { 60 | Self::DatalithWriteError(error) => Display::fmt(&error, f), 61 | Self::UnsupportedImageType => f.write_str("supported image type"), 62 | Self::ResolutionTooBig => f.write_str("the image resolution is too big"), 63 | Self::MagickError(error) => Display::fmt(&error, f), 64 | } 65 | } 66 | } 67 | 68 | impl Error for DatalithImageWriteError {} 69 | -------------------------------------------------------------------------------- /datalith/rustfmt.toml: -------------------------------------------------------------------------------- 1 | # array_width = 60 2 | # attr_fn_like_width = 70 3 | binop_separator = "Front" 4 | blank_lines_lower_bound = 0 5 | blank_lines_upper_bound = 1 6 | brace_style = "PreferSameLine" 7 | # chain_width = 60 8 | color = "Auto" 9 | # comment_width = 100 10 | condense_wildcard_suffixes = true 11 | 
control_brace_style = "AlwaysSameLine" 12 | empty_item_single_line = true 13 | enum_discrim_align_threshold = 80 14 | error_on_line_overflow = false 15 | error_on_unformatted = false 16 | # fn_call_width = 60 17 | fn_params_layout = "Tall" 18 | fn_single_line = false 19 | force_explicit_abi = true 20 | force_multiline_blocks = false 21 | format_code_in_doc_comments = true 22 | doc_comment_code_block_width = 80 23 | format_generated_files = true 24 | format_macro_matchers = true 25 | format_macro_bodies = true 26 | skip_macro_invocations = [] 27 | format_strings = true 28 | hard_tabs = false 29 | hex_literal_case = "Upper" 30 | imports_indent = "Block" 31 | imports_layout = "Mixed" 32 | indent_style = "Block" 33 | inline_attribute_width = 0 34 | match_arm_blocks = true 35 | match_arm_leading_pipes = "Never" 36 | match_block_trailing_comma = true 37 | max_width = 100 38 | merge_derives = true 39 | imports_granularity = "Crate" 40 | newline_style = "Unix" 41 | normalize_comments = false 42 | normalize_doc_attributes = true 43 | overflow_delimited_expr = true 44 | remove_nested_parens = true 45 | reorder_impl_items = true 46 | reorder_imports = true 47 | group_imports = "StdExternalCrate" 48 | reorder_modules = true 49 | short_array_element_width_threshold = 10 50 | # single_line_if_else_max_width = 50 51 | space_after_colon = true 52 | space_before_colon = false 53 | spaces_around_ranges = false 54 | struct_field_align_threshold = 80 55 | struct_lit_single_line = false 56 | # struct_lit_width = 18 57 | # struct_variant_width = 35 58 | tab_spaces = 4 59 | trailing_comma = "Vertical" 60 | trailing_semicolon = true 61 | type_punctuation_density = "Wide" 62 | use_field_init_shorthand = true 63 | use_small_heuristics = "Max" 64 | use_try_shorthand = true 65 | where_single_line = false 66 | wrap_comments = false -------------------------------------------------------------------------------- /datalith-core/Cargo.toml: 
-------------------------------------------------------------------------------- 1 | [package] 2 | name = "datalith-core" 3 | version = "0.1.0" 4 | authors = ["Magic Len "] 5 | edition = "2021" 6 | rust-version = "1.82" 7 | repository = "https://github.com/magiclen/datalith" 8 | homepage = "https://magiclen.org/datalith" 9 | keywords = ["datalith", "sqlite", "storage", "file", "oss"] 10 | categories = ["filesystem", "database"] 11 | description = "A file management system powered by SQLite for metadata storage and the file system for file storage." 12 | license = "MIT" 13 | include = ["src/**/*", "Cargo.toml", "README.md", "LICENSE"] 14 | 15 | [dependencies] 16 | tokio = { version = "1", features = ["fs", "io-util", "macros"] } 17 | tokio-cron-scheduler = { version = "0.14", optional = true } 18 | 19 | tracing = "0.1" 20 | 21 | sqlx = { version = "0.8", features = [ "runtime-tokio", "sqlite", "chrono", "uuid" ] } 22 | rdb-pagination = { version = "0.3", features = ["sqlite"] } 23 | 24 | num_cpus = "1" 25 | chrono = "0.4" 26 | 27 | educe = { version = "0.5", default-features = false, features = ["Debug", "Default", "PartialEq", "Eq", "Hash"] } 28 | once_cell = { version = "1", optional = true } 29 | 30 | sha2 = "0.10" 31 | mime = "0.3" 32 | mime_guess = "2" 33 | magic = { version = "0.16", optional = true } 34 | 35 | fs4 = { version = "0.13", features = ["tokio"] } 36 | 37 | uuid = { version = "1", features = [ "v4" ] } 38 | rand = "0.9" 39 | trim-in-place = "0.1" 40 | 41 | image-convert = { version = "0.20.1", optional = true } 42 | rc-u8-reader = { version = "2", optional = true } 43 | regex = { version = "1", optional = true } 44 | 45 | [dev-dependencies] 46 | tokio = { version = "1", features = ["macros", "test-util"] } 47 | manifest-dir-macros = "0.1" 48 | lazy-static-include = "3" 49 | slash-formatter = "3" 50 | 51 | [features] 52 | default = ["magic", "image-convert", "manager"] 53 | magic = ["dep:magic", "dep:once_cell"] 54 | image-convert = 
["dep:image-convert", "dep:rc-u8-reader", "dep:regex"] 55 | manager = ["dep:tokio-cron-scheduler"] 56 | 57 | [package.metadata.docs.rs] 58 | all-features = true 59 | rustdoc-args = ["--cfg", "docsrs"] -------------------------------------------------------------------------------- /datalith-core/rustfmt.toml: -------------------------------------------------------------------------------- 1 | # array_width = 60 2 | # attr_fn_like_width = 70 3 | binop_separator = "Front" 4 | blank_lines_lower_bound = 0 5 | blank_lines_upper_bound = 1 6 | brace_style = "PreferSameLine" 7 | # chain_width = 60 8 | color = "Auto" 9 | # comment_width = 100 10 | condense_wildcard_suffixes = true 11 | control_brace_style = "AlwaysSameLine" 12 | empty_item_single_line = true 13 | enum_discrim_align_threshold = 80 14 | error_on_line_overflow = false 15 | error_on_unformatted = false 16 | # fn_call_width = 60 17 | fn_params_layout = "Tall" 18 | fn_single_line = false 19 | force_explicit_abi = true 20 | force_multiline_blocks = false 21 | format_code_in_doc_comments = true 22 | doc_comment_code_block_width = 80 23 | format_generated_files = true 24 | format_macro_matchers = true 25 | format_macro_bodies = true 26 | skip_macro_invocations = [] 27 | format_strings = true 28 | hard_tabs = false 29 | hex_literal_case = "Upper" 30 | imports_indent = "Block" 31 | imports_layout = "Mixed" 32 | indent_style = "Block" 33 | inline_attribute_width = 0 34 | match_arm_blocks = true 35 | match_arm_leading_pipes = "Never" 36 | match_block_trailing_comma = true 37 | max_width = 100 38 | merge_derives = true 39 | imports_granularity = "Crate" 40 | newline_style = "Unix" 41 | normalize_comments = false 42 | normalize_doc_attributes = true 43 | overflow_delimited_expr = true 44 | remove_nested_parens = true 45 | reorder_impl_items = true 46 | reorder_imports = true 47 | group_imports = "StdExternalCrate" 48 | reorder_modules = true 49 | short_array_element_width_threshold = 10 50 | # 
single_line_if_else_max_width = 50 51 | space_after_colon = true 52 | space_before_colon = false 53 | spaces_around_ranges = false 54 | struct_field_align_threshold = 80 55 | struct_lit_single_line = false 56 | # struct_lit_width = 18 57 | # struct_variant_width = 35 58 | tab_spaces = 4 59 | trailing_comma = "Vertical" 60 | trailing_semicolon = true 61 | type_punctuation_density = "Wide" 62 | use_field_init_shorthand = true 63 | use_small_heuristics = "Max" 64 | use_try_shorthand = true 65 | where_single_line = false 66 | wrap_comments = false -------------------------------------------------------------------------------- /datalith-core/src/resources/datalith_resource.rs: -------------------------------------------------------------------------------- 1 | use chrono::{DateTime, Local, TimeZone}; 2 | use educe::Educe; 3 | use mime::Mime; 4 | use uuid::Uuid; 5 | 6 | use crate::DatalithFile; 7 | 8 | /// A struct that represents an resource. 9 | #[derive(Debug, Educe)] 10 | #[educe(PartialEq, Eq, Hash)] 11 | pub struct DatalithResource { 12 | id: Uuid, 13 | #[educe(Eq(ignore), Hash(ignore))] 14 | created_at: DateTime, 15 | #[educe(Eq(ignore), Hash(ignore))] 16 | file_type: Mime, 17 | #[educe(Eq(ignore), Hash(ignore))] 18 | file_name: String, 19 | #[educe(Eq(ignore), Hash(ignore))] 20 | file: DatalithFile, 21 | #[educe(Eq(ignore), Hash(ignore))] 22 | is_temporary: bool, 23 | } 24 | 25 | impl DatalithResource { 26 | /// Create a resource instance. 27 | #[inline] 28 | pub(crate) fn new( 29 | id: impl Into, 30 | created_at: DateTime, 31 | file_type: Mime, 32 | file_name: impl Into, 33 | file: DatalithFile, 34 | is_temporary: bool, 35 | ) -> Self 36 | where { 37 | let id = id.into(); 38 | let file_name = file_name.into(); 39 | 40 | Self { 41 | id, 42 | created_at: created_at.with_timezone(&Local), 43 | file_type, 44 | file_name, 45 | file, 46 | is_temporary, 47 | } 48 | } 49 | } 50 | 51 | impl DatalithResource { 52 | /// Retrieve the resource ID (UUID). 
53 | #[inline] 54 | pub const fn id(&self) -> Uuid { 55 | self.id 56 | } 57 | 58 | /// Retrieve the creation time. 59 | #[inline] 60 | pub const fn created_at(&self) -> DateTime { 61 | self.created_at 62 | } 63 | 64 | /// Retrieve the file type (MIME). 65 | #[inline] 66 | pub const fn file_type(&self) -> &Mime { 67 | &self.file_type 68 | } 69 | 70 | /// Retrieve the file name. 71 | #[inline] 72 | pub const fn file_name(&self) -> &String { 73 | &self.file_name 74 | } 75 | 76 | /// Retrieve the file. 77 | #[inline] 78 | pub const fn file(&self) -> &DatalithFile { 79 | &self.file 80 | } 81 | 82 | /// Check if this resource is temporary. 83 | #[inline] 84 | pub const fn is_temporary(&self) -> bool { 85 | self.is_temporary 86 | } 87 | } 88 | 89 | impl From for DatalithFile { 90 | #[inline] 91 | fn from(value: DatalithResource) -> Self { 92 | value.file 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /datalith-core/tests/clean_up.rs: -------------------------------------------------------------------------------- 1 | mod global; 2 | 3 | use std::time::Duration; 4 | 5 | use datalith_core::PATH_FILE_DIRECTORY; 6 | use global::*; 7 | use tokio::{fs, time}; 8 | 9 | #[tokio::test] 10 | async fn clear_expired_files() { 11 | let datalith = datalith_init().await; 12 | 13 | datalith.set_temporary_file_lifespan(Duration::from_secs(2)); 14 | 15 | let image = IMAGE_DATA.as_ref(); 16 | 17 | { 18 | let file = 19 | datalith.put_file_by_buffer_temporarily(image, Some("image.png"), None).await.unwrap(); 20 | 21 | let file_id = file.id(); 22 | 23 | drop(file); 24 | 25 | assert_eq!(0, datalith.clear_expired_files(Duration::from_millis(500)).await.unwrap()); 26 | 27 | assert!(datalith.check_file_exist(file_id).await.unwrap()); 28 | 29 | time::sleep(Duration::from_secs(2)).await; 30 | 31 | assert_eq!(1, datalith.clear_expired_files(Duration::from_millis(500)).await.unwrap()); 32 | 33 | 
assert!(!datalith.check_file_exist(file_id).await.unwrap()); 34 | } 35 | 36 | datalith_close(datalith).await; 37 | } 38 | 39 | #[tokio::test] 40 | async fn clear_untracked_files() { 41 | let datalith = datalith_init().await; 42 | 43 | let environment = datalith.get_environment(); 44 | 45 | { 46 | let image = IMAGE_DATA.as_ref(); 47 | 48 | let id_1 = datalith 49 | .put_file_by_buffer_temporarily(image, Some("image.png"), None) 50 | .await 51 | .unwrap() 52 | .id(); 53 | let id_2 = datalith.put_file_by_buffer(image, Some("image.png"), None).await.unwrap().id(); 54 | 55 | assert_eq!(0, datalith.clear_untracked_files().await.unwrap()); 56 | 57 | let file_path_hello = environment.join(PATH_FILE_DIRECTORY).join("hello.txt"); 58 | let file_path_uuid = 59 | environment.join(PATH_FILE_DIRECTORY).join("70b7c850506e4fa98a4a713aca21f594"); 60 | 61 | fs::write(file_path_hello.as_path(), b"Hello world!").await.unwrap(); 62 | fs::write(file_path_uuid.as_path(), b"Hello world!").await.unwrap(); 63 | 64 | assert!(fs::try_exists(file_path_hello.as_path()).await.unwrap()); 65 | assert!(fs::try_exists(file_path_uuid.as_path()).await.unwrap()); 66 | 67 | assert_eq!(2, datalith.clear_untracked_files().await.unwrap()); 68 | 69 | assert!(!fs::try_exists(file_path_hello.as_path()).await.unwrap()); 70 | assert!(!fs::try_exists(file_path_uuid.as_path()).await.unwrap()); 71 | 72 | assert!(datalith.check_file_exist(id_1).await.unwrap()); 73 | assert!(datalith.check_file_exist(id_2).await.unwrap()); 74 | } 75 | 76 | datalith_close(datalith).await; 77 | } 78 | -------------------------------------------------------------------------------- /datalith/README.md: -------------------------------------------------------------------------------- 1 | Datalith Core 2 | ==================== 3 | 4 | A file management system powered by SQLite for metadata storage and the file system for file storage. 
5 | 6 | ## Examples 7 | 8 | #### Put a File 9 | 10 | ```rust 11 | use datalith_core::{mime, Datalith, FileTypeLevel}; 12 | use tokio::io::AsyncReadExt; 13 | 14 | let datalith = Datalith::new("datalith").await.unwrap(); 15 | 16 | let file = datalith.put_file_by_buffer(b"Hello world!", "plain.txt", Some((mime::TEXT_PLAIN_UTF_8, FileTypeLevel::Manual))).await.unwrap(); 17 | 18 | let mut reader = file.create_reader().await.unwrap(); 19 | 20 | let mut s = String::new(); 21 | reader.read_to_string(&mut s).await.unwrap(); 22 | 23 | println!("{s}"); // Hello world! 24 | 25 | datalith.close().await; 26 | ``` 27 | 28 | #### Get a File 29 | 30 | ```rust 31 | use std::str::FromStr; 32 | 33 | use datalith_core::{uuid::Uuid, Datalith, FileTypeLevel}; 34 | use tokio::io::AsyncReadExt; 35 | 36 | let datalith = Datalith::new("datalith").await.unwrap(); 37 | 38 | let file = datalith.get_file_by_id(Uuid::from_str("c31343fc-eae1-4416-809a-a6d96b69b3b9").unwrap()).await.unwrap(); 39 | 40 | if let Some(file) = file { 41 | // do something 42 | } else { 43 | println!("not found"); 44 | } 45 | 46 | datalith.close().await; 47 | ``` 48 | 49 | #### Put a Temporary File 50 | 51 | ```rust 52 | use datalith_core::{mime, Datalith, FileTypeLevel}; 53 | 54 | let datalith = Datalith::new("datalith").await.unwrap(); 55 | 56 | let file_id = datalith.put_file_by_buffer_temporarily(b"Hello world!", "plain.txt", Some((mime::TEXT_PLAIN_UTF_8, FileTypeLevel::Manual))).await.unwrap().id(); 57 | let file = datalith.get_file_by_id(file_id).await.unwrap().unwrap(); // A temporary file can be retrieved using the `get_file_by_id` function only once. After that, it cannot be retrieved again. 
-- File Table
CREATE TABLE `files` (
    -- UUID (128-bit)
    `id` BLOB NOT NULL PRIMARY KEY,
    -- hashed by SHA-256
    `hash` BLOB NOT NULL UNIQUE,
    -- UNIX timestamp (in milliseconds)
    `created_at` INTEGER NOT NULL,
    -- in bytes
    `file_size` INTEGER NOT NULL,
    -- MIME type
    `file_type` TEXT NOT NULL,
    -- the file name when it was first created
    `file_name` TEXT NOT NULL,
    -- the number of times this file was created
    `count` INTEGER NOT NULL DEFAULT 1,
    -- UNIX timestamp (in milliseconds). If this exists, the file is temporary
    `expired_at` INTEGER
);

CREATE INDEX `files_created_at` ON `files` (`created_at`);
CREATE INDEX `files_expired_at` ON `files` (`expired_at`);

-- Resource Table
CREATE TABLE `resources` (
    -- UUID (128-bit)
    `id` BLOB NOT NULL PRIMARY KEY,
    -- UNIX timestamp (in milliseconds)
    `created_at` INTEGER NOT NULL,
    -- MIME type
    `file_type` TEXT NOT NULL,
    -- the file name when it was first created
    `file_name` TEXT NOT NULL,
    -- UUID (128-bit)
    `file_id` BLOB NOT NULL,
    -- UNIX timestamp (in milliseconds). If this exists, the resource is temporary
    `expired_at` INTEGER,

    FOREIGN KEY (`file_id`) REFERENCES `files` (`id`)
);

CREATE INDEX `resources_created_at` ON `resources` (`created_at`);
-- Index the foreign-key column: without it, SQLite must scan the whole table
-- whenever a referenced row in `files` is deleted or its key is checked.
CREATE INDEX `resources_file_id` ON `resources` (`file_id`);

-- Image Table
CREATE TABLE `images` (
    -- UUID (128-bit)
    `id` BLOB NOT NULL PRIMARY KEY,
    -- UNIX timestamp (in milliseconds)
    `created_at` INTEGER NOT NULL,
    -- the file stem of this image
    `image_stem` TEXT NOT NULL,
    -- the width of 1x image (in pixels)
    `image_width` INTEGER NOT NULL,
    -- the height of 1x image (in pixels)
    `image_height` INTEGER NOT NULL,
    -- UUID (128-bit)
    `original_file_id` BLOB,
    -- boolean
    `has_alpha_channel` INTEGER NOT NULL,

    FOREIGN KEY (`original_file_id`) REFERENCES `files` (`id`)
);

CREATE INDEX `images_created_at` ON `images` (`created_at`);
-- Foreign-key index (see note on `resources_file_id`).
CREATE INDEX `images_original_file_id` ON `images` (`original_file_id`);

-- Image Thumbnail Table
CREATE TABLE `image_thumbnails` (
    -- UUID (128-bit)
    `image_id` BLOB NOT NULL,
    `multiplier` INTEGER NOT NULL,
    -- boolean
    `fallback` INTEGER NOT NULL,
    -- UUID (128-bit)
    `file_id` BLOB NOT NULL,

    PRIMARY KEY (`image_id`, `multiplier`, `fallback`),
    FOREIGN KEY (`image_id`) REFERENCES `images` (`id`),
    FOREIGN KEY (`file_id`) REFERENCES `files` (`id`)
);

-- `image_id` is covered by the primary key's leading column; `file_id` still
-- needs its own index for efficient parent-key deletes in `files`.
CREATE INDEX `image_thumbnails_file_id` ON `image_thumbnails` (`file_id`);
-------------------------------------------------------------------------------- /datalith-core/src/magic_cookie_pool.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | ops::Deref, 3 | sync::atomic::{AtomicBool, Ordering}, 4 | thread, 5 | time::Duration, 6 | }; 7 | 8 | use magic::{ 9 | cookie::{Flags, Load}, 10 | Cookie, 11 | }; 12 | use tokio::time; 13 | 14 | #[derive(Debug)] 15 | pub(crate) struct MagicCookie<'a> { 16 | using: &'a AtomicBool, 17 | cookie: &'a Cookie, 18 | } 19 | 20 | impl Drop for MagicCookie<'_> { 21 | fn drop(&mut self) { 22 | self.using.swap(false, Ordering::Relaxed); 23 | } 24 | } 25 | 26 | impl Deref for MagicCookie<'_> { 27 | type Target = Cookie; 28 | 29 | #[inline] 30 | fn deref(&self) -> &Self::Target { 31 | self.cookie 32 | } 33 | } 34 | 35 | #[derive(Debug)] 36 | pub(crate) struct MagicCookiePool { 37 | cookies: Vec<(AtomicBool, Cookie)>, 38 | } 39 | 40 | unsafe impl Send for MagicCookiePool {} 41 | unsafe impl Sync for MagicCookiePool {} 42 | 43 | impl MagicCookiePool { 44 | pub(crate) fn new(size: usize) -> Option { 45 | assert!(size > 0); 46 | 47 | let mut cookies = Vec::with_capacity(size); 48 | 49 | for _ in 0..size { 50 | let cookie = match Cookie::open(Flags::MIME_TYPE) { 51 | Ok(cookie) => cookie, 52 | Err(_) => return None, 53 | }; 54 | 55 | let cookie = match cookie.load(&["/usr/share/file/magic.mgc"].try_into().unwrap()) { 56 | Ok(cookie) => cookie, 57 | Err(_) => return None, 58 | }; 59 | 60 | cookies.push((AtomicBool::new(false), cookie)); 61 | } 62 | 63 | Some(Self { 64 | cookies, 65 | }) 66 | } 67 | } 68 | 69 | impl MagicCookiePool { 70 | pub(crate) async fn acquire_cookie(&self) -> MagicCookie { 71 | loop { 72 | for (using, cookie) in self.cookies.iter() { 73 | if using.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_ok() 74 | { 75 | return MagicCookie { 76 | using, 77 | cookie, 78 | }; 79 | } 80 | } 81 | 82 | 
time::sleep(Duration::from_millis(10)).await; 83 | } 84 | } 85 | 86 | pub(crate) fn acquire_cookie_sync(&self) -> MagicCookie { 87 | loop { 88 | for (using, cookie) in self.cookies.iter() { 89 | if using.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_ok() 90 | { 91 | return MagicCookie { 92 | using, 93 | cookie, 94 | }; 95 | } 96 | } 97 | 98 | thread::sleep(Duration::from_millis(10)); 99 | } 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /datalith/src/rocket_mounts/rocket_utils/content_length.rs: -------------------------------------------------------------------------------- 1 | use std::num::ParseIntError; 2 | 3 | use rocket::{http::Status, outcome::Outcome, request, request::FromRequest, Request}; 4 | 5 | #[derive(Debug, Clone, Default)] 6 | pub struct FileLength(u64); 7 | 8 | impl FileLength { 9 | #[inline] 10 | pub const fn to_u64(&self) -> u64 { 11 | self.0 12 | } 13 | } 14 | 15 | #[rocket::async_trait] 16 | impl<'r> FromRequest<'r> for FileLength { 17 | type Error = ParseIntError; 18 | 19 | async fn from_request(request: &'r Request<'_>) -> request::Outcome { 20 | let content_length: Option<&str> = request.headers().get("content-length").next(); // Only fetch the first one. 21 | 22 | if let Some(content_length) = content_length { 23 | match content_length.parse::() { 24 | Ok(content_length) => Outcome::Success(Self(content_length)), 25 | Err(error) => Outcome::Error((Status::BadRequest, error)), 26 | } 27 | } else { 28 | let file_length: Option<&str> = request.headers().get("x-file-length").next(); // Only fetch the first one. 
29 | 30 | if let Some(file_length) = file_length { 31 | match file_length.parse::() { 32 | Ok(file_length) => Outcome::Success(Self(file_length)), 33 | Err(error) => Outcome::Error((Status::BadRequest, error)), 34 | } 35 | } else { 36 | Outcome::Forward(Status::NotFound) 37 | } 38 | } 39 | } 40 | } 41 | 42 | #[rocket::async_trait] 43 | impl<'r> FromRequest<'r> for &'r FileLength { 44 | type Error = ParseIntError; 45 | 46 | async fn from_request(request: &'r Request<'_>) -> request::Outcome { 47 | let content_length: Option<&str> = request.headers().get("content-length").next(); // Only fetch the first one. 48 | 49 | if let Some(content_length) = content_length { 50 | match content_length.parse::() { 51 | Ok(content_length) => { 52 | Outcome::Success(request.local_cache(|| FileLength(content_length))) 53 | }, 54 | Err(error) => Outcome::Error((Status::BadRequest, error)), 55 | } 56 | } else { 57 | let file_length: Option<&str> = request.headers().get("x-file-length").next(); // Only fetch the first one. 
58 | 59 | if let Some(file_length) = file_length { 60 | match file_length.parse::() { 61 | Ok(file_length) => { 62 | Outcome::Success(request.local_cache(|| FileLength(file_length))) 63 | }, 64 | Err(error) => Outcome::Error((Status::BadRequest, error)), 65 | } 66 | } else { 67 | Outcome::Forward(Status::NotFound) 68 | } 69 | } 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /.github/workflows/ci-version.yml: -------------------------------------------------------------------------------- 1 | name: CI-version 2 | 3 | on: 4 | push: 5 | tags: 6 | - "v*" 7 | 8 | env: 9 | CARGO_TERM_COLOR: always 10 | 11 | jobs: 12 | tests: 13 | strategy: 14 | fail-fast: false 15 | matrix: 16 | os: 17 | - ubuntu-latest 18 | # - macos-latest 19 | toolchain: 20 | - stable 21 | - nightly 22 | features: 23 | - 24 | - --no-default-features 25 | name: Test ${{ matrix.toolchain }} on ${{ matrix.os }} (${{ matrix.features }}) 26 | runs-on: ${{ matrix.os }} 27 | steps: 28 | - name: Install librsvg2 libwebp libmagic (Linux) 29 | run: | 30 | sudo apt update 31 | sudo apt install librsvg2-dev libwebp-dev libmagic-dev 32 | if: runner.os == 'Linux' 33 | - name: Install libwebp (macOS) 34 | run: | 35 | brew update 36 | brew list webp || brew install webp 37 | if: runner.os == 'macOS' 38 | - name: Install ImageMagick 39 | run: | 40 | wget https://imagemagick.org/archive/ImageMagick.tar.gz 41 | tar xf ImageMagick.tar.gz 42 | cd ImageMagick-* 43 | ./configure --enable-hdri --with-rsvg 44 | make -j$(nproc) 45 | sudo make install 46 | - run: sudo ldconfig 47 | if: runner.os == 'Linux' 48 | - uses: actions/checkout@v4 49 | - uses: actions-rust-lang/setup-rust-toolchain@v1 50 | with: 51 | toolchain: ${{ matrix.toolchain }} 52 | - run: cargo test --release ${{ matrix.features }} 53 | - run: cargo doc --release ${{ matrix.features }} 54 | 55 | MSRV: 56 | strategy: 57 | fail-fast: false 58 | matrix: 59 | os: 60 | - ubuntu-latest 61 | # - macos-latest 62 | 
toolchain: 63 | - 1.83 64 | features: 65 | - 66 | - --no-default-features 67 | name: Test ${{ matrix.toolchain }} on ${{ matrix.os }} (${{ matrix.features }}) 68 | runs-on: ${{ matrix.os }} 69 | steps: 70 | - name: Install librsvg2 libwebp libmagic (Linux) 71 | run: | 72 | sudo apt update 73 | sudo apt install librsvg2-dev libwebp-dev libmagic-dev 74 | if: runner.os == 'Linux' 75 | - name: Install libwebp (macOS) 76 | run: | 77 | brew update 78 | brew list webp || brew install webp 79 | if: runner.os == 'macOS' 80 | - name: Install ImageMagick 81 | run: | 82 | wget https://imagemagick.org/archive/ImageMagick.tar.gz 83 | tar xf ImageMagick.tar.gz 84 | cd ImageMagick-* 85 | ./configure --enable-hdri --with-rsvg 86 | make -j$(nproc) 87 | sudo make install 88 | - run: sudo ldconfig 89 | if: runner.os == 'Linux' 90 | - uses: actions/checkout@v4 91 | - uses: actions-rust-lang/setup-rust-toolchain@v1 92 | with: 93 | toolchain: ${{ matrix.toolchain }} 94 | - run: cargo test --release --lib --bins ${{ matrix.features }} -------------------------------------------------------------------------------- /Dockerfile.image: -------------------------------------------------------------------------------- 1 | ARG imagemagick_ver=7.1.1-36 2 | 3 | FROM rust AS builder 4 | 5 | RUN apt update && apt install -y libmagic-dev 6 | 7 | RUN apt install -y curl build-essential automake autopoint libtool intltool pkg-config cmake clang 8 | 9 | RUN mkdir /imagemagick_build 10 | 11 | RUN apt install -y \ 12 | libbz2-dev \ 13 | libdjvulibre-dev \ 14 | libfftw3-dev \ 15 | libfontconfig1-dev \ 16 | libfreetype-dev \ 17 | libgs-dev \ 18 | libgraphviz-dev \ 19 | libheif-dev \ 20 | libjbig-dev \ 21 | libjpeg-dev \ 22 | liblcms2-dev \ 23 | liblqr-1-0-dev \ 24 | liblzma-dev \ 25 | libopenexr-dev \ 26 | libopenjp2-7-dev \ 27 | libpango1.0-dev \ 28 | libpng-dev \ 29 | libraqm-dev \ 30 | libraw-dev \ 31 | librsvg2-dev \ 32 | libtiff-dev \ 33 | libwebp-dev \ 34 | libwmf-dev \ 35 | libxml2-dev \ 36 | 
libzip-dev \ 37 | zlib1g-dev \ 38 | libzstd-dev \ 39 | ghostscript 40 | 41 | WORKDIR /build/ImageMagick 42 | 43 | ARG imagemagick_ver 44 | RUN curl -fL https://github.com/ImageMagick/ImageMagick/archive/refs/tags/${imagemagick_ver}.tar.gz -o imagemagick.tar.gz && tar xzf imagemagick.tar.gz 45 | 46 | WORKDIR /build/ImageMagick/ImageMagick-${imagemagick_ver} 47 | 48 | COPY patches patches 49 | 50 | RUN patch config/config.h.in < patches/config.patch 51 | RUN patch configure < patches/configure.patch 52 | 53 | RUN PKG_CONFIG_PATH="/imagemagick_build/lib/pkgconfig" ./configure \ 54 | --prefix="/imagemagick_build" \ 55 | --disable-static \ 56 | --disable-installed \ 57 | --disable-docs \ 58 | --without-utilities \ 59 | --enable-shared \ 60 | --enable-zero-configuration \ 61 | --enable-hdri \ 62 | --with-fftw \ 63 | --with-gslib \ 64 | --with-rsvg \ 65 | --with-wmf 66 | 67 | RUN make -j$(nproc) 68 | 69 | RUN make install 70 | 71 | RUN cp -r -f /imagemagick_build/include /usr/local && cp -r -f /imagemagick_build/lib /usr/local && ldconfig 72 | 73 | WORKDIR /build 74 | 75 | COPY . . 
76 | 77 | RUN cargo build --release 78 | 79 | 80 | FROM debian:bookworm-slim 81 | 82 | RUN adduser --disabled-password \ 83 | --gecos "" \ 84 | --no-create-home \ 85 | user 86 | 87 | WORKDIR /app 88 | 89 | RUN chown user:user /app 90 | 91 | RUN apt update && apt install -y libmagic1 92 | 93 | RUN apt install -y \ 94 | ghostscript \ 95 | libjbig0 \ 96 | libtiff6 \ 97 | libraqm0 \ 98 | libjpeg62-turbo \ 99 | libgs10 \ 100 | liblqr-1-0 \ 101 | libpng16-16 \ 102 | libpstoedit0c2a \ 103 | libdjvulibre21 \ 104 | libfftw3-bin \ 105 | libfontconfig1 \ 106 | libfreetype6 \ 107 | libheif1 \ 108 | libwebpmux3 \ 109 | libwebpdemux2 \ 110 | libwebp7 \ 111 | libwmflite-0.2-7 \ 112 | libxext6 \ 113 | liblzma5 \ 114 | libbz2-1.0 \ 115 | libopenexr-3-1-30 \ 116 | libopenjp2-7 \ 117 | libpangocairo-1.0-0 \ 118 | libpango-1.0-0 \ 119 | libraw20 \ 120 | liblcms2-2 \ 121 | librsvg2-2 \ 122 | libglib2.0-0 \ 123 | libcairo2 \ 124 | libxml2 \ 125 | libgvc6 \ 126 | libcgraph6 \ 127 | libzip4 \ 128 | libgomp1 129 | 130 | RUN rm -rf /var/lib/apt/lists/* 131 | 132 | COPY --from=builder /imagemagick_build/lib /usr/local/lib/ 133 | 134 | RUN ldconfig 135 | 136 | USER user 137 | 138 | COPY --chown=user:user --from=builder /build/target/release/datalith /app/ 139 | 140 | ENTRYPOINT ["/app/datalith"] -------------------------------------------------------------------------------- /datalith-core/src/manager/datalith_manager.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fmt, 3 | fmt::{Debug, Formatter}, 4 | ops::Deref, 5 | time::Duration, 6 | }; 7 | 8 | use chrono::Local; 9 | use tokio_cron_scheduler::{Job, JobScheduler}; 10 | 11 | use crate::{Datalith, DatalithManagerError}; 12 | 13 | /// The Datalith file storage center manager. 
14 | #[derive(Clone)] 15 | pub struct DatalithManager { 16 | datalith: Datalith, 17 | scheduler: JobScheduler, 18 | } 19 | 20 | impl DatalithManager { 21 | pub async fn new(datalith: Datalith) -> Result { 22 | let scheduler = JobScheduler::new().await?; 23 | 24 | { 25 | let datalith = datalith.clone(); 26 | 27 | scheduler 28 | .add(Job::new_repeated_async(Duration::from_secs(60), move |_uuid, _l| { 29 | let datalith = datalith.clone(); 30 | 31 | Box::pin(async move { 32 | match datalith.clear_expired_files(Duration::from_secs(3)).await { 33 | Ok(count) => match count { 34 | 0 => tracing::debug!("no expired file needs to be deleted"), 35 | 1 => tracing::info!("one expired file has been deleted"), 36 | _ => tracing::info!("{count} expired files have been deleted"), 37 | }, 38 | Err(error) => { 39 | tracing::warn!("{error}"); 40 | }, 41 | } 42 | }) 43 | })?) 44 | .await?; 45 | } 46 | 47 | { 48 | let datalith = datalith.clone(); 49 | 50 | scheduler 51 | .add(Job::new_async_tz("0 0 */4 * * *", Local, move |_uuid, _l| { 52 | let datalith = datalith.clone(); 53 | 54 | Box::pin(async move { 55 | match datalith.clear_untracked_files().await { 56 | Ok(count) => match count { 57 | 0 => tracing::debug!("no untracked file needs to be deleted"), 58 | 1 => tracing::info!("one untracked file has been deleted"), 59 | _ => tracing::info!("{count} untracked files haves been deleted"), 60 | }, 61 | Err(error) => { 62 | tracing::warn!("{error}"); 63 | }, 64 | } 65 | }) 66 | })?) 
67 | .await?; 68 | } 69 | 70 | scheduler.start().await?; 71 | 72 | Ok(Self { 73 | datalith, 74 | scheduler, 75 | }) 76 | } 77 | 78 | #[inline] 79 | pub async fn close(mut self) -> Result<(), DatalithManagerError> { 80 | self.scheduler.shutdown().await?; 81 | 82 | self.datalith.close().await; 83 | 84 | Ok(()) 85 | } 86 | } 87 | 88 | impl Debug for DatalithManager { 89 | #[inline] 90 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 91 | Debug::fmt(&self.datalith, f) 92 | } 93 | } 94 | 95 | impl Deref for DatalithManager { 96 | type Target = Datalith; 97 | 98 | #[inline] 99 | fn deref(&self) -> &Self::Target { 100 | &self.datalith 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | datalith.sqlite 2 | datalith.files 3 | datalith.temp 4 | 5 | ### Intellij+all ### 6 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider 7 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 8 | 9 | # User-specific stuff 10 | .idea/**/workspace.xml 11 | .idea/**/tasks.xml 12 | .idea/**/usage.statistics.xml 13 | .idea/**/dictionaries 14 | .idea/**/shelf 15 | 16 | # AWS User-specific 17 | .idea/**/aws.xml 18 | 19 | # Generated files 20 | .idea/**/contentModel.xml 21 | 22 | # Sensitive or high-churn files 23 | .idea/**/dataSources/ 24 | .idea/**/dataSources.ids 25 | .idea/**/dataSources.local.xml 26 | .idea/**/sqlDataSources.xml 27 | .idea/**/dynamic.xml 28 | .idea/**/uiDesigner.xml 29 | .idea/**/dbnavigator.xml 30 | 31 | # Gradle 32 | .idea/**/gradle.xml 33 | .idea/**/libraries 34 | 35 | # Gradle and Maven with auto-import 36 | # When using Gradle or Maven with auto-import, you should exclude module files, 37 | # since they will be recreated, and may cause churn. Uncomment if using 38 | # auto-import. 
39 | # .idea/artifacts 40 | # .idea/compiler.xml 41 | # .idea/jarRepositories.xml 42 | # .idea/modules.xml 43 | # .idea/*.iml 44 | # .idea/modules 45 | # *.iml 46 | # *.ipr 47 | 48 | # CMake 49 | cmake-build-*/ 50 | 51 | # Mongo Explorer plugin 52 | .idea/**/mongoSettings.xml 53 | 54 | # File-based project format 55 | *.iws 56 | 57 | # IntelliJ 58 | out/ 59 | 60 | # mpeltonen/sbt-idea plugin 61 | .idea_modules/ 62 | 63 | # JIRA plugin 64 | atlassian-ide-plugin.xml 65 | 66 | # Cursive Clojure plugin 67 | .idea/replstate.xml 68 | 69 | # SonarLint plugin 70 | .idea/sonarlint/ 71 | 72 | # Crashlytics plugin (for Android Studio and IntelliJ) 73 | com_crashlytics_export_strings.xml 74 | crashlytics.properties 75 | crashlytics-build.properties 76 | fabric.properties 77 | 78 | # Editor-based Rest Client 79 | .idea/httpRequests 80 | 81 | # Android studio 3.1+ serialized cache file 82 | .idea/caches/build_file_checksums.ser 83 | 84 | ### Intellij+all Patch ### 85 | # Ignore everything but code style settings and run configurations 86 | # that are supposed to be shared within teams. 
87 | 88 | .idea/* 89 | 90 | !.idea/codeStyles 91 | !.idea/runConfigurations 92 | 93 | ### Rust ### 94 | # Generated by Cargo 95 | # will have compiled files and executables 96 | debug/ 97 | target/ 98 | 99 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 100 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 101 | Cargo.lock 102 | 103 | # These are backup files generated by rustfmt 104 | **/*.rs.bk 105 | 106 | # MSVC Windows builds of rustc generate these, which store debugging information 107 | *.pdb 108 | 109 | ### Vim ### 110 | # Swap 111 | [._]*.s[a-v][a-z] 112 | !*.svg # comment out if you don't need vector files 113 | [._]*.sw[a-p] 114 | [._]s[a-rt-v][a-z] 115 | [._]ss[a-gi-z] 116 | [._]sw[a-p] 117 | 118 | # Session 119 | Session.vim 120 | Sessionx.vim 121 | 122 | # Temporary 123 | .netrwhist 124 | *~ 125 | # Auto-generated tag files 126 | tags 127 | # Persistent undo 128 | [._]*.un~ 129 | 130 | ### VisualStudioCode ### 131 | .vscode/* 132 | !.vscode/settings.json 133 | !.vscode/tasks.json 134 | !.vscode/launch.json 135 | !.vscode/extensions.json 136 | !.vscode/*.code-snippets 137 | 138 | # Local History for Visual Studio Code 139 | .history/ 140 | 141 | # Built Visual Studio Code Extensions 142 | *.vsix 143 | 144 | ### VisualStudioCode Patch ### 145 | # Ignore all local history of files 146 | .history 147 | .ionide -------------------------------------------------------------------------------- /datalith/src/cli.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | net::{AddrParseError, IpAddr}, 3 | num::ParseIntError, 4 | path::PathBuf, 5 | str::FromStr, 6 | time::Duration, 7 | }; 8 | 9 | use byte_unit::Byte; 10 | use clap::{CommandFactory, FromArgMatches, Parser}; 11 | use concat_with::concat_line; 12 | use terminal_size::terminal_size; 13 | 14 | const APP_NAME: &str = "Datalith"; 15 | const CARGO_PKG_VERSION: &str = 
env!("CARGO_PKG_VERSION"); 16 | const CARGO_PKG_AUTHORS: &str = env!("CARGO_PKG_AUTHORS"); 17 | 18 | const APP_ABOUT: &str = concat!( 19 | "\nExamples:\n", 20 | concat_line!(prefix "datalith ", 21 | " # Start the service using the current working directory as the root of the environment", 22 | "--environment ./db # Start the service using `./db` as the root of the environment", 23 | ) 24 | ); 25 | 26 | #[derive(Debug, Parser)] 27 | #[command(name = APP_NAME)] 28 | #[command(term_width = terminal_size().map(|(width, _)| width.0 as usize).unwrap_or(0))] 29 | #[command(version = CARGO_PKG_VERSION)] 30 | #[command(author = CARGO_PKG_AUTHORS)] 31 | pub struct CLIArgs { 32 | #[arg(long, visible_alias = "addr", env = "DATALITH_ADDRESS")] 33 | #[cfg_attr(debug_assertions, arg(default_value = "127.0.0.1"))] 34 | #[cfg_attr(not(debug_assertions), arg(default_value = "0.0.0.0"))] 35 | #[arg(value_parser = parse_ip_addr)] 36 | #[arg(help = "Assign the address that Datalith binds")] 37 | pub address: IpAddr, 38 | 39 | #[arg(long, env = "DATALITH_LISTEN_PORT")] 40 | #[arg(default_value = "1111")] 41 | #[arg(help = "Assign a TCP port for the HTTP service")] 42 | pub listen_port: u16, 43 | 44 | #[arg(long, env = "DATALITH_ENVIRONMENT")] 45 | #[arg(value_hint = clap::ValueHint::DirPath)] 46 | #[arg(default_value = ".")] 47 | #[arg(help = "Assign the root path of the environment. This should be a directory path")] 48 | pub environment: PathBuf, 49 | 50 | #[arg(long, env = "DATALITH_MAX_FILE_SIZE")] 51 | #[arg(default_value = "2 GiB")] 52 | #[arg(help = "Assign the maximum file size (in bytes) for each of the uploaded files")] 53 | pub max_file_size: Byte, 54 | 55 | #[arg(long, env = "DATALITH_TEMPORARY_FILE_LIFESPAN")] 56 | #[arg(default_value = "60")] 57 | #[arg(help = "Assign the lifespan (in seconds) for each of the uploaded temporary files")] 58 | #[arg(long_help = "Assign the lifespan (in seconds) for each of the uploaded temporary \ 59 | files. 
The lifespan ranges from 1 second to 10,000 hours")] 60 | #[arg(value_parser = parse_duration_sec)] 61 | pub temporary_file_lifespan: Duration, 62 | 63 | #[cfg(feature = "image-convert")] 64 | #[arg(long, env = "DATALITH_MAX_IMAGE_RESOLUTION")] 65 | #[arg(default_value = "50000000")] 66 | #[arg(help = "Assign the maximum resolution (in pixels) for each of the uploaded images")] 67 | pub max_image_resolution: u32, 68 | 69 | #[cfg(feature = "image-convert")] 70 | #[arg(long, env = "DATALITH_MAX_IMAGE_RESOLUTION_MULTIPLIER")] 71 | #[arg(default_value = "3")] 72 | #[arg(help = "Assign the maximum image resolution multiplier for each of the uploaded images")] 73 | pub max_image_resolution_multiplier: u8, 74 | } 75 | 76 | #[inline] 77 | fn parse_ip_addr(arg: &str) -> Result { 78 | IpAddr::from_str(arg) 79 | } 80 | 81 | #[inline] 82 | fn parse_duration_sec(arg: &str) -> Result { 83 | Ok(Duration::from_secs(arg.parse()?)) 84 | } 85 | 86 | pub fn get_args() -> CLIArgs { 87 | let args = CLIArgs::command(); 88 | 89 | let about = format!("{APP_NAME} {CARGO_PKG_VERSION}\n{CARGO_PKG_AUTHORS}\n{APP_ABOUT}"); 90 | 91 | let args = args.about(about); 92 | 93 | let matches = args.get_matches(); 94 | 95 | match CLIArgs::from_arg_matches(&matches) { 96 | Ok(args) => args, 97 | Err(err) => { 98 | err.exit(); 99 | }, 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: [push, pull_request] 4 | 5 | env: 6 | CARGO_TERM_COLOR: always 7 | 8 | jobs: 9 | rustfmt: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | - uses: actions-rust-lang/setup-rust-toolchain@v1 14 | with: 15 | toolchain: nightly 16 | components: rustfmt 17 | - uses: actions-rust-lang/rustfmt@v1 18 | 19 | clippy: 20 | runs-on: ubuntu-latest 21 | steps: 22 | - name: Install ImageMagick 23 | run: | 24 | sudo apt update 25 
| sudo apt install libwebp-dev 26 | wget https://imagemagick.org/archive/ImageMagick.tar.gz 27 | tar xf ImageMagick.tar.gz 28 | cd ImageMagick-* 29 | ./configure --enable-hdri --with-webp 30 | make -j$(nproc) 31 | sudo make install 32 | - run: sudo ldconfig 33 | if: runner.os == 'Linux' 34 | - uses: actions/checkout@v4 35 | - uses: actions-rust-lang/setup-rust-toolchain@v1 36 | with: 37 | components: clippy 38 | - run: cargo clippy --all-targets --all-features -- -D warnings 39 | 40 | tests: 41 | strategy: 42 | fail-fast: false 43 | matrix: 44 | os: 45 | - ubuntu-latest 46 | # - macos-latest 47 | toolchain: 48 | - stable 49 | - nightly 50 | features: 51 | - 52 | - --no-default-features 53 | name: Test ${{ matrix.toolchain }} on ${{ matrix.os }} (${{ matrix.features }}) 54 | runs-on: ${{ matrix.os }} 55 | steps: 56 | - name: Install librsvg2 libwebp libmagic (Linux) 57 | run: | 58 | sudo apt update 59 | sudo apt install librsvg2-dev libwebp-dev libmagic-dev 60 | if: runner.os == 'Linux' 61 | - name: Install libwebp (macOS) 62 | run: | 63 | brew update 64 | brew list webp || brew install webp 65 | if: runner.os == 'macOS' 66 | - name: Install ImageMagick 67 | run: | 68 | wget https://imagemagick.org/archive/ImageMagick.tar.gz 69 | tar xf ImageMagick.tar.gz 70 | cd ImageMagick-* 71 | ./configure --enable-hdri --with-rsvg 72 | make -j$(nproc) 73 | sudo make install 74 | - run: sudo ldconfig 75 | if: runner.os == 'Linux' 76 | - uses: actions/checkout@v4 77 | - uses: actions-rust-lang/setup-rust-toolchain@v1 78 | with: 79 | toolchain: ${{ matrix.toolchain }} 80 | - run: cargo test ${{ matrix.features }} 81 | - run: cargo doc ${{ matrix.features }} 82 | 83 | MSRV: 84 | strategy: 85 | fail-fast: false 86 | matrix: 87 | os: 88 | - ubuntu-latest 89 | # - macos-latest 90 | toolchain: 91 | - 1.83 92 | features: 93 | - 94 | - --no-default-features 95 | name: Test ${{ matrix.toolchain }} on ${{ matrix.os }} (${{ matrix.features }}) 96 | runs-on: ${{ matrix.os }} 97 | steps: 98 | 
- name: Install librsvg2 libwebp libmagic (Linux) 99 | run: | 100 | sudo apt update 101 | sudo apt install librsvg2-dev libwebp-dev libmagic-dev 102 | if: runner.os == 'Linux' 103 | - name: Install libwebp (macOS) 104 | run: | 105 | brew update 106 | brew list webp || brew install webp 107 | if: runner.os == 'macOS' 108 | - name: Install ImageMagick 109 | run: | 110 | wget https://imagemagick.org/archive/ImageMagick.tar.gz 111 | tar xf ImageMagick.tar.gz 112 | cd ImageMagick-* 113 | ./configure --enable-hdri --with-rsvg 114 | make -j$(nproc) 115 | sudo make install 116 | - run: sudo ldconfig 117 | if: runner.os == 'Linux' 118 | - uses: actions/checkout@v4 119 | - uses: actions-rust-lang/setup-rust-toolchain@v1 120 | with: 121 | toolchain: ${{ matrix.toolchain }} 122 | - run: cargo test --lib --bins ${{ matrix.features }} -------------------------------------------------------------------------------- /datalith-core/src/image/datalith_image.rs: -------------------------------------------------------------------------------- 1 | use chrono::{DateTime, Local, TimeZone}; 2 | use educe::Educe; 3 | use uuid::Uuid; 4 | 5 | use crate::DatalithFile; 6 | 7 | /// A struct that represents an image. 8 | #[derive(Debug, Educe)] 9 | #[educe(PartialEq, Eq, Hash)] 10 | pub struct DatalithImage { 11 | id: Uuid, 12 | #[educe(Eq(ignore), Hash(ignore))] 13 | created_at: DateTime, 14 | #[educe(Eq(ignore), Hash(ignore))] 15 | image_stem: String, 16 | #[educe(Eq(ignore), Hash(ignore))] 17 | image_width: u16, 18 | #[educe(Eq(ignore), Hash(ignore))] 19 | image_height: u16, 20 | #[educe(Eq(ignore), Hash(ignore))] 21 | original_file: Option, 22 | #[educe(Eq(ignore), Hash(ignore))] 23 | thumbnails: Vec, 24 | #[educe(Eq(ignore), Hash(ignore))] 25 | fallback_thumbnails: Vec, 26 | #[educe(Eq(ignore), Hash(ignore))] 27 | has_alpha_channel: bool, 28 | } 29 | 30 | impl DatalithImage { 31 | #[allow(clippy::too_many_arguments)] 32 | /// Create an image instance. 
33 | #[inline] 34 | pub(crate) fn new( 35 | id: impl Into, 36 | created_at: DateTime, 37 | image_stem: impl Into, 38 | image_width: u16, 39 | image_height: u16, 40 | original_file: Option, 41 | thumbnails: Vec, 42 | fallback_thumbnails: Vec, 43 | has_alpha_channel: bool, 44 | ) -> Self 45 | where { 46 | let id = id.into(); 47 | let image_stem = image_stem.into(); 48 | 49 | Self { 50 | id, 51 | created_at: created_at.with_timezone(&Local), 52 | image_stem, 53 | image_width, 54 | image_height, 55 | original_file, 56 | thumbnails, 57 | fallback_thumbnails, 58 | has_alpha_channel, 59 | } 60 | } 61 | } 62 | 63 | impl DatalithImage { 64 | /// Retrieve the image ID (UUID). 65 | #[inline] 66 | pub const fn id(&self) -> Uuid { 67 | self.id 68 | } 69 | 70 | /// Retrieve the creation time. 71 | #[inline] 72 | pub const fn created_at(&self) -> DateTime { 73 | self.created_at 74 | } 75 | 76 | /// Retrieve the file stem of this image. 77 | #[inline] 78 | pub const fn image_stem(&self) -> &String { 79 | &self.image_stem 80 | } 81 | 82 | /// Retrieve the width of the 1x image. 83 | #[inline] 84 | pub const fn image_width(&self) -> u16 { 85 | self.image_width 86 | } 87 | 88 | /// Retrieve the height of the 1x image. 89 | #[inline] 90 | pub const fn image_height(&self) -> u16 { 91 | self.image_height 92 | } 93 | 94 | /// Retrieve the original file. 95 | #[inline] 96 | pub const fn original_file(&self) -> Option<&DatalithFile> { 97 | self.original_file.as_ref() 98 | } 99 | 100 | /// Retrieve the thumbnails. (WebP) 101 | #[inline] 102 | pub const fn thumbnails(&self) -> &Vec { 103 | &self.thumbnails 104 | } 105 | 106 | /// Retrieve the fallback thumbnails. (PNG or JPEG) 107 | #[inline] 108 | pub const fn fallback_thumbnails(&self) -> &Vec { 109 | &self.fallback_thumbnails 110 | } 111 | 112 | /// Check if the image has transparency. If it does, the fallback thumbnails are in PNG format; otherwise, they are in JPEG format. 
113 | #[inline] 114 | pub const fn has_alpha_channel(&self) -> bool { 115 | self.has_alpha_channel 116 | } 117 | } 118 | 119 | impl DatalithImage { 120 | /// Convert to the original file. 121 | #[inline] 122 | pub fn into_original_file(self) -> Option { 123 | self.original_file 124 | } 125 | 126 | /// Convert to the thumbnails. (WebP) 127 | #[inline] 128 | pub fn into_thumbnails(self) -> Vec { 129 | self.thumbnails 130 | } 131 | 132 | /// Convert to the fallback thumbnails. (PNG or JPEG) 133 | #[inline] 134 | pub fn into_fallback_thumbnails(self) -> Vec { 135 | self.fallback_thumbnails 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /datalith/src/rocket_mounts/rocket_utils/datalith_response.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use datalith_core::{ 4 | chrono::{DateTime, Local}, 5 | mime::Mime, 6 | Datalith, DatalithFile, DatalithReadError, ReadableDatalithFile, Uuid, 7 | }; 8 | use rocket::{http::Status, response, response::Responder, Request, Response}; 9 | use rocket_etag_if_none_match::{entity_tag::EntityTag, EtagIfNoneMatch}; 10 | 11 | #[derive(Debug)] 12 | pub struct ResponseData { 13 | pub etag: EntityTag<'static>, 14 | pub file: ReadableDatalithFile, 15 | pub download: bool, 16 | pub uuid: Uuid, 17 | pub date: DateTime, 18 | pub file_name: String, 19 | pub file_type: Mime, 20 | pub extra_headers: HashMap<&'static str, String>, 21 | pub is_temporary: bool, 22 | } 23 | 24 | #[derive(Debug)] 25 | pub struct DatalithResponse { 26 | pub(super) data: Option, 27 | } 28 | 29 | impl DatalithResponse { 30 | #[inline] 31 | pub const fn is_temporary(&self) -> bool { 32 | if let Some(data) = self.data.as_ref() { 33 | data.is_temporary 34 | } else { 35 | false 36 | } 37 | } 38 | } 39 | 40 | impl DatalithResponse { 41 | pub async fn from_resource_id<'a>( 42 | datalith: &'a Datalith, 43 | etag_if_none_match: &EtagIfNoneMatch<'a>, 
44 | id: Uuid, 45 | download: bool, 46 | ) -> Result, DatalithReadError> { 47 | let etag = EntityTag::with_string(true, format!("{:x}", id.as_u128())).unwrap(); 48 | 49 | let is_etag_match = etag_if_none_match.weak_eq(&etag); 50 | 51 | if is_etag_match { 52 | Ok(Some(DatalithResponse { 53 | data: None 54 | })) 55 | } else { 56 | let resource = datalith.get_resource_by_id(id).await?; 57 | 58 | match resource { 59 | Some(resource) => { 60 | let uuid = resource.id(); 61 | let date = resource.created_at(); 62 | 63 | let file_name = resource.file_name().clone(); 64 | let file_type = resource.file_type().clone(); 65 | let is_temporary = resource.is_temporary(); 66 | 67 | Ok(Some(Self { 68 | data: Some(ResponseData { 69 | etag, 70 | file: DatalithFile::from(resource).into_readable().await?, 71 | download, 72 | uuid, 73 | date, 74 | file_name, 75 | file_type, 76 | extra_headers: HashMap::new(), 77 | is_temporary, 78 | }), 79 | })) 80 | }, 81 | None => Ok(None), 82 | } 83 | } 84 | } 85 | } 86 | 87 | impl<'r, 'o: 'r> Responder<'r, 'o> for DatalithResponse { 88 | fn respond_to(self, _: &'r Request<'_>) -> response::Result<'o> { 89 | let mut response = Response::build(); 90 | 91 | if let Some(data) = self.data { 92 | if !data.file.is_temporary() { 93 | response.raw_header("etag", data.etag.to_string()); 94 | } 95 | 96 | { 97 | let mut v = format!( 98 | "{}; filename*=UTF-8''", 99 | if data.download { "attachment" } else { "inline" } 100 | ); 101 | 102 | url_escape::encode_component_to_string(data.file_name, &mut v); 103 | 104 | response.raw_header("content-disposition", v); 105 | } 106 | 107 | response.raw_header("x-uuid", data.uuid.to_string()); 108 | response.raw_header("date", data.date.to_rfc2822()); 109 | response.raw_header("content-type", data.file_type.to_string()); 110 | 111 | for (name, value) in data.extra_headers { 112 | response.raw_header(name, value); 113 | } 114 | 115 | response.sized_body(data.file.file_size().try_into().ok(), data.file); 116 | } else { 117 | 
response.status(Status::NotModified); 118 | } 119 | 120 | response.ok() 121 | } 122 | } 123 | -------------------------------------------------------------------------------- /datalith-core/src/datalith_errors.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | error::Error, 3 | fmt::{self, Display, Formatter}, 4 | io, 5 | }; 6 | 7 | use mime::Mime; 8 | 9 | /// Errors occurred during Datalith creation. 10 | #[derive(Debug)] 11 | pub enum DatalithCreateError { 12 | IOError(io::Error), 13 | SQLError(sqlx::Error), 14 | DatabaseTooNewError { app_db_version: u32, current_db_version: u32 }, 15 | DatabaseTooOldError { app_db_version: u32, current_db_version: u32 }, 16 | AlreadyRun, 17 | } 18 | 19 | impl From for DatalithCreateError { 20 | #[inline] 21 | fn from(error: io::Error) -> Self { 22 | Self::IOError(error) 23 | } 24 | } 25 | 26 | impl From for DatalithCreateError { 27 | #[inline] 28 | fn from(error: sqlx::Error) -> Self { 29 | Self::SQLError(error) 30 | } 31 | } 32 | 33 | impl Display for DatalithCreateError { 34 | #[inline] 35 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 36 | match self { 37 | Self::IOError(error) => Display::fmt(error, f), 38 | Self::SQLError(error) => Display::fmt(error, f), 39 | Self::DatabaseTooNewError { 40 | app_db_version, 41 | current_db_version, 42 | } => f.write_fmt(format_args!( 43 | "this application is too old to use the database ({app_db_version} < \ 44 | {current_db_version})" 45 | )), 46 | Self::DatabaseTooOldError { 47 | app_db_version, 48 | current_db_version, 49 | } => f.write_fmt(format_args!( 50 | "this application is too new to upgrade the database ({app_db_version} > \ 51 | {current_db_version})" 52 | )), 53 | Self::AlreadyRun => f.write_str("there is already an existing instance"), 54 | } 55 | } 56 | } 57 | 58 | impl Error for DatalithCreateError {} 59 | 60 | /// Errors occurred during Datalith read operations. 
61 | #[derive(Debug)] 62 | pub enum DatalithReadError { 63 | IOError(io::Error), 64 | SQLError(sqlx::Error), 65 | } 66 | 67 | impl From for DatalithReadError { 68 | #[inline] 69 | fn from(error: io::Error) -> Self { 70 | Self::IOError(error) 71 | } 72 | } 73 | 74 | impl From for DatalithReadError { 75 | #[inline] 76 | fn from(error: sqlx::Error) -> Self { 77 | Self::SQLError(error) 78 | } 79 | } 80 | 81 | impl Display for DatalithReadError { 82 | #[inline] 83 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 84 | match self { 85 | Self::IOError(error) => Display::fmt(error, f), 86 | Self::SQLError(error) => Display::fmt(error, f), 87 | } 88 | } 89 | } 90 | 91 | impl Error for DatalithReadError {} 92 | 93 | /// Errors occurred during Datalith write operations. 94 | #[derive(Debug)] 95 | pub enum DatalithWriteError { 96 | FileTypeInvalid { file_type: Mime, expected_file_type: Mime }, 97 | FileLengthTooLarge { expected_file_length: u64, actual_file_length: u64 }, 98 | IOError(io::Error), 99 | SQLError(sqlx::Error), 100 | } 101 | 102 | impl From for DatalithWriteError { 103 | #[inline] 104 | fn from(error: DatalithReadError) -> Self { 105 | match error { 106 | DatalithReadError::IOError(error) => Self::IOError(error), 107 | DatalithReadError::SQLError(error) => Self::SQLError(error), 108 | } 109 | } 110 | } 111 | 112 | impl From for DatalithWriteError { 113 | #[inline] 114 | fn from(error: io::Error) -> Self { 115 | Self::IOError(error) 116 | } 117 | } 118 | 119 | impl From for DatalithWriteError { 120 | #[inline] 121 | fn from(error: sqlx::Error) -> Self { 122 | Self::SQLError(error) 123 | } 124 | } 125 | 126 | impl Display for DatalithWriteError { 127 | #[inline] 128 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 129 | match self { 130 | Self::FileTypeInvalid { 131 | file_type, 132 | expected_file_type, 133 | } => f.write_fmt(format_args!( 134 | "the file type {file_type:?} is invalid (expect: {expected_file_type:?})" 135 | )), 136 | 
            Self::FileLengthTooLarge {
                expected_file_length,
                actual_file_length,
            } => f.write_fmt(format_args!(
                "the file length {actual_file_length:?} is larger than the expected one (expect: \
                 {expected_file_length:?})"
            )),
            Self::IOError(error) => Display::fmt(error, f),
            Self::SQLError(error) => Display::fmt(error, f),
        }
    }
}

impl Error for DatalithWriteError {}
--------------------------------------------------------------------------------
/datalith-core/README.md:
--------------------------------------------------------------------------------
Datalith Core
====================

A file management system powered by SQLite for metadata storage and the file system for file storage.

## Data Structures

* `File`: Represents a real, concrete file that is physically stored in the file system.
* `Resource`: Represents a regular file of any type. One or more `Resource` entries can point to the same `File`.
* `Image`: Represents any image in a format supported by Datalith. Each `Image` points to multiple `File`s because it includes **the original image file** as well as **thumbnails** in different resolutions and types.

## Examples

#### Put a File

```rust
use datalith_core::{mime, Datalith, FileTypeLevel};
use tokio::io::AsyncReadExt;

let datalith = Datalith::new("datalith").await.unwrap();

let file = datalith.put_file_by_buffer(b"Hello world!", Some("plain.txt"), Some((mime::TEXT_PLAIN_UTF_8, FileTypeLevel::Manual))).await.unwrap();

let mut reader = file.create_reader().await.unwrap();

let mut s = String::new();
reader.read_to_string(&mut s).await.unwrap();

println!("{s}"); // Hello world!
30 | 31 | datalith.close().await; 32 | ``` 33 | 34 | #### Get a File 35 | 36 | ```rust 37 | use std::str::FromStr; 38 | 39 | use datalith_core::{uuid::Uuid, Datalith, FileTypeLevel}; 40 | 41 | let datalith = Datalith::new("datalith").await.unwrap(); 42 | 43 | let file = datalith.get_file_by_id(Uuid::from_str("c31343fc-eae1-4416-809a-a6d96b69b3b9").unwrap()).await.unwrap(); 44 | 45 | if let Some(file) = file { 46 | // do something 47 | } else { 48 | println!("not found"); 49 | } 50 | 51 | datalith.close().await; 52 | ``` 53 | 54 | #### Put a Temporary File 55 | 56 | ```rust 57 | use datalith_core::{mime, Datalith, FileTypeLevel}; 58 | 59 | let datalith = Datalith::new("datalith").await.unwrap(); 60 | 61 | let file_id = datalith.put_file_by_buffer_temporarily(b"Hello world!", Some("plain.txt"), Some((mime::TEXT_PLAIN_UTF_8, FileTypeLevel::Manual))).await.unwrap().id(); 62 | let file = datalith.get_file_by_id(file_id).await.unwrap().unwrap(); // A temporary file can be retrieved using the `get_file_by_id` function only once. After that, it cannot be retrieved again. 63 | 64 | // do something 65 | 66 | datalith.close().await; 67 | ``` 68 | 69 | #### Put a Resource 70 | 71 | ```rust,no_run 72 | use datalith_core::{mime, Datalith, FileTypeLevel}; 73 | use tokio::io::AsyncReadExt; 74 | 75 | let datalith = Datalith::new("datalith").await.unwrap(); 76 | 77 | let resource = datalith.put_resource_by_buffer(b"Hello world!", Some("plain.txt"), Some((mime::TEXT_PLAIN_UTF_8, FileTypeLevel::Manual))).await.unwrap(); 78 | 79 | let mut reader = resource.file().create_reader().await.unwrap(); 80 | 81 | let mut s = String::new(); 82 | reader.read_to_string(&mut s).await.unwrap(); 83 | 84 | println!("{s}"); // Hello world! 
85 | 86 | datalith.close().await; 87 | ``` 88 | 89 | #### Get a Resource 90 | 91 | ```rust,no_run 92 | use std::str::FromStr; 93 | 94 | use datalith_core::{uuid::Uuid, Datalith, FileTypeLevel}; 95 | 96 | let datalith = Datalith::new("datalith").await.unwrap(); 97 | 98 | let resource = datalith.get_resource_by_id(Uuid::from_str("c31343fc-eae1-4416-809a-a6d96b69b3b9").unwrap()).await.unwrap(); 99 | 100 | if let Some(resource) = resource { 101 | // do something 102 | } else { 103 | println!("not found"); 104 | } 105 | 106 | datalith.close().await; 107 | ``` 108 | 109 | #### Put a Temporary Resource 110 | 111 | ```rust,no_run 112 | use datalith_core::{mime, Datalith, FileTypeLevel}; 113 | 114 | let datalith = Datalith::new("datalith").await.unwrap(); 115 | 116 | let resource_id = datalith.put_resource_by_buffer_temporarily(b"Hello world!", Some("plain.txt"), Some((mime::TEXT_PLAIN_UTF_8, FileTypeLevel::Manual))).await.unwrap().id(); 117 | let resource = datalith.get_resource_by_id(resource_id).await.unwrap().unwrap(); // A temporary resource can be retrieved using the `get_resource_by_id` function only once. After that, it cannot be retrieved again. 
118 | 119 | // do something 120 | 121 | datalith.close().await; 122 | ``` 123 | 124 | #### Put an Image 125 | 126 | ```rust 127 | use datalith_core::{mime, CenterCrop, Datalith}; 128 | 129 | let datalith = Datalith::new("datalith").await.unwrap(); 130 | 131 | let image = datalith.put_image_by_path("/path/to/image", Some("my-image"), Some(1280), Some(720), CenterCrop::new(16.0, 9.0), true).await.unwrap(); 132 | 133 | println!("image size: {}x{}", image.image_width(), image.image_height()); 134 | 135 | let original_file = image.original_file(); 136 | let thumbnails = image.thumbnails(); // WebP files (1x, 2x, 3x) 137 | let fallback_thumbnails = image.fallback_thumbnails(); // JPEG or PNG files (1x, 2x, 3x) 138 | 139 | // do something 140 | 141 | datalith.close().await; 142 | ``` 143 | 144 | ## Crates.io 145 | 146 | https://crates.io/crates/datalith 147 | 148 | ## Documentation 149 | 150 | https://docs.rs/datalith 151 | 152 | ## License 153 | 154 | [MIT](LICENSE) -------------------------------------------------------------------------------- /datalith-core/src/datalith_file.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | io, 3 | io::SeekFrom, 4 | ops::Deref, 5 | pin::Pin, 6 | task::{Context, Poll}, 7 | }; 8 | 9 | use chrono::{DateTime, Local, TimeZone}; 10 | use educe::Educe; 11 | use mime::Mime; 12 | use tokio::{ 13 | fs::File, 14 | io::{AsyncRead, AsyncSeek, ReadBuf}, 15 | }; 16 | use uuid::Uuid; 17 | 18 | use crate::{guard::OpenGuard, Datalith}; 19 | 20 | /// A struct that represents a file. 
21 | #[derive(Debug, Educe)] 22 | #[educe(PartialEq, Eq, Hash)] 23 | pub struct DatalithFile { 24 | #[educe(Eq(ignore), Hash(ignore))] 25 | _datalith: Datalith, 26 | #[educe(Eq(ignore), Hash(ignore))] 27 | _guard: OpenGuard, 28 | id: Uuid, 29 | #[educe(Eq(ignore), Hash(ignore))] 30 | created_at: DateTime, 31 | #[educe(Eq(ignore), Hash(ignore))] 32 | file_size: u64, 33 | #[educe(Eq(ignore), Hash(ignore))] 34 | file_type: Mime, 35 | #[educe(Eq(ignore), Hash(ignore))] 36 | file_name: String, 37 | #[educe(Eq(ignore), Hash(ignore))] 38 | is_temporary: bool, 39 | #[educe(Eq(ignore), Hash(ignore))] 40 | is_new: bool, 41 | } 42 | 43 | impl DatalithFile { 44 | /// Create a file instance. 45 | #[allow(clippy::too_many_arguments)] 46 | #[inline] 47 | pub(crate) fn new( 48 | datalith: Datalith, 49 | guard: OpenGuard, 50 | id: impl Into, 51 | created_at: DateTime, 52 | file_size: impl Into, 53 | file_type: Mime, 54 | file_name: impl Into, 55 | is_temporary: bool, 56 | is_new: bool, 57 | ) -> Self 58 | where { 59 | let id = id.into(); 60 | 61 | Self { 62 | _datalith: datalith, 63 | _guard: guard, 64 | id, 65 | created_at: created_at.with_timezone(&Local), 66 | file_size: file_size.into(), 67 | file_type, 68 | file_name: file_name.into(), 69 | is_temporary, 70 | is_new, 71 | } 72 | } 73 | } 74 | 75 | impl DatalithFile { 76 | /// Retrieve the file ID (UUID). 77 | #[inline] 78 | pub const fn id(&self) -> Uuid { 79 | self.id 80 | } 81 | 82 | /// Retrieve the creation time. 83 | #[inline] 84 | pub const fn created_at(&self) -> DateTime { 85 | self.created_at 86 | } 87 | 88 | /// Retrieve the file type (MIME). 89 | #[inline] 90 | pub const fn file_type(&self) -> &Mime { 91 | &self.file_type 92 | } 93 | 94 | /// Retrieve the file size (in bytes). 95 | #[inline] 96 | pub const fn file_size(&self) -> u64 { 97 | self.file_size 98 | } 99 | 100 | /// Retrieve the file name. 
101 | #[inline] 102 | pub const fn file_name(&self) -> &String { 103 | &self.file_name 104 | } 105 | 106 | /// Check if this file is temporary. 107 | #[inline] 108 | pub const fn is_temporary(&self) -> bool { 109 | self.is_temporary 110 | } 111 | 112 | /// Check if this file is a new file. 113 | #[inline] 114 | pub const fn is_new(&self) -> bool { 115 | self.is_new 116 | } 117 | } 118 | 119 | impl DatalithFile { 120 | /// Create an reader. 121 | #[inline] 122 | pub async fn create_reader(&self) -> io::Result { 123 | let file_path = self._datalith.get_file_path(self.id).await?; 124 | 125 | let file = File::open(file_path).await?; 126 | 127 | Ok(DatalithFileReader { 128 | _file: self, 129 | file, 130 | }) 131 | } 132 | 133 | /// Create a readable . 134 | #[inline] 135 | pub async fn into_readable(self) -> io::Result { 136 | let file_path = self._datalith.get_file_path(self.id).await?; 137 | 138 | let file = File::open(file_path).await?; 139 | 140 | Ok(ReadableDatalithFile { 141 | _file: self, 142 | file, 143 | }) 144 | } 145 | } 146 | 147 | /// A struct that provides an asynchronous read interface for files. 148 | #[derive(Debug)] 149 | pub struct DatalithFileReader<'a> { 150 | _file: &'a DatalithFile, 151 | file: File, 152 | } 153 | 154 | impl AsyncRead for DatalithFileReader<'_> { 155 | #[inline] 156 | fn poll_read( 157 | mut self: Pin<&mut Self>, 158 | cx: &mut Context<'_>, 159 | buf: &mut ReadBuf<'_>, 160 | ) -> Poll> { 161 | Pin::new(&mut self.file).poll_read(cx, buf) 162 | } 163 | } 164 | 165 | /// A struct that represents a file and provides an asynchronous read interface for files. 
166 | #[derive(Debug)] 167 | pub struct ReadableDatalithFile { 168 | _file: DatalithFile, 169 | file: File, 170 | } 171 | 172 | impl Deref for ReadableDatalithFile { 173 | type Target = DatalithFile; 174 | 175 | #[inline] 176 | fn deref(&self) -> &Self::Target { 177 | &self._file 178 | } 179 | } 180 | 181 | impl AsyncRead for ReadableDatalithFile { 182 | #[inline] 183 | fn poll_read( 184 | mut self: Pin<&mut Self>, 185 | cx: &mut Context<'_>, 186 | buf: &mut ReadBuf<'_>, 187 | ) -> Poll> { 188 | Pin::new(&mut self.file).poll_read(cx, buf) 189 | } 190 | } 191 | 192 | impl AsyncSeek for ReadableDatalithFile { 193 | #[inline] 194 | fn start_seek(mut self: Pin<&mut Self>, position: SeekFrom) -> io::Result<()> { 195 | Pin::new(&mut self.file).start_seek(position) 196 | } 197 | 198 | #[inline] 199 | fn poll_complete(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 200 | Pin::new(&mut self.file).poll_complete(cx) 201 | } 202 | } 203 | -------------------------------------------------------------------------------- /datalith-core/src/guard.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "image-convert")] 2 | use std::collections::HashSet; 3 | use std::{fs, path::PathBuf, time::Duration}; 4 | 5 | use tokio::time; 6 | use uuid::Uuid; 7 | 8 | use crate::Datalith; 9 | 10 | #[derive(Debug)] 11 | pub(crate) struct PutGuard { 12 | _datalith: Datalith, 13 | hash: [u8; 32], 14 | } 15 | 16 | impl Drop for PutGuard { 17 | #[inline] 18 | fn drop(&mut self) { 19 | let mut uploading_files = self._datalith.0._uploading_files.lock().unwrap(); 20 | 21 | uploading_files.remove(&self.hash); 22 | } 23 | } 24 | 25 | impl PutGuard { 26 | pub async fn new(datalith: Datalith, hash: [u8; 32]) -> Self { 27 | loop { 28 | { 29 | let mut uploading_files = datalith.0._uploading_files.lock().unwrap(); 30 | 31 | if !uploading_files.contains(&hash) { 32 | uploading_files.insert(hash); 33 | break; 34 | } 35 | } 36 | 37 | 
time::sleep(Duration::from_millis(10)).await; 38 | } 39 | 40 | Self { 41 | _datalith: datalith, 42 | hash, 43 | } 44 | } 45 | } 46 | 47 | #[derive(Debug)] 48 | pub(crate) struct OpenGuard { 49 | _datalith: Datalith, 50 | id: Uuid, 51 | } 52 | 53 | impl Drop for OpenGuard { 54 | #[inline] 55 | fn drop(&mut self) { 56 | // recover the count 57 | 58 | let mut opening_files = self._datalith.0._opening_files.lock().unwrap(); 59 | 60 | let need_remove = { 61 | let id = opening_files.get_mut(&self.id).unwrap(); 62 | 63 | match *id { 64 | 0 | 1 => true, 65 | _ => { 66 | *id -= 1; 67 | 68 | false 69 | }, 70 | } 71 | }; 72 | 73 | if need_remove { 74 | opening_files.remove(&self.id); 75 | } 76 | } 77 | } 78 | 79 | impl OpenGuard { 80 | pub async fn new(datalith: Datalith, id: impl Into) -> Self { 81 | let id = id.into(); 82 | 83 | // increase the count 84 | { 85 | let mut opening_files = datalith.0._opening_files.lock().unwrap(); 86 | 87 | if let Some(opening_count) = opening_files.get_mut(&id) { 88 | *opening_count += 1; 89 | } else { 90 | opening_files.insert(id, 1); 91 | } 92 | } 93 | 94 | Self { 95 | _datalith: datalith, 96 | id, 97 | } 98 | } 99 | } 100 | 101 | #[derive(Debug)] 102 | pub(crate) struct DeleteGuard { 103 | _datalith: Datalith, 104 | pub(crate) id: Uuid, 105 | } 106 | 107 | impl Drop for DeleteGuard { 108 | #[inline] 109 | fn drop(&mut self) { 110 | let mut _deleting_files = self._datalith.0._deleting_files.lock().unwrap(); 111 | 112 | _deleting_files.remove(&self.id); 113 | } 114 | } 115 | 116 | impl DeleteGuard { 117 | pub async fn new(datalith: Datalith, id: impl Into) -> Self { 118 | let id = id.into(); 119 | 120 | loop { 121 | { 122 | let mut deleting_files = datalith.0._deleting_files.lock().unwrap(); 123 | 124 | if !deleting_files.contains(&id) { 125 | deleting_files.insert(id); 126 | break; 127 | } 128 | } 129 | 130 | time::sleep(Duration::from_millis(10)).await; 131 | } 132 | 133 | Self { 134 | _datalith: datalith, 135 | id, 136 | } 137 | } 138 | 
139 | #[cfg(feature = "image-convert")] 140 | #[inline] 141 | pub async fn acquire_multiple(guards: &mut Vec, datalith: Datalith, ids: &HashSet) { 142 | while !Self::acquire_multiple_immediately(guards, datalith.clone(), ids).await { 143 | time::sleep(Duration::from_millis(10)).await; 144 | } 145 | } 146 | 147 | #[cfg(feature = "image-convert")] 148 | pub async fn acquire_multiple_immediately( 149 | guards: &mut Vec, 150 | datalith: Datalith, 151 | ids: &HashSet, 152 | ) -> bool { 153 | let mut deleting_files = datalith.0._deleting_files.lock().unwrap(); 154 | let mut buffer = Vec::with_capacity(ids.len()); 155 | 156 | for id in ids { 157 | if deleting_files.contains(id) { 158 | // there is a lock cannot be acquired 159 | 160 | // rollback 161 | for id in buffer { 162 | deleting_files.remove(id); 163 | } 164 | 165 | return false; 166 | } else { 167 | deleting_files.insert(*id); 168 | buffer.push(id); 169 | } 170 | } 171 | 172 | for id in ids { 173 | guards.push(Self { 174 | _datalith: datalith.clone(), id: *id 175 | }) 176 | } 177 | 178 | true 179 | } 180 | } 181 | 182 | #[derive(Debug)] 183 | pub(crate) struct TemporaryFileGuard { 184 | moved: bool, 185 | file_path: PathBuf, 186 | } 187 | 188 | impl Drop for TemporaryFileGuard { 189 | #[inline] 190 | fn drop(&mut self) { 191 | if !self.moved { 192 | let _ = fs::remove_file(self.file_path.as_path()); 193 | } 194 | } 195 | } 196 | 197 | impl TemporaryFileGuard { 198 | #[inline] 199 | pub fn new(file_path: impl Into) -> Self { 200 | let file_path = file_path.into(); 201 | 202 | Self { 203 | moved: false, 204 | file_path, 205 | } 206 | } 207 | 208 | #[inline] 209 | pub fn set_moved(&mut self) { 210 | self.moved = true; 211 | } 212 | } 213 | -------------------------------------------------------------------------------- /datalith/src/rocket_mounts/rocket_utils/datalith_response_image.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashMap, fmt::Write, 
path::Path};

use datalith_core::{get_image_extension, mime, Datalith, DatalithReadError, Uuid, MIME_WEBP};
use rocket::{
    form,
    form::{FromFormField, ValueField},
};
use rocket_etag_if_none_match::{entity_tag::EntityTag, EtagIfNoneMatch};

use super::{DatalithResponse, ResponseData};

/// The resolution requested for an image: either the originally uploaded file
/// or an `N`x thumbnail multiplier (1x, 2x, 3x, ...).
#[derive(Debug)]
pub enum ResolutionType {
    Original,
    Multiplier(u8),
}

#[rocket::async_trait]
impl<'r> FromFormField<'r> for ResolutionType {
    /// Parse a query/form value: the literal `original` (case-insensitive) or
    /// a number suffixed with `x`, e.g. `2x`.
    fn from_value(field: ValueField<'r>) -> form::Result<'r, Self> {
        if field.value.eq_ignore_ascii_case("original") {
            return Ok(Self::Original);
        }

        if let Some(v) = field.value.strip_suffix("x") {
            if let Ok(v) = v.parse::() {
                return Ok(Self::Multiplier(v));
            }
        }

        let mut errors = form::Errors::new();

        errors.push(form::Error::validation("not 1x, 2x, 3x, ..., etc"));

        Err(errors)
    }
}

impl DatalithResponse {
    /// Build an HTTP response for image `id`.
    ///
    /// Returns `Ok(None)` when the image does not exist. When the client's
    /// `If-None-Match` ETag already matches, returns a response with
    /// `data: None` (the not-modified case). `fallback` selects JPEG/PNG
    /// thumbnails instead of WebP; `download` requests attachment disposition.
    pub async fn from_image_id<'a>(
        datalith: &'a Datalith,
        etag_if_none_match: &EtagIfNoneMatch<'a>,
        id: Uuid,
        resolution_type: Option,
        fallback: bool,
        download: bool,
    ) -> Result, DatalithReadError> {
        // The ETag derives from the immutable image ID, so it never changes
        // for a given image.
        let etag = EntityTag::with_string(true, format!("{:x}", id.as_u128())).unwrap();

        let is_etag_match = etag_if_none_match.weak_eq(&etag);

        if is_etag_match {
            // client cache is fresh: body-less response
            Ok(Some(DatalithResponse {
                data: None
            }))
        } else {
            // default to the 1x thumbnail when no resolution was requested
            let resolution_type = resolution_type.unwrap_or(ResolutionType::Multiplier(1));

            let image = datalith.get_image_by_id(id).await?;

            match image {
                Some(image) => {
                    let uuid = image.id();
                    let date = image.created_at();

                    let mut file_name = image.image_stem().clone();
                    let image_width = image.image_width();
                    let image_height = image.image_height();
                    let has_alpha_channel = image.has_alpha_channel();

                    let mut extra_headers = HashMap::with_capacity(2);

                    // `multiplier == 0` is used below to mean "the original
                    // file is being served".
                    let (file, multiplier) = match resolution_type {
                        ResolutionType::Original => {
                            if image.original_file().is_some() {
                                (image.into_original_file().unwrap(), 0)
                            } else {
                                // no original stored: serve the largest
                                // available thumbnail instead
                                let v = if fallback {
                                    image.into_fallback_thumbnails()
                                } else {
                                    image.into_thumbnails()
                                };
                                let multiplier = v.len();

                                (v.into_iter().next_back().unwrap(), multiplier)
                            }
                        },
                        ResolutionType::Multiplier(multiplier) => {
                            // clamp the request to the thumbnails that exist
                            let multiplier =
                                (multiplier as usize).clamp(1, image.thumbnails().len());
                            let v = if fallback {
                                image.into_fallback_thumbnails()
                            } else {
                                image.into_thumbnails()
                            };

                            (v.into_iter().nth(multiplier - 1).unwrap(), multiplier)
                        },
                    };

                    let file_type = if multiplier == 0 {
                        // original file: keep its own MIME type; extend the
                        // name with a known extension when one can be derived
                        if let Some(ext) = get_image_extension(file.file_type()).or_else(|| {
                            Path::new(file.file_name()).extension().and_then(|e| e.to_str())
                        }) {
                            file_name.push('.');
                            file_name.push_str(ext);
                        }

                        file.file_type().clone()
                    } else {
                        // thumbnail: extension/MIME depend on the fallback
                        // flag and the alpha channel
                        let (ext, file_type) = if fallback {
                            if has_alpha_channel {
                                ("png", mime::IMAGE_PNG)
                            } else {
                                ("jpg", mime::IMAGE_JPEG)
                            }
                        } else {
                            ("webp", MIME_WEBP.clone())
                        };

                        file_name.write_fmt(format_args!("@{multiplier}x.{ext}")).unwrap();

                        let multiplier_u16 = multiplier as u16;

                        // advertise the thumbnail's pixel dimensions
                        extra_headers
                            .insert("x-image-width", (image_width * multiplier_u16).to_string());
                        extra_headers
                            .insert("x-image-height", (image_height * multiplier_u16).to_string());

                        file_type
                    };

                    Ok(Some(Self {
                        data: Some(ResponseData {
                            etag,
                            file: file.into_readable().await?,
                            download,
                            uuid,
                            date,
                            file_name,
                            file_type,
                            extra_headers,
                            is_temporary: false,
                        }),
                    }))
                },
                None => Ok(None),
            }
        }
    }
}
--------------------------------------------------------------------------------
/datalith-core/tests/auto_file_extension.rs:
--------------------------------------------------------------------------------
mod global;

use global::*;
use tokio::fs::File;

/// End-to-end check that an extension is appended to a bare file stem for
/// every upload path (buffer/path/reader × temporary/permanent). Without the
/// `magic` feature, buffer/reader uploads cannot sniff the type and fall back
/// to `.bin`; path uploads can still guess from the path.
#[tokio::test]
async fn auto_file_extension() {
    let datalith = datalith_init().await;

    let image = IMAGE_DATA.as_ref();

    {
        let id = {
            let file = datalith
                .put_file_by_buffer_temporarily(image, Some("MagicLen"), None)
                .await
                .unwrap();

            #[cfg(feature = "magic")]
            assert_eq!(&mime::IMAGE_PNG, file.file_type());
            #[cfg(feature = "magic")]
            assert_eq!("MagicLen.png", file.file_name());
            #[cfg(not(feature = "magic"))]
            assert_eq!("MagicLen.bin", file.file_name());

            file.id()
        };

        // delete
        assert!(datalith.delete_file_by_id(id).await.unwrap());
    }

    {
        let id = {
            let file = datalith.put_file_by_buffer(image, Some("MagicLen"), None).await.unwrap();

            #[cfg(feature = "magic")]
            assert_eq!(&mime::IMAGE_PNG, file.file_type());
            #[cfg(feature = "magic")]
            assert_eq!("MagicLen.png", file.file_name());
            #[cfg(not(feature = "magic"))]
            assert_eq!("MagicLen.bin", file.file_name());

            file.id()
        };

        // delete
        assert!(datalith.delete_file_by_id(id).await.unwrap());
    }

    {
        let id = {
            let file = datalith
                .put_file_by_path_temporarily(IMAGE_PATH, Some("MagicLen"), None)
                .await
                .unwrap();

            #[cfg(feature = "magic")]
            assert_eq!(&mime::IMAGE_PNG, file.file_type());
            assert_eq!("MagicLen.png", file.file_name());

            file.id()
        };

        // delete
        assert!(datalith.delete_file_by_id(id).await.unwrap());
    }

    {
        let id = {
            let file = datalith.put_file_by_path(IMAGE_PATH, Some("MagicLen"), None).await.unwrap();

            #[cfg(feature = "magic")]
            assert_eq!(&mime::IMAGE_PNG, file.file_type());
            assert_eq!("MagicLen.png", file.file_name());

            file.id()
        };

        // delete
        assert!(datalith.delete_file_by_id(id).await.unwrap());
    }

    {
        let id = {
            let mut file = File::open(IMAGE_PATH).await.unwrap();

            let file = datalith
                .put_file_by_reader_temporarily(&mut file, Some("MagicLen"), None, Some(IMAGE_SIZE))
                .await
                .unwrap();

            #[cfg(feature = "magic")]
            assert_eq!(&mime::IMAGE_PNG, file.file_type());
            #[cfg(feature = "magic")]
            assert_eq!("MagicLen.png", file.file_name());
            #[cfg(not(feature = "magic"))]
            assert_eq!("MagicLen.bin", file.file_name());

            file.id()
        };

        // delete
        assert!(datalith.delete_file_by_id(id).await.unwrap());
    }

    {
        let id = {
            let mut file = File::open(IMAGE_PATH).await.unwrap();

            let file = datalith
                .put_file_by_reader(&mut file, Some("MagicLen"), None, Some(IMAGE_SIZE))
                .await
                .unwrap();

            #[cfg(feature = "magic")]
            assert_eq!(&mime::IMAGE_PNG, file.file_type());
            #[cfg(feature = "magic")]
            assert_eq!("MagicLen.png", file.file_name());
            #[cfg(not(feature = "magic"))]
            assert_eq!("MagicLen.bin", file.file_name());

            file.id()
        };

        // delete
        assert!(datalith.delete_file_by_id(id).await.unwrap());
    }

    datalith_close(datalith).await;
}

/// Same extension check for image uploads (buffer/path/reader), asserting the
/// stored original file keeps the PNG type and gains a `.png` extension.
#[cfg(feature = "image-convert")]
#[tokio::test]
async fn image_auto_file_extension() {
    let datalith = datalith_init().await;

    let image = IMAGE_DATA.as_ref();

    {
        let id = {
            let image = datalith
                .put_image_by_buffer(image, Some("MagicLen"), Some(32), None, None, true)
                .await
                .unwrap();

            let original_file = image.original_file().unwrap();
            assert_eq!(&mime::IMAGE_PNG, original_file.file_type());
            assert_eq!("MagicLen.png", original_file.file_name());

            image.id()
        };

        // delete
        assert!(datalith.delete_image_by_id(id).await.unwrap());
    }

    {
        let id = {
            let image = datalith
                .put_image_by_path(IMAGE_PATH, Some("MagicLen"), Some(32), None, None, true)
                .await
                .unwrap();

            let original_file = image.original_file().unwrap();
            assert_eq!(&mime::IMAGE_PNG, original_file.file_type());
            assert_eq!("MagicLen.png", original_file.file_name());

            image.id()
        };

        // delete
        assert!(datalith.delete_image_by_id(id).await.unwrap());
    }

    {
        let id = {
            let mut file = File::open(IMAGE_PATH).await.unwrap();

            let image = datalith
                .put_image_by_reader(
                    &mut file,
                    Some("MagicLen"),
                    Some(32),
                    None,
                    None,
                    true,
                    Some(IMAGE_SIZE),
                )
                .await
                .unwrap();

            let original_file = image.original_file().unwrap();
            assert_eq!(&mime::IMAGE_PNG, original_file.file_type());
            assert_eq!("MagicLen.png", original_file.file_name());

            image.id()
        };

        // delete
        assert!(datalith.delete_image_by_id(id).await.unwrap());
    }

    datalith_close(datalith).await;
}
--------------------------------------------------------------------------------
/datalith-core/src/lib.rs:
--------------------------------------------------------------------------------
/*!
# Datalith Core

A file management system powered by SQLite for metadata storage and the file system for file storage.

## Data Structures

* `File`: Represents a real, concrete file that is physically stored in the file system.
* `Resource`: Represents a regular file of any type. One or more `Resource` entries can point to the same `File`.
* `Image`: Represents any image in a format supported by Datalith. Each `Image` points to multiple `File`s because it includes **the original image file** as well as **thumbnails** in different resolutions and types.

## Examples

#### Put a File

```rust,no_run
use datalith_core::{mime, Datalith, FileTypeLevel};
use tokio::io::AsyncReadExt;

# #[tokio::main(flavor = "current_thread")]
# async fn main() {
let datalith = Datalith::new("datalith").await.unwrap();

let file = datalith.put_file_by_buffer(b"Hello world!", Some("plain.txt"), Some((mime::TEXT_PLAIN_UTF_8, FileTypeLevel::Manual))).await.unwrap();

let mut reader = file.create_reader().await.unwrap();

let mut s = String::new();
reader.read_to_string(&mut s).await.unwrap();

println!("{s}"); // Hello world!

datalith.close().await;
# }
```

#### Get a File

```rust,no_run
use std::str::FromStr;

use datalith_core::{uuid::Uuid, Datalith, FileTypeLevel};

# #[tokio::main(flavor = "current_thread")]
# async fn main() {
let datalith = Datalith::new("datalith").await.unwrap();

let file = datalith.get_file_by_id(Uuid::from_str("c31343fc-eae1-4416-809a-a6d96b69b3b9").unwrap()).await.unwrap();

if let Some(file) = file {
    // do something
} else {
    println!("not found");
}

datalith.close().await;
# }
```

#### Put a Temporary File

```rust,no_run
use datalith_core::{mime, Datalith, FileTypeLevel};

# #[tokio::main(flavor = "current_thread")]
# async fn main() {
let datalith = Datalith::new("datalith").await.unwrap();

let file_id = datalith.put_file_by_buffer_temporarily(b"Hello world!", Some("plain.txt"), Some((mime::TEXT_PLAIN_UTF_8, FileTypeLevel::Manual))).await.unwrap().id();
let file = datalith.get_file_by_id(file_id).await.unwrap().unwrap(); // A temporary file can be retrieved using the `get_file_by_id` function only once. After that, it cannot be retrieved again.

// do something

datalith.close().await;
# }
```

#### Put a Resource

```rust,no_run
use datalith_core::{mime, Datalith, FileTypeLevel};
use tokio::io::AsyncReadExt;

# #[tokio::main(flavor = "current_thread")]
# async fn main() {
let datalith = Datalith::new("datalith").await.unwrap();

let resource = datalith.put_resource_by_buffer(b"Hello world!", Some("plain.txt"), Some((mime::TEXT_PLAIN_UTF_8, FileTypeLevel::Manual))).await.unwrap();

let mut reader = resource.file().create_reader().await.unwrap();

let mut s = String::new();
reader.read_to_string(&mut s).await.unwrap();

println!("{s}"); // Hello world!

datalith.close().await;
# }
```

#### Get a Resource

```rust,no_run
use std::str::FromStr;

use datalith_core::{uuid::Uuid, Datalith, FileTypeLevel};

# #[tokio::main(flavor = "current_thread")]
# async fn main() {
let datalith = Datalith::new("datalith").await.unwrap();

let resource = datalith.get_resource_by_id(Uuid::from_str("c31343fc-eae1-4416-809a-a6d96b69b3b9").unwrap()).await.unwrap();

if let Some(resource) = resource {
    // do something
} else {
    println!("not found");
}

datalith.close().await;
# }
```

#### Put a Temporary Resource

```rust,no_run
use datalith_core::{mime, Datalith, FileTypeLevel};

# #[tokio::main(flavor = "current_thread")]
# async fn main() {
let datalith = Datalith::new("datalith").await.unwrap();

let resource_id = datalith.put_resource_by_buffer_temporarily(b"Hello world!", Some("plain.txt"), Some((mime::TEXT_PLAIN_UTF_8, FileTypeLevel::Manual))).await.unwrap().id();
let resource = datalith.get_resource_by_id(resource_id).await.unwrap().unwrap(); // A temporary resource can be retrieved using the `get_resource_by_id` function only once. After that, it cannot be retrieved again.

// do something

datalith.close().await;
# }
```

#### Put an Image

```rust,no_run
# #[cfg(feature = "image-convert")]
use datalith_core::{mime, CenterCrop, Datalith};

# #[cfg(feature = "image-convert")]
# #[tokio::main(flavor = "current_thread")]
# async fn main() {
let datalith = Datalith::new("datalith").await.unwrap();

let image = datalith.put_image_by_path("/path/to/image", Some("my-image"), Some(1280), Some(720), CenterCrop::new(16.0, 9.0), true).await.unwrap();

println!("image size: {}x{}", image.image_width(), image.image_height());

let original_file = image.original_file();
let thumbnails = image.thumbnails(); // WebP files (1x, 2x, 3x)
let fallback_thumbnails = image.fallback_thumbnails(); // JPEG or PNG files (1x, 2x, 3x)

// do something

datalith.close().await;
# }
#
# #[cfg(not(feature = "image-convert"))]
# fn main () {}
```
*/

pub extern crate chrono;
pub extern crate mime;
pub extern crate uuid;

mod datalith;
mod datalith_errors;
mod datalith_file;
mod functions;
mod guard;
#[cfg(feature = "image-convert")]
mod image;
#[cfg(feature = "magic")]
mod magic_cookie_pool;
#[cfg(feature = "manager")]
mod manager;
mod resources;

pub use datalith::*;
pub use datalith_errors::*;
pub use datalith_file::*;
#[cfg(feature = "image-convert")]
pub use functions::get_image_extension;
#[cfg(feature = "image-convert")]
pub use image::*;
#[cfg(feature = "manager")]
pub use manager::*;
use mime::{Mime, APPLICATION_OCTET_STREAM};
pub use rdb_pagination::{OrderMethod, OrderMethodValue, Pagination, PaginationOptions};
pub use resources::*;

/// The default mime type.
pub const DEFAULT_MIME_TYPE: Mime = APPLICATION_OCTET_STREAM;

/// A string of an encrypted file ID which can be used as a URL component.
pub type IDToken = String;
--------------------------------------------------------------------------------
/datalith-core/src/functions.rs:
--------------------------------------------------------------------------------
#[cfg(feature = "magic")]
use std::str::FromStr;
use std::{
    io,
    io::ErrorKind,
    path::{Path, PathBuf},
    sync::Arc,
    time::{SystemTime, UNIX_EPOCH},
};

use chrono::{DateTime, TimeZone};
use mime::Mime;
#[cfg(feature = "magic")]
use once_cell::sync::Lazy;
use rand::TryRngCore;
use sha2::{Digest, Sha256};
#[cfg(feature = "magic")]
use tokio::task;
use tokio::{fs::File, io::AsyncReadExt};
use trim_in_place::TrimInPlace;

#[cfg(feature = "magic")]
use crate::magic_cookie_pool::MagicCookiePool;

/// The buffer size used when reading a file.
pub(crate) const BUFFER_SIZE: usize = 64 * 1024;

// Pool of libmagic cookies, sized at twice the CPU count so that blocking
// type-detection calls rarely have to wait for a free cookie.
#[cfg(feature = "magic")]
static MAGIC_COOKIE: Lazy> =
    Lazy::new(|| MagicCookiePool::new(num_cpus::get() * 2));

/// Detect the MIME type of in-memory data via libmagic; `None` when the pool
/// is unavailable or detection fails.
#[cfg(feature = "magic")]
pub(crate) async fn detect_file_type_by_buffer(file_data: impl AsRef<[u8]>) -> Option {
    if let Some(magic_cookie) = MAGIC_COOKIE.as_ref() {
        let cookie = magic_cookie.acquire_cookie().await;

        match cookie.buffer(file_data.as_ref()) {
            Ok(result) => Mime::from_str(&result).ok(),
            Err(_) => None,
        }
    } else {
        None
    }
}

/// Without the `magic` feature, buffer-based detection is unavailable.
#[cfg(not(feature = "magic"))]
#[inline]
pub(crate) async fn detect_file_type_by_buffer(_file_data: impl AsRef<[u8]>) -> Option {
    None
}

/// Detect the MIME type of a file on disk: first via libmagic (when enabled),
/// then — only if `detect_using_path` is set — by guessing from the path's
/// extension.
pub(crate) async fn detect_file_type_by_path(
    file_path: impl Into,
    detect_using_path: bool,
) -> Option {
    let file_path = Arc::new(file_path.into());

    #[cfg(feature = "magic")]
    if let Some(magic_cookie) = MAGIC_COOKIE.as_ref() {
        let file_path = file_path.clone();

        // libmagic file inspection is blocking I/O, so run it off the async
        // executor threads
        let result = task::spawn_blocking(move || {
            let cookie = magic_cookie.acquire_cookie_sync();

            cookie.file(file_path.as_path())
        })
        .await
        .unwrap();

        if let Ok(result) = result {
            return Mime::from_str(&result).ok();
        }
    }

    if detect_using_path {
        file_path
            .extension()
            .and_then(|extension| extension.to_str())
            .map(|extension| mime_guess::from_ext(extension).first_or_octet_stream())
    } else {
        None
    }
}

/// Current Unix time in milliseconds.
#[inline]
pub(crate) fn get_current_timestamp() -> i64 {
    SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis() as i64
}

/// Produce a final file name: trims the supplied name, substitutes the
/// millisecond timestamp when the name is empty, and appends an extension
/// derived from `mime_type` when the name has none.
pub(crate) fn get_file_name(
    file_name: Option>,
    date_time: DateTime,
    mime_type: &Mime,
) -> String {
    let mut file_name = if let Some(file_name) = file_name {
        let mut file_name = file_name.into();

        file_name.trim_in_place();

        file_name
    } else {
        String::new()
    };

    let ext = get_mime_extension(mime_type);

    if file_name.is_empty() {
        // no usable name: fall back to the upload timestamp
        if let Some(ext) = ext {
            format!("{}.{}", date_time.timestamp_millis(), ext)
        } else {
            date_time.timestamp_millis().to_string()
        }
    } else {
        // only append an extension when the name does not already have one
        if Path::new(file_name.as_str()).extension().is_none() {
            if let Some(ext) = ext {
                file_name.push('.');
                file_name.push_str(ext);
            }
        }

        file_name
    }
}

/// Map a MIME type to a preferred file extension, with explicit overrides for
/// types whose first `mime_guess` extension would be unusual.
#[inline]
fn get_mime_extension(mime_type: &Mime) -> Option<&'static str> {
    match mime_type.subtype() {
        mime::JPEG => Some("jpg"),
        mime::GIF => Some("gif"),
        mime::PNG => Some("png"),
        mime::BMP => Some("bmp"),
        mime::SVG => Some("svg"),
        mime::OCTET_STREAM => Some("bin"),
        _ => match mime_type.essence_str() {
            "image/webp" => Some("webp"),
            "image/heic" => Some("heic"),
            "application/vnd.rar" | "application/x-rar" => Some("rar"),
            "application/x-iso9660-image" => Some("iso"),
            "application/x-ms-installer" | "application/x-msi" => Some("msi"),
            _ => mime_guess::get_mime_extensions(mime_type).map(|e| e[0]),
        },
    }
}

#[cfg(feature = "image-convert")]
/// Get an image extension for a given Mime.
///
/// This function allows you to generate a file name based on `image_stem` and `file_type`.
149 | #[inline] 150 | pub fn get_image_extension(mime_type: &Mime) -> Option<&'static str> { 151 | match mime_type.subtype() { 152 | mime::JPEG => Some("jpg"), 153 | mime::PNG => Some("png"), 154 | _ => match mime_type.essence_str() { 155 | "image/webp" => Some("webp"), 156 | _ => None, 157 | }, 158 | } 159 | } 160 | 161 | pub(crate) async fn get_hash_by_path(file_path: impl AsRef) -> io::Result<[u8; 32]> { 162 | let file_path = file_path.as_ref(); 163 | 164 | let mut file = File::open(file_path).await?; 165 | let expected_file_size = file.metadata().await?.len(); 166 | 167 | let mut hasher = Sha256::new(); 168 | 169 | let mut buffer = vec![0; calculate_buffer_size(expected_file_size)]; 170 | 171 | loop { 172 | let c = file.read(&mut buffer).await?; 173 | 174 | if c == 0 { 175 | break; 176 | } 177 | 178 | hasher.update(&buffer[..c]); 179 | } 180 | 181 | Ok(hasher.finalize().into()) 182 | } 183 | 184 | #[inline] 185 | pub(crate) fn get_hash_by_buffer(buffer: impl AsRef<[u8]>) -> [u8; 32] { 186 | let buffer = buffer.as_ref(); 187 | 188 | let mut hasher = Sha256::new(); 189 | 190 | hasher.update(buffer); 191 | 192 | hasher.finalize().into() 193 | } 194 | 195 | #[inline] 196 | pub(crate) fn get_random_hash() -> [u8; 32] { 197 | let mut rng = rand::rngs::OsRng; 198 | let mut data = [0u8; 32]; 199 | 200 | rng.try_fill_bytes(&mut data).unwrap(); 201 | 202 | data 203 | } 204 | 205 | #[inline] 206 | pub(crate) fn allow_not_found_error(result: io::Result<()>) -> io::Result<()> { 207 | match result { 208 | Ok(()) => Ok(()), 209 | Err(error) if error.kind() == ErrorKind::NotFound => Ok(()), 210 | Err(error) => Err(error), 211 | } 212 | } 213 | 214 | #[inline] 215 | pub(crate) fn calculate_buffer_size(expected_length: u64) -> usize { 216 | expected_length.clamp(64, BUFFER_SIZE as u64) as usize 217 | } 218 | -------------------------------------------------------------------------------- /datalith/src/rocket_mounts/operate.rs: 
--------------------------------------------------------------------------------
use std::{io::ErrorKind, str::FromStr};

use datalith_core::{
    mime::Mime, DatalithManager, DatalithResource, DatalithWriteError, FileTypeLevel,
};
use rocket::{
    http::{ContentType, Status},
    response::content::RawJson,
    serde::uuid::Uuid,
    Build, Data, Rocket, State,
};
use rocket_multipart_form_data::{
    MultipartFormData, MultipartFormDataError, MultipartFormDataField, MultipartFormDataOptions,
};
use serde_json::{json, Value};
use validators::prelude::*;

use super::{Boolean, ServerConfig};
use crate::rocket_mounts::rocket_utils::FileLength;

/// `POST /o`: upload one resource as `multipart/form-data` with optional
/// `file_name`, `file_type`, and `temporary` fields; responds with the
/// resource metadata as JSON.
#[post("/", format = "multipart/form-data", data = "")]
async fn upload(
    server_config: &State,
    datalith: &State,
    content_type: &ContentType,
    data: Data<'_>,
) -> Result, Status> {
    // per-field limits; max_data_bytes leaves headroom for the text fields
    let options = MultipartFormDataOptions {
        max_data_bytes: server_config.max_file_size + 1024,
        allowed_fields: vec![
            MultipartFormDataField::file("file").size_limit(server_config.max_file_size),
            MultipartFormDataField::text("file_name").size_limit(512),
            MultipartFormDataField::text("file_type").size_limit(100),
            MultipartFormDataField::text("temporary").size_limit(5),
        ],
        ..MultipartFormDataOptions::default()
    };

    let multipart_form_data =
        MultipartFormData::parse(content_type, data, options).await.map_err(|err| match err {
            // 413 only when the file itself is too large; anything else is 400
            MultipartFormDataError::DataTooLargeError(field) => {
                if field.as_ref() == "file" {
                    Status::PayloadTooLarge
                } else {
                    Status::BadRequest
                }
            },
            _ => Status::BadRequest,
        })?;

    let file_field =
        multipart_form_data.files.get("file").ok_or(Status::BadRequest)?.first().unwrap();

    // an explicit file_name field wins over the multipart filename
    let file_name = if let Some(file_name) = multipart_form_data.texts.get("file_name") {
        Some(&file_name.first().unwrap().text)
    } else {
        file_field.file_name.as_ref()
    };

    // an explicit file_type is authoritative (Manual); the multipart
    // content type is only a fallback
    let mime_type = if let Some(file_type) = multipart_form_data.texts.get("file_type") {
        let file_type = file_type.first().unwrap();

        let mime = Mime::from_str(file_type.text.as_str()).map_err(|_| Status::BadRequest)?;

        Some((mime, FileTypeLevel::Manual))
    } else {
        file_field.content_type.clone().map(|e| (e, FileTypeLevel::Fallback))
    };

    let temporary = if let Some(temporary) = multipart_form_data.texts.get("temporary") {
        let temporary = temporary.first().unwrap();

        match Boolean::parse_str(temporary.text.as_str()) {
            Ok(b) => b.0,
            Err(_) => return Err(Status::BadRequest),
        }
    } else {
        false
    };

    match if temporary {
        datalith
            .put_resource_by_path_temporarily(file_field.path.as_path(), file_name, mime_type)
            .await
    } else {
        datalith.put_resource_by_path(file_field.path.as_path(), file_name, mime_type).await
    } {
        Ok(resource) => {
            let value = datalith_resource_to_json_value(resource);

            Ok(RawJson(serde_json::to_string(&value).unwrap()))
        },
        Err(error) => {
            rocket::error!("{error}");

            Err(Status::InternalServerError)
        },
    }
}

/// `PUT /o`: upload one resource as a raw body stream; name/type/temporary
/// are taken from the query string and headers instead of multipart fields.
#[put("/?&&", data = "")]
#[allow(clippy::too_many_arguments)]
async fn stream_upload(
    server_config: &State,
    datalith: &State,
    content_type: Option<&ContentType>,
    file_length: Option<&FileLength>,
    file_name: Option<&str>,
    file_type: Option<&str>,
    temporary: Option,
    data: Data<'_>,
) -> Result, Status> {
    // explicit file_type query parameter wins over the Content-Type header
    let content_type = match file_type {
        Some(file_type) => match Mime::from_str(file_type) {
            Ok(mime_type) => Some(mime_type),
            Err(_) => return Err(Status::BadRequest),
        },
        None => match content_type {
            Some(content_type) => match Mime::from_str(&content_type.to_string()) {
                Ok(mime_type) => Some(mime_type),
                Err(_) => return Err(Status::BadRequest),
            },
            None => None,
        },
    };
    let mime_type = content_type.clone().map(|e| (e, FileTypeLevel::Manual));

    let expected_reader_length = validate_content_length(server_config, file_length)?;

    let temporary = temporary.map(|e| e.0).unwrap_or(false);

    // max_file_size plus 1 in order to distinguish the too large payload
    let stream = data.open((server_config.max_file_size + 1).into());

    match if temporary {
        datalith
            .put_resource_by_reader_temporarily(
                stream,
                file_name,
                mime_type,
                Some(expected_reader_length),
            )
            .await
    } else {
        datalith
            .put_resource_by_reader(stream, file_name, mime_type, Some(expected_reader_length))
            .await
    } {
        Ok(resource) => {
            let value = datalith_resource_to_json_value(resource);

            Ok(RawJson(serde_json::to_string(&value).unwrap()))
        },
        Err(DatalithWriteError::FileLengthTooLarge {
            ..
        }) => Err(Status::PayloadTooLarge),
        // an ErrorKind::Other I/O error here is treated as a client-side
        // stream problem rather than a server fault
        Err(DatalithWriteError::IOError(error)) if error.kind() == ErrorKind::Other => {
            Err(Status::BadRequest)
        },
        Err(error) => {
            rocket::error!("{error}");

            Err(Status::InternalServerError)
        },
    }
}

/// `DELETE /o/<id>`: delete a resource; 404 when the ID is unknown.
#[delete("/")]
async fn delete(datalith: &State, id: Uuid) -> Result<&'static str, Status> {
    match datalith.delete_resource_by_id(id).await {
        Ok(true) => Ok("ok"),
        Ok(false) => Err(Status::NotFound),
        Err(error) => {
            rocket::error!("{error}");

            Err(Status::InternalServerError)
        },
    }
}

/// Mount the resource-operation routes under `/o`.
#[inline]
pub fn mounts(rocket: Rocket) -> Rocket {
    rocket.mount("/o", routes![upload, stream_upload, delete])
}

/// Validate an optional declared content length against the configured
/// maximum; when absent, the maximum itself is used as the expected length.
#[inline]
pub fn validate_content_length(
    server_config: &State,
    content_length: Option<&FileLength>,
) -> Result {
    let max_file_size = server_config.max_file_size;

    if let Some(content_length) = content_length {
        let content_length = content_length.to_u64();

        if content_length > max_file_size {
            return Err(Status::PayloadTooLarge);
        }

        Ok(content_length)
    } else {
        Ok(max_file_size)
    }
}

/// Serialize a resource's metadata for the JSON API responses above.
#[inline]
fn datalith_resource_to_json_value(resource: DatalithResource) -> Value {
    json!(
        {
            "id": resource.id().to_string(),
            "created_at": resource.created_at().to_rfc3339(),
            "file_type": resource.file_type().essence_str(),
            "file_size": resource.file().file_size(),
            "file_name": resource.file_name(),
            "is_temporary": resource.file().is_temporary(),
        }
    )
}
--------------------------------------------------------------------------------
/datalith/src/rocket_mounts/operate_image.rs:
--------------------------------------------------------------------------------
use datalith_core::{
    CenterCrop,
    DatalithImage, DatalithImageWriteError, DatalithManager, DatalithWriteError, Uuid,
};
use rocket::{
    http::{ContentType, Status},
    response::content::RawJson,
    Build, Data, Rocket, State,
};
use rocket_multipart_form_data::{
    MultipartFormData, MultipartFormDataError, MultipartFormDataField, MultipartFormDataOptions,
};
use serde_json::{json, Value};
use validators::prelude::*;

use super::{Boolean, ServerConfig};
use crate::rocket_mounts::{operate::validate_content_length, rocket_utils::FileLength};

/// `POST /i/o`: upload an image as `multipart/form-data` with optional
/// resize/crop parameters; responds with the stored image's metadata as JSON.
#[post("/", format = "multipart/form-data", data = "")]
async fn upload(
    server_config: &State,
    datalith: &State,
    content_type: &ContentType,
    data: Data<'_>,
) -> Result, Status> {
    // per-field limits; max_data_bytes leaves headroom for the text fields
    let options = MultipartFormDataOptions {
        max_data_bytes: server_config.max_file_size + 1024,
        allowed_fields: vec![
            MultipartFormDataField::file("file").size_limit(server_config.max_file_size),
            MultipartFormDataField::text("file_name").size_limit(512),
            MultipartFormDataField::text("max_width").size_limit(10),
            MultipartFormDataField::text("max_height").size_limit(10),
            MultipartFormDataField::text("center_crop").size_limit(30),
            MultipartFormDataField::text("save_original_file").size_limit(5),
        ],
        ..MultipartFormDataOptions::default()
    };

    let mut multipart_form_data =
        MultipartFormData::parse(content_type, data, options).await.map_err(|err| match err {
            // 413 only when the file itself is too large; anything else is 400
            MultipartFormDataError::DataTooLargeError(field) => {
                if field.as_ref() == "file" {
                    Status::PayloadTooLarge
                } else {
                    Status::BadRequest
                }
            },
            _ => Status::BadRequest,
        })?;

    let file_field =
        multipart_form_data.files.get("file").ok_or(Status::BadRequest)?.first().unwrap();

    // an explicit file_name field wins over the multipart filename
    let file_name = if let Some(mut file_name) = multipart_form_data.texts.remove("file_name") {
        Some(file_name.remove(0).text)
    } else {
        file_field.file_name.clone()
    };

    let max_width: Option = if let Some(max_width) = multipart_form_data.texts.get("max_width")
    {
        Some(max_width[0].text.parse().map_err(|_| Status::BadRequest)?)
    } else {
        None
    };

    let max_height: Option =
        if let Some(max_height) = multipart_form_data.texts.get("max_height") {
            Some(max_height[0].text.parse().map_err(|_| Status::BadRequest)?)
        } else {
            None
        };

    // center_crop is a "W:H" aspect-ratio string
    let center_crop = multipart_form_data.texts.get("center_crop").map(|v| v[0].text.as_str());
    let center_crop = parse_center_crop(center_crop)?;

    let save_original_file =
        if let Some(save_original_file) = multipart_form_data.texts.get("save_original_file") {
            let save_original_file = save_original_file.first().unwrap();

            match Boolean::parse_str(save_original_file.text.as_str()) {
                Ok(b) => b.0,
                Err(_) => return Err(Status::BadRequest),
            }
        } else {
            true
        };

    match datalith
        .put_image_by_path(
            file_field.path.as_path(),
            file_name.as_ref(),
            max_width,
            max_height,
            center_crop,
            save_original_file,
        )
        .await
    {
        Ok(image) => {
            let value = datalith_image_to_json_value(image);

            Ok(RawJson(serde_json::to_string(&value).unwrap()))
        },
        Err(error) => {
            rocket::error!("{error}");

            // a non-image payload is the client's fault
            if let DatalithImageWriteError::UnsupportedImageType = error {
                Err(Status::BadRequest)
            } else {
                Err(Status::InternalServerError)
            }
        },
    }
}

/// `PUT /i/o`: upload an image as a raw body stream; the resize/crop options
/// come from the query string.
#[allow(clippy::too_many_arguments)]
#[put("/?&&&&", data = "")]
async fn stream_upload(
    server_config: &State,
    datalith: &State,
    file_length: Option<&FileLength>,
    file_name: Option<&str>,
    max_width: Option,
    max_height: Option,
    center_crop: Option<&str>,
    save_original_file: Option,
    data: Data<'_>,
) -> Result, Status> {
    let expected_reader_length = validate_content_length(server_config, file_length)?;
    let center_crop = parse_center_crop(center_crop)?;
    let save_original_file = save_original_file.map(|e| e.0).unwrap_or(true);

    // max_file_size plus 1 in order to distinguish the too large payload
    let stream = data.open((server_config.max_file_size + 1).into());

    match datalith
        .put_image_by_reader(
            stream,
            file_name,
            max_width,
            max_height,
            center_crop,
            save_original_file,
            Some(expected_reader_length),
        )
        .await
    {
        Ok(image) => {
            let value = datalith_image_to_json_value(image);

            Ok(RawJson(serde_json::to_string(&value).unwrap()))
        },
        Err(DatalithImageWriteError::DatalithWriteError(
            DatalithWriteError::FileLengthTooLarge {
                ..
            },
        )) => Err(Status::PayloadTooLarge),
        Err(error) => {
            rocket::error!("{error}");

            if let DatalithImageWriteError::UnsupportedImageType = error {
                Err(Status::BadRequest)
            } else {
                Err(Status::InternalServerError)
            }
        },
    }
}

/// `DELETE /i/o/<id>`: delete an image; 404 when the ID is unknown.
#[delete("/")]
async fn delete(datalith: &State, id: Uuid) -> Result<&'static str, Status> {
    match datalith.delete_image_by_id(id).await {
        Ok(true) => Ok("ok"),
        Ok(false) => Err(Status::NotFound),
        Err(error) => {
            rocket::error!("{error}");

            Err(Status::InternalServerError)
        },
    }
}

/// `DELETE /o/<id>?convert-image`: convert an existing resource into an image
/// (the resource is consumed by the conversion); 404 when the resource does
/// not exist.
#[delete("/?convert-image&&&")]
async fn convert_image(
    datalith: &State,
    id: Uuid,
    max_width: Option,
    max_height: Option,
    center_crop: Option<&str>,
) -> Result, Status> {
    let center_crop = parse_center_crop(center_crop)?;

    let resource = match datalith.get_resource_by_id(id).await {
        Ok(Some(resource)) => resource,
        Ok(None) => return Err(Status::NotFound),
        Err(error) => {
            rocket::error!("{error}");

            return Err(Status::InternalServerError);
        },
    };

    match datalith.convert_resource_to_image(resource, max_width, max_height, center_crop).await {
        Ok(image) => {
            let value = datalith_image_to_json_value(image);

            Ok(RawJson(serde_json::to_string(&value).unwrap()))
        },
        Err(error) => {
            rocket::error!("{error}");

            if let DatalithImageWriteError::UnsupportedImageType = error {
                Err(Status::BadRequest)
            } else {
                Err(Status::InternalServerError)
            }
        },
    }
}

/// Mount the image-operation routes: uploads/deletes under `/i/o`, resource
/// conversion under `/o`.
#[inline]
pub fn mounts(rocket: Rocket) -> Rocket {
    rocket.mount("/i/o", routes![upload, stream_upload, delete]).mount("/o", routes![convert_image])
}

/// Serialize an image's metadata for the JSON API responses above.
#[inline]
fn datalith_image_to_json_value(image: DatalithImage) -> Value {
    json!(
        {
            "id": image.id().to_string(),
            "created_at": image.created_at().to_rfc3339(),
            "image_width": image.image_width(),
            "image_height": image.image_height(),
            "image_stem": image.image_stem(),
        }
    )
}

/// Parse an optional `W:H` aspect-ratio string into a `CenterCrop`; exactly
/// two `:`-separated numbers are required, otherwise 400.
#[inline]
fn parse_center_crop(center_crop: Option<&str>) -> Result, Status> {
    if let Some(center_crop) = center_crop {
        let mut split = center_crop.split(':');

        let mut read_next_f64 = || {
            if let Some(t) = split.next() {
                t.parse::().map_err(|_| Status::BadRequest)
            } else {
                Err(Status::BadRequest)
            }
        };

        let w = read_next_f64()?;
        let h = read_next_f64()?;

        // a third component makes the value malformed
        if split.next().is_some() {
            return Err(Status::BadRequest);
        }

        Ok(CenterCrop::new(w, h))
    } else {
        Ok(None)
    }
}
--------------------------------------------------------------------------------
/datalith-core/tests/parallel_and_temporary.rs:
--------------------------------------------------------------------------------
mod global;
3 | use std::time::Duration; 4 | 5 | use datalith_core::PaginationOptions; 6 | use global::*; 7 | use tokio::time; 8 | 9 | #[tokio::test] 10 | async fn parallel_and_temporary() { 11 | let datalith = datalith_init().await; 12 | 13 | let image = IMAGE_DATA.as_ref(); 14 | 15 | { 16 | let (file_1, file_2, file_3, file_4) = tokio::join!( 17 | datalith.put_file_by_buffer_temporarily(image, Some("image.png"), None), 18 | datalith.put_file_by_buffer_temporarily(image, Some("image.png"), None), 19 | datalith.put_file_by_buffer(image, Some("image.png"), None), 20 | datalith.put_file_by_buffer(image, Some("image.png"), None), 21 | ); 22 | 23 | let file_1 = file_1.unwrap(); 24 | let file_2 = file_2.unwrap(); 25 | let file_3 = file_3.unwrap(); 26 | let file_4 = file_4.unwrap(); 27 | 28 | assert_ne!(file_1, file_2); 29 | assert_ne!(file_1, file_3); 30 | assert_ne!(file_2, file_3); 31 | assert_eq!(file_3, file_4); 32 | 33 | let (file_ids, _) = datalith.list_file_ids(PaginationOptions::default()).await.unwrap(); 34 | assert_eq!(3, file_ids.len()); 35 | 36 | let (id_1, id_2, id_3_4) = (file_1.id(), file_2.id(), file_3.id()); 37 | 38 | let (delete_result_1, delete_result_2, delete_result_3, delete_result_4) = tokio::join!( 39 | time::timeout(Duration::from_secs(1), datalith.delete_file_by_id(id_1)), 40 | time::timeout(Duration::from_secs(1), datalith.delete_file_by_id(id_2)), 41 | time::timeout(Duration::from_secs(1), datalith.delete_file_by_id(id_3_4)), 42 | time::timeout(Duration::from_secs(1), datalith.delete_file_by_id(id_3_4)), 43 | ); 44 | 45 | // timeout errors will be thrown 46 | assert!(delete_result_1.is_err()); 47 | assert!(delete_result_2.is_err()); 48 | // 3 or 4 will be deleted successfully because they are the same file and the **count** is 2. 
After deleting, the count will be updated to 1 49 | assert!(delete_result_3.is_err() ^ delete_result_4.is_err()); 50 | 51 | drop(file_1); 52 | drop(file_2); 53 | drop(file_3); 54 | drop(file_4); 55 | 56 | let (delete_result_1, delete_result_2, delete_result_3_4) = tokio::join!( 57 | datalith.delete_file_by_id(id_1), 58 | datalith.delete_file_by_id(id_2), 59 | datalith.delete_file_by_id(id_3_4), 60 | ); 61 | 62 | assert!(delete_result_1.unwrap()); 63 | assert!(delete_result_2.unwrap()); 64 | assert!(delete_result_3_4.unwrap()); 65 | 66 | let (delete_result_1, delete_result_2, delete_result_3_4) = tokio::join!( 67 | datalith.delete_file_by_id(id_1), 68 | datalith.delete_file_by_id(id_2), 69 | datalith.delete_file_by_id(id_3_4), 70 | ); 71 | 72 | assert!(!delete_result_1.unwrap()); 73 | assert!(!delete_result_2.unwrap()); 74 | assert!(!delete_result_3_4.unwrap()); 75 | } 76 | 77 | datalith_close(datalith).await; 78 | } 79 | 80 | #[tokio::test] 81 | async fn resource_parallel_and_temporary() { 82 | let datalith = datalith_init().await; 83 | 84 | let image = IMAGE_DATA.as_ref(); 85 | 86 | { 87 | let (resource_1, resource_2, resource_3, resource_4) = tokio::join!( 88 | datalith.put_resource_by_buffer_temporarily(image, Some("image.png"), None), 89 | datalith.put_resource_by_buffer_temporarily(image, Some("image.png"), None), 90 | datalith.put_resource_by_buffer(image, Some("image.png"), None), 91 | datalith.put_resource_by_buffer(image, Some("image.png"), None), 92 | ); 93 | 94 | let resource_1 = resource_1.unwrap(); 95 | let resource_2 = resource_2.unwrap(); 96 | let resource_3 = resource_3.unwrap(); 97 | let resource_4 = resource_4.unwrap(); 98 | 99 | assert_ne!(resource_1, resource_2); 100 | assert_ne!(resource_1, resource_3); 101 | assert_ne!(resource_2, resource_3); 102 | assert_ne!(resource_3, resource_4); 103 | 104 | let (file_ids, _) = datalith.list_resource_ids(PaginationOptions::default()).await.unwrap(); 105 | assert_eq!(4, file_ids.len()); 106 | 107 | let 
(id_1, id_2, id_3, id_4) = 108 | (resource_1.id(), resource_2.id(), resource_3.id(), resource_4.id()); 109 | 110 | let (delete_result_1, delete_result_2, delete_result_3, delete_result_4) = tokio::join!( 111 | time::timeout(Duration::from_secs(1), datalith.delete_resource_by_id(id_1)), 112 | time::timeout(Duration::from_secs(1), datalith.delete_resource_by_id(id_2)), 113 | time::timeout(Duration::from_secs(1), datalith.delete_resource_by_id(id_3)), 114 | time::timeout(Duration::from_secs(1), datalith.delete_resource_by_id(id_4)), 115 | ); 116 | 117 | // only one will fail to delete because they are the same file and the **count** is 4 118 | { 119 | let mut failures = 0usize; 120 | 121 | if delete_result_1.is_err() { 122 | failures += 1; 123 | } 124 | 125 | if delete_result_2.is_err() { 126 | failures += 1; 127 | } 128 | 129 | if delete_result_3.is_err() { 130 | failures += 1; 131 | } 132 | 133 | if delete_result_4.is_err() { 134 | failures += 1; 135 | } 136 | 137 | assert_eq!(1, failures); 138 | } 139 | 140 | drop(resource_1); 141 | drop(resource_2); 142 | drop(resource_3); 143 | drop(resource_4); 144 | 145 | let (delete_result_1, delete_result_2, delete_result_3, delete_result_4) = tokio::join!( 146 | datalith.delete_resource_by_id(id_1), 147 | datalith.delete_resource_by_id(id_2), 148 | datalith.delete_resource_by_id(id_3), 149 | datalith.delete_resource_by_id(id_4), 150 | ); 151 | 152 | // only one will succeed to delete because the others are already deleted 153 | { 154 | let mut success = 0usize; 155 | 156 | if delete_result_1.unwrap() { 157 | success += 1; 158 | } 159 | 160 | if delete_result_2.unwrap() { 161 | success += 1; 162 | } 163 | 164 | if delete_result_3.unwrap() { 165 | success += 1; 166 | } 167 | 168 | if delete_result_4.unwrap() { 169 | success += 1; 170 | } 171 | 172 | assert_eq!(1, success); 173 | } 174 | 175 | let (delete_result_1, delete_result_2, delete_result_3, delete_result_4) = tokio::join!( 176 | datalith.delete_resource_by_id(id_1), 177 
| datalith.delete_resource_by_id(id_2), 178 | datalith.delete_resource_by_id(id_3), 179 | datalith.delete_resource_by_id(id_4), 180 | ); 181 | 182 | assert!(!delete_result_1.unwrap()); 183 | assert!(!delete_result_2.unwrap()); 184 | assert!(!delete_result_3.unwrap()); 185 | assert!(!delete_result_4.unwrap()); 186 | } 187 | 188 | datalith_close(datalith).await; 189 | } 190 | 191 | #[cfg(feature = "image-convert")] 192 | #[tokio::test] 193 | async fn image_parallel() { 194 | let datalith = datalith_init().await; 195 | 196 | let image = IMAGE_DATA.as_ref(); 197 | 198 | { 199 | let (image_1, image_2, image_3) = tokio::join!( 200 | datalith.put_image_by_buffer( 201 | image.to_vec(), 202 | Some("image.png"), 203 | Some(32), 204 | None, 205 | None, 206 | true 207 | ), 208 | datalith.put_image_by_buffer( 209 | image.to_vec(), 210 | Some("image.png"), 211 | Some(32), 212 | None, 213 | None, 214 | true 215 | ), 216 | datalith.put_image_by_buffer( 217 | image.to_vec(), 218 | Some("image.png"), 219 | Some(48), 220 | None, 221 | None, 222 | true 223 | ), 224 | ); 225 | 226 | let image_1 = image_1.unwrap(); 227 | let image_2 = image_2.unwrap(); 228 | let image_3 = image_3.unwrap(); 229 | 230 | assert_ne!(image_1, image_2); 231 | assert_ne!(image_1, image_3); 232 | assert_ne!(image_2, image_3); 233 | assert_eq!(image_1.original_file(), image_2.original_file()); 234 | assert_eq!(image_1.original_file(), image_3.original_file()); 235 | assert_eq!(image_1.thumbnails()[0], image_2.thumbnails()[0]); 236 | assert_ne!(image_1.thumbnails()[0], image_3.thumbnails()[0]); 237 | assert_eq!(image_1.thumbnails()[2], image_3.thumbnails()[1]); 238 | 239 | let (image_ids, _) = datalith.list_image_ids(PaginationOptions::default()).await.unwrap(); 240 | assert_eq!(3, image_ids.len()); 241 | 242 | let id_1 = image_1.id(); 243 | let id_2 = image_2.id(); 244 | let id_3 = image_3.id(); 245 | 246 | let (delete_result_1, delete_result_2, delete_result_3) = tokio::join!( 247 | 
time::timeout(Duration::from_secs(5), datalith.delete_image_by_id(id_1)), 248 | time::timeout(Duration::from_secs(5), datalith.delete_image_by_id(id_2)), 249 | time::timeout(Duration::from_secs(1), datalith.delete_image_by_id(id_3)), 250 | ); 251 | 252 | // timeout errors will be thrown 253 | // 1 or 2 will be deleted successfully because they are using the same files and the **count** is 2. After deleting, the count will be updated to 1 254 | assert!(delete_result_1.is_err() ^ delete_result_2.is_err()); 255 | assert!(delete_result_3.is_err()); 256 | 257 | drop(image_1); 258 | drop(image_2); 259 | drop(image_3); 260 | 261 | let (delete_result_1, delete_result_2, delete_result_3) = tokio::join!( 262 | datalith.delete_image_by_id(id_1), 263 | datalith.delete_image_by_id(id_2), 264 | datalith.delete_image_by_id(id_3), 265 | ); 266 | 267 | // 1 or 2 will be deleted successfully because one of them is already deleted 268 | assert!(delete_result_1.unwrap() ^ delete_result_2.unwrap()); 269 | assert!(delete_result_3.unwrap()); 270 | 271 | let (delete_result_1, delete_result_2, delete_result_3) = tokio::join!( 272 | datalith.delete_image_by_id(id_1), 273 | datalith.delete_image_by_id(id_2), 274 | datalith.delete_image_by_id(id_3) 275 | ); 276 | 277 | assert!(!delete_result_1.unwrap()); 278 | assert!(!delete_result_2.unwrap()); 279 | assert!(!delete_result_3.unwrap()); 280 | } 281 | 282 | datalith_close(datalith).await; 283 | } 284 | -------------------------------------------------------------------------------- /datalith-core/src/resources/mod.rs: -------------------------------------------------------------------------------- 1 | mod datalith_resource; 2 | 3 | use std::{path::Path, str::FromStr}; 4 | 5 | use chrono::prelude::*; 6 | pub use datalith_resource::*; 7 | use educe::Educe; 8 | use mime::Mime; 9 | use rdb_pagination::{ 10 | prelude::*, OrderByOptions, OrderMethod, Pagination, PaginationOptions, SqlJoin, 11 | SqlOrderByComponent, 12 | }; 13 | use 
tokio::io::AsyncRead; 14 | use uuid::Uuid; 15 | 16 | use crate::{ 17 | functions::{get_current_timestamp, get_file_name}, 18 | guard::DeleteGuard, 19 | Datalith, DatalithFile, DatalithReadError, DatalithWriteError, FileTypeLevel, 20 | }; 21 | 22 | /// A struct that defines the ordering options for querying resources. 23 | #[derive(Debug, Clone, Educe, OrderByOptions)] 24 | #[educe(Default)] 25 | #[orderByOptions(name = resources)] 26 | pub struct DatalithResourceOrderBy { 27 | #[educe(Default = 102)] 28 | #[orderByOptions((resources, id), unique)] 29 | pub id: OrderMethod, 30 | #[educe(Default = -101)] 31 | #[orderByOptions((resources, created_at))] 32 | pub created_at: OrderMethod, 33 | } 34 | 35 | // Upload 36 | impl Datalith { 37 | /// Input a resource into Datalith using a buffer. 38 | #[inline] 39 | pub async fn put_resource_by_buffer( 40 | &self, 41 | buffer: impl AsRef<[u8]>, 42 | file_name: Option>, 43 | file_type: Option<(Mime, FileTypeLevel)>, 44 | ) -> Result { 45 | let file_name = file_name.map(|e| e.into()); 46 | 47 | let file = self.put_file_by_buffer(buffer, file_name.clone(), file_type.clone()).await?; 48 | 49 | self.put_resource(file, file_name, file_type, false).await 50 | } 51 | 52 | /// Temporarily input a resource into Datalith using a buffer. 53 | /// 54 | /// The term `temporarily` means the file can be retrieved using the `get_resource_by_id` function only once. After that, it cannot be retrieved again. 55 | #[inline] 56 | pub async fn put_resource_by_buffer_temporarily( 57 | &self, 58 | buffer: impl AsRef<[u8]>, 59 | file_name: Option>, 60 | file_type: Option<(Mime, FileTypeLevel)>, 61 | ) -> Result { 62 | let file_name = file_name.map(|e| e.into()); 63 | 64 | let file = self.put_file_by_buffer(buffer, file_name.clone(), file_type.clone()).await?; 65 | 66 | self.put_resource(file, file_name, file_type, true).await 67 | } 68 | 69 | /// Input a resource into Datalith using a file path. 
70 | #[inline] 71 | pub async fn put_resource_by_path( 72 | &self, 73 | file_path: impl AsRef, 74 | file_name: Option>, 75 | file_type: Option<(Mime, FileTypeLevel)>, 76 | ) -> Result { 77 | let file_name = file_name.map(|e| e.into()); 78 | 79 | let file = self.put_file_by_path(file_path, file_name.clone(), file_type.clone()).await?; 80 | 81 | self.put_resource(file, file_name, file_type, false).await 82 | } 83 | 84 | /// Temporarily input a resource into Datalith using a file path. 85 | /// 86 | /// The term `temporarily` means the file can be retrieved using the `get_resource_by_id` function only once. After that, it cannot be retrieved again. 87 | #[inline] 88 | pub async fn put_resource_by_path_temporarily( 89 | &self, 90 | file_path: impl AsRef, 91 | file_name: Option>, 92 | file_type: Option<(Mime, FileTypeLevel)>, 93 | ) -> Result { 94 | let file_name = file_name.map(|e| e.into()); 95 | 96 | let file = self.put_file_by_path(file_path, file_name.clone(), file_type.clone()).await?; 97 | 98 | self.put_resource(file, file_name, file_type, true).await 99 | } 100 | 101 | /// Input a resource into Datalith using a reader. 102 | #[inline] 103 | pub async fn put_resource_by_reader( 104 | &self, 105 | reader: impl AsyncRead + Unpin, 106 | file_name: Option>, 107 | file_type: Option<(Mime, FileTypeLevel)>, 108 | expected_reader_length: Option, 109 | ) -> Result { 110 | let file_name = file_name.map(|e| e.into()); 111 | 112 | let file = self 113 | .put_file_by_reader( 114 | reader, 115 | file_name.clone(), 116 | file_type.clone(), 117 | expected_reader_length, 118 | ) 119 | .await?; 120 | 121 | self.put_resource(file, file_name, file_type, false).await 122 | } 123 | 124 | /// Temporarily input a resource into Datalith using a reader. 125 | /// 126 | /// The term `temporarily` means the file can be retrieved using the `get_resource_by_id` function only once. After that, it cannot be retrieved again. 
127 | #[inline] 128 | pub async fn put_resource_by_reader_temporarily( 129 | &self, 130 | reader: impl AsyncRead + Unpin, 131 | file_name: Option>, 132 | file_type: Option<(Mime, FileTypeLevel)>, 133 | expected_reader_length: Option, 134 | ) -> Result { 135 | let file_name = file_name.map(|e| e.into()); 136 | 137 | let file = self 138 | .put_file_by_reader( 139 | reader, 140 | file_name.clone(), 141 | file_type.clone(), 142 | expected_reader_length, 143 | ) 144 | .await?; 145 | 146 | self.put_resource(file, file_name, file_type, true).await 147 | } 148 | 149 | async fn put_resource( 150 | &self, 151 | file: DatalithFile, 152 | file_name: Option, 153 | file_type: Option<(Mime, FileTypeLevel)>, 154 | temporary: bool, 155 | ) -> Result { 156 | macro_rules! recover_file { 157 | () => {{ 158 | let id = file.id(); 159 | 160 | drop(file); 161 | 162 | self.delete_file_by_id(id).await?; 163 | }}; 164 | } 165 | 166 | let (created_at, file_name, file_type) = if file.is_new() { 167 | (file.created_at(), file.file_name().clone(), file.file_type().clone()) 168 | } else { 169 | let created_at = Local::now(); 170 | 171 | let file_type = if let Some((file_type, level)) = file_type { 172 | if matches!(level, FileTypeLevel::Manual | FileTypeLevel::ExactMatch) { 173 | file_type 174 | } else { 175 | // the fallback file type may not be correct, use the existing type instead 176 | file.file_type().clone() 177 | } 178 | } else { 179 | file.file_type().clone() 180 | }; 181 | 182 | let file_name = get_file_name(file_name, created_at, &file_type); 183 | 184 | (created_at, file_name, file_type) 185 | }; 186 | 187 | let expired_at = 188 | if temporary { Some(self.get_expired_timestamp(created_at)) } else { None }; 189 | 190 | let id = Uuid::new_v4(); 191 | 192 | // insert resources 193 | { 194 | #[rustfmt::skip] 195 | let result = sqlx::query( 196 | " 197 | INSERT INTO `resources` (`id`, `created_at`, `file_name`, `file_type`, `file_id`, `expired_at`) 198 | VALUES (?, ?, ?, ?, ?, ?) 
199 | ", 200 | ) 201 | .bind(id) 202 | .bind(created_at.timestamp_millis()) 203 | .bind(file_name.as_str()) 204 | .bind(file_type.essence_str()) 205 | .bind(file.id()) 206 | .bind(expired_at) 207 | .execute(&self.0.db) 208 | .await; 209 | 210 | let result = match result { 211 | Ok(result) => result, 212 | Err(error) => { 213 | recover_file!(); 214 | 215 | return Err(error.into()); 216 | }, 217 | }; 218 | 219 | debug_assert!(result.rows_affected() > 0); 220 | } 221 | 222 | Ok(DatalithResource::new(id, created_at, file_type, file_name, file, expired_at.is_some())) 223 | } 224 | } 225 | 226 | // Download 227 | impl Datalith { 228 | /// Check whether the resource exists or not. 229 | pub async fn check_resource_exist( 230 | &self, 231 | id: impl Into, 232 | ) -> Result { 233 | let current_timestamp = get_current_timestamp(); 234 | 235 | #[rustfmt::skip] 236 | let row = sqlx::query( 237 | " 238 | SELECT 239 | 1 240 | FROM 241 | `resources` 242 | WHERE 243 | `id` = ? 244 | AND ( `expired_at` IS NULL OR `expired_at` > ? ) 245 | ", 246 | ) 247 | .bind(id.into()) 248 | .bind(current_timestamp) 249 | .fetch_optional(&self.0.db) 250 | .await?; 251 | 252 | Ok(row.is_some()) 253 | } 254 | 255 | /// Retrieve the resource metadata using an ID. 256 | pub async fn get_resource_by_id( 257 | &self, 258 | id: impl Into, 259 | ) -> Result, DatalithReadError> { 260 | let current_timestamp = get_current_timestamp(); 261 | 262 | let id = id.into(); 263 | 264 | let is_temporary = { 265 | let result = sqlx::query( 266 | " 267 | UPDATE 268 | `resources` 269 | SET 270 | `expired_at` = ? 271 | WHERE 272 | `id` = ? 273 | AND `expired_at` > ? 
274 | ", 275 | ) 276 | .bind(current_timestamp) 277 | .bind(id) 278 | .bind(current_timestamp) 279 | .execute(&self.0.db) 280 | .await?; 281 | 282 | result.rows_affected() > 0 283 | }; 284 | 285 | #[rustfmt::skip] 286 | let row: Option<(i64, String, String, Uuid)> = sqlx::query_as( 287 | " 288 | SELECT 289 | `created_at`, 290 | `file_type`, 291 | `file_name`, 292 | `file_id` 293 | FROM 294 | `resources` 295 | WHERE 296 | `id` = ? 297 | AND ( `expired_at` IS NULL OR `expired_at` = ? ) 298 | ", 299 | ) 300 | .bind(id) 301 | .bind(current_timestamp) 302 | .fetch_optional(&self.0.db) 303 | .await?; 304 | 305 | if let Some((created_at, file_type, file_name, file_id)) = row { 306 | let file = self.get_file_by_id(file_id).await?; 307 | 308 | if let Some(file) = file { 309 | let created_at = DateTime::from_timestamp_millis(created_at).unwrap(); 310 | 311 | return Ok(Some(DatalithResource::new( 312 | id, 313 | created_at, 314 | Mime::from_str(&file_type).unwrap(), 315 | file_name, 316 | file, 317 | is_temporary, 318 | ))); 319 | } 320 | } 321 | 322 | Ok(None) 323 | } 324 | 325 | /// List resource IDs. 
326 | pub async fn list_resource_ids( 327 | &self, 328 | mut pagination_options: PaginationOptions, 329 | ) -> Result<(Vec, Pagination), DatalithReadError> { 330 | loop { 331 | let (joins, order_by_components) = pagination_options.order_by.to_sql(); 332 | 333 | let mut sql_join = String::new(); 334 | let mut sql_order_by = String::new(); 335 | let mut sql_limit_offset = String::new(); 336 | 337 | SqlJoin::format_sqlite_join_clauses(&joins, &mut sql_join); 338 | SqlOrderByComponent::format_sqlite_order_by_components( 339 | &order_by_components, 340 | &mut sql_order_by, 341 | ); 342 | pagination_options.to_sqlite_limit_offset(&mut sql_limit_offset); 343 | 344 | let mut tx = self.0.db.begin().await?; 345 | 346 | let total_items = { 347 | let row: (u32,) = { 348 | #[rustfmt::skip] 349 | let query = sqlx::query_as( 350 | " 351 | SELECT 352 | COUNT(*) 353 | FROM 354 | `resources` 355 | " 356 | ); 357 | 358 | query.fetch_one(&mut *tx).await? 359 | }; 360 | 361 | row.0 362 | }; 363 | 364 | let rows: Vec<(Uuid,)> = { 365 | #[rustfmt::skip] 366 | let sql = format!( 367 | " 368 | SELECT 369 | `id` 370 | FROM 371 | `resources` 372 | {sql_join} 373 | WHERE 374 | (`expired_at` IS NULL OR `expired_at` > ?) 375 | {sql_order_by} 376 | {sql_limit_offset} 377 | " 378 | ); 379 | 380 | let current_timestamp = get_current_timestamp(); 381 | 382 | let query = sqlx::query_as(&sql).bind(current_timestamp); 383 | 384 | query.fetch_all(&mut *tx).await? 
385 | }; 386 | 387 | let total_items = total_items as usize; 388 | 389 | drop(tx); 390 | 391 | let pagination = Pagination::new() 392 | .items_per_page(pagination_options.items_per_page) 393 | .total_items(total_items) 394 | .page(pagination_options.page); 395 | 396 | if rows.is_empty() { 397 | if total_items > 0 && pagination_options.page > 1 { 398 | pagination_options.page = pagination.get_total_pages(); 399 | 400 | continue; 401 | } else { 402 | return Ok((Vec::new(), pagination)); 403 | } 404 | } 405 | 406 | let ids = rows.into_iter().map(|(id,)| id).collect::>(); 407 | 408 | return Ok((ids, pagination)); 409 | } 410 | } 411 | } 412 | 413 | // Delete 414 | impl Datalith { 415 | /// Remove a resource using an ID. The related `DatalithResource` instances should be dropped before calling this function. 416 | #[inline] 417 | pub async fn delete_resource_by_id( 418 | &self, 419 | id: impl Into, 420 | ) -> Result { 421 | let id = id.into(); 422 | 423 | #[rustfmt::skip] 424 | let row: Option<(Uuid,)> = sqlx::query_as( 425 | " 426 | SELECT 427 | `file_id` 428 | FROM 429 | `resources` 430 | WHERE 431 | `id` = ? 432 | ", 433 | ) 434 | .bind(id) 435 | .fetch_optional(&self.0.db) 436 | .await?; 437 | 438 | if let Some((file_id,)) = row { 439 | let guard = DeleteGuard::new(self.clone(), file_id).await; 440 | 441 | self.wait_for_opening_files(&guard).await?; 442 | 443 | #[rustfmt::skip] 444 | let result = sqlx::query( 445 | " 446 | DELETE FROM 447 | `resources` 448 | WHERE 449 | `id` = ? 
450 | ", 451 | ) 452 | .bind(id) 453 | .execute(&self.0.db) 454 | .await?; 455 | 456 | if result.rows_affected() == 0 { 457 | return Ok(false); 458 | } 459 | 460 | // delete the related file 461 | 462 | self.delete_file_by_id_inner(file_id, guard).await?; 463 | 464 | Ok(true) 465 | } else { 466 | Ok(false) 467 | } 468 | } 469 | } 470 | -------------------------------------------------------------------------------- /datalith-core/tests/upload_download_data.rs: -------------------------------------------------------------------------------- 1 | mod global; 2 | 3 | use global::*; 4 | #[cfg(feature = "image-convert")] 5 | use rdb_pagination::PaginationOptions; 6 | use tokio::{fs::File, io::AsyncReadExt}; 7 | 8 | #[tokio::test] 9 | async fn upload_download_data() { 10 | let datalith = datalith_init().await; 11 | 12 | let image = IMAGE_DATA.as_ref(); 13 | 14 | { 15 | let id = { 16 | let file = datalith 17 | .put_file_by_buffer_temporarily(image, Some("image.png"), None) 18 | .await 19 | .unwrap(); 20 | 21 | #[cfg(feature = "magic")] 22 | assert_eq!(&mime::IMAGE_PNG, file.file_type()); 23 | assert_eq!(IMAGE_SIZE, file.file_size()); 24 | assert_eq!("image.png", file.file_name()); 25 | assert!(file.is_temporary()); 26 | assert!(file.is_new()); 27 | 28 | let mut reader = file.create_reader().await.unwrap(); 29 | let mut buffer = Vec::with_capacity(file.file_size() as usize); 30 | reader.read_to_end(&mut buffer).await.unwrap(); 31 | assert_eq!(image, buffer); 32 | 33 | file.id() 34 | }; 35 | 36 | // get 37 | { 38 | let file = datalith.get_file_by_id(id).await.unwrap().unwrap(); 39 | 40 | #[cfg(feature = "magic")] 41 | assert_eq!(&mime::IMAGE_PNG, file.file_type()); 42 | assert_eq!(IMAGE_SIZE, file.file_size()); 43 | assert_eq!("image.png", file.file_name()); 44 | assert!(file.is_temporary()); 45 | assert!(!file.is_new()); 46 | 47 | let mut reader = file.create_reader().await.unwrap(); 48 | let mut buffer = Vec::with_capacity(file.file_size() as usize); 49 | 
reader.read_to_end(&mut buffer).await.unwrap();
            assert_eq!(image, buffer);
        }

        // temporarily files can only get once
        assert!(datalith.get_file_by_id(id).await.unwrap().is_none());
        assert!(!datalith.check_file_exist(id).await.unwrap());
    }

    {
        let id = {
            let file = datalith.put_file_by_buffer(image, Some("image.png"), None).await.unwrap();

            #[cfg(feature = "magic")]
            assert_eq!(&mime::IMAGE_PNG, file.file_type());
            assert_eq!(IMAGE_SIZE, file.file_size());
            assert_eq!("image.png", file.file_name());
            assert!(!file.is_temporary());
            assert!(file.is_new());

            let mut reader = file.create_reader().await.unwrap();
            let mut buffer = Vec::with_capacity(file.file_size() as usize);
            reader.read_to_end(&mut buffer).await.unwrap();
            assert_eq!(image, buffer);

            file.id()
        };

        // get
        {
            let file = datalith.get_file_by_id(id).await.unwrap().unwrap();

            #[cfg(feature = "magic")]
            assert_eq!(&mime::IMAGE_PNG, file.file_type());
            assert_eq!(IMAGE_SIZE, file.file_size());
            assert_eq!("image.png", file.file_name());
            assert!(!file.is_temporary());
            assert!(!file.is_new());

            let mut reader = file.create_reader().await.unwrap();
            let mut buffer = Vec::with_capacity(file.file_size() as usize);
            reader.read_to_end(&mut buffer).await.unwrap();
            assert_eq!(image, buffer);
        }

        assert!(datalith.get_file_by_id(id).await.unwrap().is_some());
        assert!(datalith.check_file_exist(id).await.unwrap());

        // delete
        assert!(datalith.delete_file_by_id(id).await.unwrap());
        assert!(!datalith.delete_file_by_id(id).await.unwrap());
    }

    {
        let id = {
            let file = datalith
                .put_file_by_path_temporarily(IMAGE_PATH, None::<&str>, None)
                .await
                .unwrap();

            // Path-based uploads detect the type from the extension, so no
            // `magic` feature gate is needed here.
            assert_eq!(&mime::IMAGE_PNG, file.file_type());
            assert_eq!(IMAGE_SIZE, file.file_size());
            assert_eq!("image.png", file.file_name());
            assert!(file.is_temporary());
            assert!(file.is_new());

            let mut reader = file.create_reader().await.unwrap();
            let mut buffer = Vec::with_capacity(file.file_size() as usize);
            reader.read_to_end(&mut buffer).await.unwrap();
            assert_eq!(image, buffer);

            file.id()
        };

        // get
        {
            let file = datalith.get_file_by_id(id).await.unwrap().unwrap();

            #[cfg(feature = "magic")]
            assert_eq!(&mime::IMAGE_PNG, file.file_type());
            assert_eq!(IMAGE_SIZE, file.file_size());
            assert_eq!("image.png", file.file_name());
            assert!(file.is_temporary());
            assert!(!file.is_new());

            let mut reader = file.create_reader().await.unwrap();
            let mut buffer = Vec::with_capacity(file.file_size() as usize);
            reader.read_to_end(&mut buffer).await.unwrap();
            assert_eq!(image, buffer);
        }

        // temporarily files can only get once
        assert!(datalith.get_file_by_id(id).await.unwrap().is_none());
        assert!(!datalith.check_file_exist(id).await.unwrap());
    }

    {
        let id = {
            let file = datalith.put_file_by_path(IMAGE_PATH, None::<&str>, None).await.unwrap();

            assert_eq!(&mime::IMAGE_PNG, file.file_type());
            assert_eq!(IMAGE_SIZE, file.file_size());
            assert_eq!("image.png", file.file_name());
            assert!(!file.is_temporary());
            assert!(file.is_new());

            let mut reader = file.create_reader().await.unwrap();
            let mut buffer = Vec::with_capacity(file.file_size() as usize);
            reader.read_to_end(&mut buffer).await.unwrap();
            assert_eq!(image, buffer);

            file.id()
        };

        // get
        {
            let file = datalith.get_file_by_id(id).await.unwrap().unwrap();

            #[cfg(feature = "magic")]
            assert_eq!(&mime::IMAGE_PNG, file.file_type());
            assert_eq!(IMAGE_SIZE, file.file_size());
            assert_eq!("image.png", file.file_name());
            assert!(!file.is_temporary());
            assert!(!file.is_new());

            let mut reader = file.create_reader().await.unwrap();
            let mut buffer = Vec::with_capacity(file.file_size() as usize);
            reader.read_to_end(&mut buffer).await.unwrap();
            assert_eq!(image, buffer);
        }

        assert!(datalith.get_file_by_id(id).await.unwrap().is_some());
        assert!(datalith.check_file_exist(id).await.unwrap());

        // delete
        assert!(datalith.delete_file_by_id(id).await.unwrap());
        assert!(!datalith.delete_file_by_id(id).await.unwrap());
    }

    {
        let id = {
            let mut file = File::open(IMAGE_PATH).await.unwrap();

            let file = datalith
                .put_file_by_reader_temporarily(
                    &mut file,
                    Some("image.png"),
                    None,
                    Some(IMAGE_SIZE),
                )
                .await
                .unwrap();

            #[cfg(feature = "magic")]
            assert_eq!(&mime::IMAGE_PNG, file.file_type());
            assert_eq!(IMAGE_SIZE, file.file_size());
            assert_eq!("image.png", file.file_name());
            assert!(file.is_temporary());
            assert!(file.is_new());

            let mut reader = file.create_reader().await.unwrap();
            let mut buffer = Vec::with_capacity(file.file_size() as usize);
            reader.read_to_end(&mut buffer).await.unwrap();
            assert_eq!(image, buffer);

            file.id()
        };

        // get
        {
            let file = datalith.get_file_by_id(id).await.unwrap().unwrap();

            #[cfg(feature = "magic")]
            assert_eq!(&mime::IMAGE_PNG, file.file_type());
            assert_eq!(IMAGE_SIZE, file.file_size());
            assert_eq!("image.png", file.file_name());
            assert!(file.is_temporary());
            assert!(!file.is_new());

            let mut reader = file.create_reader().await.unwrap();
            let mut buffer = Vec::with_capacity(file.file_size() as usize);
            reader.read_to_end(&mut buffer).await.unwrap();
            assert_eq!(image, buffer);
        }

        // temporarily files can only get once
        assert!(datalith.get_file_by_id(id).await.unwrap().is_none());
        assert!(!datalith.check_file_exist(id).await.unwrap());
    }

    {
        let id = {
            let mut file = File::open(IMAGE_PATH).await.unwrap();

            let file = datalith
                .put_file_by_reader(&mut file, Some("image.png"), None, Some(IMAGE_SIZE))
                .await
                .unwrap();

            #[cfg(feature = "magic")]
            assert_eq!(&mime::IMAGE_PNG, file.file_type());
            assert_eq!(IMAGE_SIZE, file.file_size());
            assert_eq!("image.png", file.file_name());
            assert!(!file.is_temporary());
            assert!(file.is_new());

            let mut reader = file.create_reader().await.unwrap();
            let mut buffer = Vec::with_capacity(file.file_size() as usize);
            reader.read_to_end(&mut buffer).await.unwrap();
            assert_eq!(image, buffer);

            file.id()
        };

        // get
        {
            let file = datalith.get_file_by_id(id).await.unwrap().unwrap();

            #[cfg(feature = "magic")]
            assert_eq!(&mime::IMAGE_PNG, file.file_type());
            assert_eq!(IMAGE_SIZE, file.file_size());
            assert_eq!("image.png", file.file_name());
            assert!(!file.is_temporary());
            assert!(!file.is_new());

            let mut reader = file.create_reader().await.unwrap();
            let mut buffer = Vec::with_capacity(file.file_size() as usize);
            reader.read_to_end(&mut buffer).await.unwrap();
            assert_eq!(image, buffer);
        }

        assert!(datalith.get_file_by_id(id).await.unwrap().is_some());
        assert!(datalith.check_file_exist(id).await.unwrap());

        // delete
        assert!(datalith.delete_file_by_id(id).await.unwrap());
        assert!(!datalith.delete_file_by_id(id).await.unwrap());
    }

    datalith_close(datalith).await;
}

/// Round-trip resources: a resource wraps a permanent file, and temporary
/// resources can only be fetched once.
#[tokio::test]
async fn resource_upload_download_data() {
    let datalith = datalith_init().await;

    let image = IMAGE_DATA.as_ref();

    {
        let id = {
            let resource = datalith
                .put_resource_by_buffer_temporarily(image, Some("image.png"), None)
                .await
                .unwrap();

            #[cfg(feature = "magic")]
            assert_eq!(&mime::IMAGE_PNG, resource.file_type());
            assert_eq!("image.png", resource.file_name());
            assert!(resource.is_temporary());

            // The backing file itself is permanent; only the resource row is
            // temporary.
            let file = resource.file();
            #[cfg(feature = "magic")]
            assert_eq!(&mime::IMAGE_PNG, file.file_type());
            assert_eq!(IMAGE_SIZE, file.file_size());
            assert_eq!("image.png", file.file_name());
            assert!(!file.is_temporary());
            assert!(file.is_new());

            let mut reader = file.create_reader().await.unwrap();
            let mut buffer = Vec::with_capacity(file.file_size() as usize);
            reader.read_to_end(&mut buffer).await.unwrap();
            assert_eq!(image, buffer);

            resource.id()
        };

        // get
        {
            let resource = datalith.get_resource_by_id(id).await.unwrap().unwrap();

            #[cfg(feature = "magic")]
            assert_eq!(&mime::IMAGE_PNG, resource.file_type());
            assert_eq!("image.png", resource.file_name());
            assert!(resource.is_temporary());

            let file = resource.file();
            #[cfg(feature = "magic")]
            assert_eq!(&mime::IMAGE_PNG, file.file_type());
            assert_eq!(IMAGE_SIZE, file.file_size());
            assert_eq!("image.png", file.file_name());
            assert!(!file.is_temporary());
            assert!(!file.is_new());

            let mut reader = file.create_reader().await.unwrap();
            let mut buffer = Vec::with_capacity(file.file_size() as usize);
            reader.read_to_end(&mut buffer).await.unwrap();
            assert_eq!(image, buffer);
        }

        // temporarily resources can only get once
        assert!(datalith.get_resource_by_id(id).await.unwrap().is_none());
        assert!(!datalith.check_resource_exist(id).await.unwrap());
    }

    {
        let id = {
| let resource = 356 | datalith.put_resource_by_buffer(image, Some("image.png"), None).await.unwrap(); 357 | 358 | #[cfg(feature = "magic")] 359 | assert_eq!(&mime::IMAGE_PNG, resource.file_type()); 360 | assert_eq!("image.png", resource.file_name()); 361 | assert!(!resource.is_temporary()); 362 | 363 | let file = resource.file(); 364 | #[cfg(feature = "magic")] 365 | assert_eq!(&mime::IMAGE_PNG, file.file_type()); 366 | assert_eq!(IMAGE_SIZE, file.file_size()); 367 | assert_eq!("image.png", file.file_name()); 368 | assert!(!file.is_temporary()); 369 | // the previous temporary resource is existing, so it cannot generate a new file 370 | assert!(!file.is_new()); 371 | 372 | let mut reader = file.create_reader().await.unwrap(); 373 | let mut buffer = Vec::with_capacity(file.file_size() as usize); 374 | reader.read_to_end(&mut buffer).await.unwrap(); 375 | assert_eq!(image, buffer); 376 | 377 | resource.id() 378 | }; 379 | 380 | // get 381 | { 382 | let resource = datalith.get_resource_by_id(id).await.unwrap().unwrap(); 383 | 384 | #[cfg(feature = "magic")] 385 | assert_eq!(&mime::IMAGE_PNG, resource.file_type()); 386 | assert_eq!("image.png", resource.file_name()); 387 | assert!(!resource.is_temporary()); 388 | 389 | let file = resource.file(); 390 | #[cfg(feature = "magic")] 391 | assert_eq!(&mime::IMAGE_PNG, file.file_type()); 392 | assert_eq!(IMAGE_SIZE, file.file_size()); 393 | assert_eq!("image.png", file.file_name()); 394 | assert!(!file.is_temporary()); 395 | assert!(!file.is_new()); 396 | 397 | let mut reader = file.create_reader().await.unwrap(); 398 | let mut buffer = Vec::with_capacity(file.file_size() as usize); 399 | reader.read_to_end(&mut buffer).await.unwrap(); 400 | assert_eq!(image, buffer); 401 | } 402 | 403 | assert!(datalith.get_resource_by_id(id).await.unwrap().is_some()); 404 | assert!(datalith.check_resource_exist(id).await.unwrap()); 405 | 406 | // delete 407 | assert!(datalith.delete_resource_by_id(id).await.unwrap()); 408 | 
assert!(!datalith.delete_resource_by_id(id).await.unwrap()); 409 | } 410 | 411 | { 412 | let id = { 413 | let resource = datalith 414 | .put_resource_by_path_temporarily(IMAGE_PATH, Some("image.png"), None) 415 | .await 416 | .unwrap(); 417 | 418 | #[cfg(feature = "magic")] 419 | assert_eq!(&mime::IMAGE_PNG, resource.file_type()); 420 | assert_eq!("image.png", resource.file_name()); 421 | assert!(resource.is_temporary()); 422 | 423 | let file = resource.file(); 424 | #[cfg(feature = "magic")] 425 | assert_eq!(&mime::IMAGE_PNG, file.file_type()); 426 | assert_eq!(IMAGE_SIZE, file.file_size()); 427 | assert_eq!("image.png", file.file_name()); 428 | assert!(!file.is_temporary()); 429 | // the previous temporary resource is existing, so it cannot generate a new file 430 | assert!(!file.is_new()); 431 | 432 | let mut reader = file.create_reader().await.unwrap(); 433 | let mut buffer = Vec::with_capacity(file.file_size() as usize); 434 | reader.read_to_end(&mut buffer).await.unwrap(); 435 | assert_eq!(image, buffer); 436 | 437 | resource.id() 438 | }; 439 | 440 | // get 441 | { 442 | let resource = datalith.get_resource_by_id(id).await.unwrap().unwrap(); 443 | 444 | #[cfg(feature = "magic")] 445 | assert_eq!(&mime::IMAGE_PNG, resource.file_type()); 446 | assert_eq!("image.png", resource.file_name()); 447 | assert!(resource.is_temporary()); 448 | 449 | let file = resource.file(); 450 | #[cfg(feature = "magic")] 451 | assert_eq!(&mime::IMAGE_PNG, file.file_type()); 452 | assert_eq!(IMAGE_SIZE, file.file_size()); 453 | assert_eq!("image.png", file.file_name()); 454 | assert!(!file.is_temporary()); 455 | assert!(!file.is_new()); 456 | 457 | let mut reader = file.create_reader().await.unwrap(); 458 | let mut buffer = Vec::with_capacity(file.file_size() as usize); 459 | reader.read_to_end(&mut buffer).await.unwrap(); 460 | assert_eq!(image, buffer); 461 | } 462 | 463 | // temporarily resources can only get once 464 | 
assert!(datalith.get_resource_by_id(id).await.unwrap().is_none()); 465 | assert!(!datalith.check_resource_exist(id).await.unwrap()); 466 | } 467 | 468 | { 469 | let id = { 470 | let resource = 471 | datalith.put_resource_by_path(IMAGE_PATH, Some("image.png"), None).await.unwrap(); 472 | 473 | #[cfg(feature = "magic")] 474 | assert_eq!(&mime::IMAGE_PNG, resource.file_type()); 475 | assert_eq!("image.png", resource.file_name()); 476 | assert!(!resource.is_temporary()); 477 | 478 | let file = resource.file(); 479 | #[cfg(feature = "magic")] 480 | assert_eq!(&mime::IMAGE_PNG, file.file_type()); 481 | assert_eq!(IMAGE_SIZE, file.file_size()); 482 | assert_eq!("image.png", file.file_name()); 483 | assert!(!file.is_temporary()); 484 | // the previous temporary resource is existing, so it cannot generate a new file 485 | assert!(!file.is_new()); 486 | 487 | let mut reader = file.create_reader().await.unwrap(); 488 | let mut buffer = Vec::with_capacity(file.file_size() as usize); 489 | reader.read_to_end(&mut buffer).await.unwrap(); 490 | assert_eq!(image, buffer); 491 | 492 | resource.id() 493 | }; 494 | 495 | // get 496 | { 497 | let resource = datalith.get_resource_by_id(id).await.unwrap().unwrap(); 498 | 499 | #[cfg(feature = "magic")] 500 | assert_eq!(&mime::IMAGE_PNG, resource.file_type()); 501 | assert_eq!("image.png", resource.file_name()); 502 | assert!(!resource.is_temporary()); 503 | 504 | let file = resource.file(); 505 | #[cfg(feature = "magic")] 506 | assert_eq!(&mime::IMAGE_PNG, file.file_type()); 507 | assert_eq!(IMAGE_SIZE, file.file_size()); 508 | assert_eq!("image.png", file.file_name()); 509 | assert!(!file.is_temporary()); 510 | assert!(!file.is_new()); 511 | 512 | let mut reader = file.create_reader().await.unwrap(); 513 | let mut buffer = Vec::with_capacity(file.file_size() as usize); 514 | reader.read_to_end(&mut buffer).await.unwrap(); 515 | assert_eq!(image, buffer); 516 | } 517 | 518 | 
assert!(datalith.get_resource_by_id(id).await.unwrap().is_some()); 519 | assert!(datalith.check_resource_exist(id).await.unwrap()); 520 | 521 | // delete 522 | assert!(datalith.delete_resource_by_id(id).await.unwrap()); 523 | assert!(!datalith.delete_resource_by_id(id).await.unwrap()); 524 | } 525 | 526 | { 527 | let id = { 528 | let mut file = File::open(IMAGE_PATH).await.unwrap(); 529 | 530 | let resource = datalith 531 | .put_resource_by_reader_temporarily( 532 | &mut file, 533 | Some("image.png"), 534 | None, 535 | Some(IMAGE_SIZE), 536 | ) 537 | .await 538 | .unwrap(); 539 | 540 | #[cfg(feature = "magic")] 541 | assert_eq!(&mime::IMAGE_PNG, resource.file_type()); 542 | assert_eq!("image.png", resource.file_name()); 543 | assert!(resource.is_temporary()); 544 | 545 | let file = resource.file(); 546 | #[cfg(feature = "magic")] 547 | assert_eq!(&mime::IMAGE_PNG, file.file_type()); 548 | assert_eq!(IMAGE_SIZE, file.file_size()); 549 | assert_eq!("image.png", file.file_name()); 550 | assert!(!file.is_temporary()); 551 | // the previous temporary resource is existing, so it cannot generate a new file 552 | assert!(!file.is_new()); 553 | 554 | let mut reader = file.create_reader().await.unwrap(); 555 | let mut buffer = Vec::with_capacity(file.file_size() as usize); 556 | reader.read_to_end(&mut buffer).await.unwrap(); 557 | assert_eq!(image, buffer); 558 | 559 | resource.id() 560 | }; 561 | 562 | // get 563 | { 564 | let resource = datalith.get_resource_by_id(id).await.unwrap().unwrap(); 565 | 566 | #[cfg(feature = "magic")] 567 | assert_eq!(&mime::IMAGE_PNG, resource.file_type()); 568 | assert_eq!("image.png", resource.file_name()); 569 | assert!(resource.is_temporary()); 570 | 571 | let file = resource.file(); 572 | #[cfg(feature = "magic")] 573 | assert_eq!(&mime::IMAGE_PNG, file.file_type()); 574 | assert_eq!(IMAGE_SIZE, file.file_size()); 575 | assert_eq!("image.png", file.file_name()); 576 | assert!(!file.is_temporary()); 577 | assert!(!file.is_new()); 578 | 
579 | let mut reader = file.create_reader().await.unwrap(); 580 | let mut buffer = Vec::with_capacity(file.file_size() as usize); 581 | reader.read_to_end(&mut buffer).await.unwrap(); 582 | assert_eq!(image, buffer); 583 | } 584 | 585 | // temporarily resources can only get once 586 | assert!(datalith.get_resource_by_id(id).await.unwrap().is_none()); 587 | assert!(!datalith.check_resource_exist(id).await.unwrap()); 588 | } 589 | 590 | { 591 | let id = { 592 | let mut file = File::open(IMAGE_PATH).await.unwrap(); 593 | 594 | let resource = datalith 595 | .put_resource_by_reader(&mut file, Some("image.png"), None, Some(IMAGE_SIZE)) 596 | .await 597 | .unwrap(); 598 | 599 | #[cfg(feature = "magic")] 600 | assert_eq!(&mime::IMAGE_PNG, resource.file_type()); 601 | assert_eq!("image.png", resource.file_name()); 602 | assert!(!resource.is_temporary()); 603 | 604 | let file = resource.file(); 605 | #[cfg(feature = "magic")] 606 | assert_eq!(&mime::IMAGE_PNG, file.file_type()); 607 | assert_eq!(IMAGE_SIZE, file.file_size()); 608 | assert_eq!("image.png", file.file_name()); 609 | assert!(!file.is_temporary()); 610 | // the previous temporary resource is existing, so it cannot generate a new file 611 | assert!(!file.is_new()); 612 | 613 | let mut reader = file.create_reader().await.unwrap(); 614 | let mut buffer = Vec::with_capacity(file.file_size() as usize); 615 | reader.read_to_end(&mut buffer).await.unwrap(); 616 | assert_eq!(image, buffer); 617 | 618 | resource.id() 619 | }; 620 | 621 | // get 622 | { 623 | let resource = datalith.get_resource_by_id(id).await.unwrap().unwrap(); 624 | 625 | #[cfg(feature = "magic")] 626 | assert_eq!(&mime::IMAGE_PNG, resource.file_type()); 627 | assert_eq!("image.png", resource.file_name()); 628 | assert!(!resource.is_temporary()); 629 | 630 | let file = resource.file(); 631 | #[cfg(feature = "magic")] 632 | assert_eq!(&mime::IMAGE_PNG, file.file_type()); 633 | assert_eq!(IMAGE_SIZE, file.file_size()); 634 | assert_eq!("image.png", 
file.file_name()); 635 | assert!(!file.is_temporary()); 636 | assert!(!file.is_new()); 637 | 638 | let mut reader = file.create_reader().await.unwrap(); 639 | let mut buffer = Vec::with_capacity(file.file_size() as usize); 640 | reader.read_to_end(&mut buffer).await.unwrap(); 641 | assert_eq!(image, buffer); 642 | } 643 | 644 | assert!(datalith.get_resource_by_id(id).await.unwrap().is_some()); 645 | assert!(datalith.check_resource_exist(id).await.unwrap()); 646 | 647 | // delete 648 | assert!(datalith.delete_resource_by_id(id).await.unwrap()); 649 | assert!(!datalith.delete_resource_by_id(id).await.unwrap()); 650 | } 651 | 652 | datalith_close(datalith).await; 653 | } 654 | 655 | #[cfg(feature = "image-convert")] 656 | #[tokio::test] 657 | async fn image_upload_download_data() { 658 | let datalith = datalith_init().await; 659 | 660 | let image = IMAGE_DATA.as_ref(); 661 | 662 | { 663 | let id = { 664 | let image = datalith 665 | .put_image_by_buffer(image.to_vec(), Some("image.png"), Some(32), None, None, true) 666 | .await 667 | .unwrap(); 668 | 669 | assert_eq!("image", image.image_stem()); 670 | assert_eq!(32, image.image_width()); 671 | assert_eq!(32, image.image_height()); 672 | assert!(image.has_alpha_channel()); 673 | 674 | let original_file = image.original_file().unwrap(); 675 | assert_eq!(&mime::IMAGE_PNG, original_file.file_type()); 676 | assert_eq!(IMAGE_SIZE, original_file.file_size()); 677 | assert_eq!("image.png", original_file.file_name()); 678 | 679 | let thumbnails = image.thumbnails(); 680 | let fallback_thumbnails = image.fallback_thumbnails(); 681 | assert_eq!(3, thumbnails.len()); 682 | assert_eq!(3, fallback_thumbnails.len()); 683 | 684 | image.id() 685 | }; 686 | 687 | // get 688 | let original_file_id = { 689 | let image = datalith.get_image_by_id(id).await.unwrap().unwrap(); 690 | 691 | assert_eq!("image", image.image_stem()); 692 | assert_eq!(32, image.image_width()); 693 | assert_eq!(32, image.image_height()); 694 | 
assert!(image.has_alpha_channel()); 695 | 696 | let original_file = image.original_file().unwrap(); 697 | assert_eq!(&mime::IMAGE_PNG, original_file.file_type()); 698 | assert_eq!(IMAGE_SIZE, original_file.file_size()); 699 | assert_eq!("image.png", original_file.file_name()); 700 | 701 | let thumbnails = image.thumbnails(); 702 | let fallback_thumbnails = image.fallback_thumbnails(); 703 | assert_eq!(3, thumbnails.len()); 704 | assert_eq!(3, fallback_thumbnails.len()); 705 | 706 | original_file.id() 707 | }; 708 | 709 | assert!(datalith.get_image_by_id(id).await.unwrap().is_some()); 710 | assert!(datalith.check_image_exist(id).await.unwrap()); 711 | 712 | // delete 713 | assert!(!datalith.delete_file_by_id(original_file_id).await.unwrap()); 714 | assert!(datalith.delete_image_by_id(id).await.unwrap()); 715 | assert!(!datalith.delete_image_by_id(id).await.unwrap()); 716 | assert!(datalith.list_file_ids(PaginationOptions::default()).await.unwrap().0.is_empty()); 717 | } 718 | 719 | { 720 | let id = { 721 | let image = datalith 722 | .put_image_by_path(IMAGE_PATH, None::<&str>, Some(32), None, None, true) 723 | .await 724 | .unwrap(); 725 | 726 | assert_eq!("image", image.image_stem()); 727 | assert_eq!(32, image.image_width()); 728 | assert_eq!(32, image.image_height()); 729 | assert!(image.has_alpha_channel()); 730 | 731 | let original_file = image.original_file().unwrap(); 732 | assert_eq!(&mime::IMAGE_PNG, original_file.file_type()); 733 | assert_eq!(IMAGE_SIZE, original_file.file_size()); 734 | assert_eq!("image.png", original_file.file_name()); 735 | 736 | let thumbnails = image.thumbnails(); 737 | let fallback_thumbnails = image.fallback_thumbnails(); 738 | assert_eq!(3, thumbnails.len()); 739 | assert_eq!(3, fallback_thumbnails.len()); 740 | 741 | image.id() 742 | }; 743 | 744 | // get 745 | let original_file_id = { 746 | let image = datalith.get_image_by_id(id).await.unwrap().unwrap(); 747 | 748 | assert_eq!("image", image.image_stem()); 749 | assert_eq!(32, 
image.image_width()); 750 | assert_eq!(32, image.image_height()); 751 | assert!(image.has_alpha_channel()); 752 | 753 | let original_file = image.original_file().unwrap(); 754 | assert_eq!(&mime::IMAGE_PNG, original_file.file_type()); 755 | assert_eq!(IMAGE_SIZE, original_file.file_size()); 756 | assert_eq!("image.png", original_file.file_name()); 757 | 758 | let thumbnails = image.thumbnails(); 759 | let fallback_thumbnails = image.fallback_thumbnails(); 760 | assert_eq!(3, thumbnails.len()); 761 | assert_eq!(3, fallback_thumbnails.len()); 762 | 763 | original_file.id() 764 | }; 765 | 766 | assert!(datalith.get_image_by_id(id).await.unwrap().is_some()); 767 | assert!(datalith.check_image_exist(id).await.unwrap()); 768 | 769 | // delete 770 | assert!(!datalith.delete_file_by_id(original_file_id).await.unwrap()); 771 | assert!(datalith.delete_image_by_id(id).await.unwrap()); 772 | assert!(!datalith.delete_image_by_id(id).await.unwrap()); 773 | assert!(datalith.list_file_ids(PaginationOptions::default()).await.unwrap().0.is_empty()); 774 | } 775 | 776 | { 777 | let id = { 778 | let mut file = File::open(IMAGE_PATH).await.unwrap(); 779 | 780 | let image = datalith 781 | .put_image_by_reader( 782 | &mut file, 783 | Some("image.png"), 784 | Some(32), 785 | None, 786 | None, 787 | true, 788 | Some(IMAGE_SIZE), 789 | ) 790 | .await 791 | .unwrap(); 792 | 793 | assert_eq!("image", image.image_stem()); 794 | assert_eq!(32, image.image_width()); 795 | assert_eq!(32, image.image_height()); 796 | assert!(image.has_alpha_channel()); 797 | 798 | let original_file = image.original_file().unwrap(); 799 | assert_eq!(&mime::IMAGE_PNG, original_file.file_type()); 800 | assert_eq!(IMAGE_SIZE, original_file.file_size()); 801 | assert_eq!("image.png", original_file.file_name()); 802 | 803 | let thumbnails = image.thumbnails(); 804 | let fallback_thumbnails = image.fallback_thumbnails(); 805 | assert_eq!(3, thumbnails.len()); 806 | assert_eq!(3, fallback_thumbnails.len()); 807 | 808 | 
image.id() 809 | }; 810 | 811 | // get 812 | let original_file_id = { 813 | let image = datalith.get_image_by_id(id).await.unwrap().unwrap(); 814 | 815 | assert_eq!("image", image.image_stem()); 816 | assert_eq!(32, image.image_width()); 817 | assert_eq!(32, image.image_height()); 818 | assert!(image.has_alpha_channel()); 819 | 820 | let original_file = image.original_file().unwrap(); 821 | assert_eq!(&mime::IMAGE_PNG, original_file.file_type()); 822 | assert_eq!(IMAGE_SIZE, original_file.file_size()); 823 | assert_eq!("image.png", original_file.file_name()); 824 | 825 | let thumbnails = image.thumbnails(); 826 | let fallback_thumbnails = image.fallback_thumbnails(); 827 | assert_eq!(3, thumbnails.len()); 828 | assert_eq!(3, fallback_thumbnails.len()); 829 | 830 | original_file.id() 831 | }; 832 | 833 | assert!(datalith.get_image_by_id(id).await.unwrap().is_some()); 834 | assert!(datalith.check_image_exist(id).await.unwrap()); 835 | 836 | // delete 837 | assert!(!datalith.delete_file_by_id(original_file_id).await.unwrap()); 838 | assert!(datalith.delete_image_by_id(id).await.unwrap()); 839 | assert!(!datalith.delete_image_by_id(id).await.unwrap()); 840 | assert!(datalith.list_file_ids(PaginationOptions::default()).await.unwrap().0.is_empty()); 841 | } 842 | 843 | datalith_close(datalith).await; 844 | } 845 | --------------------------------------------------------------------------------