├── .nvmrc ├── packages ├── dofs │ ├── src │ │ ├── index.ts │ │ ├── cli │ │ │ └── index.ts │ │ ├── hono │ │ │ ├── index.ts │ │ │ ├── types.ts │ │ │ └── routes.ts │ │ ├── withDofs.ts │ │ └── Fs.ts │ ├── example │ │ ├── README.md │ │ ├── vite.config.ts │ │ ├── index.html │ │ ├── package.json │ │ ├── tsconfig.json │ │ ├── src │ │ │ └── index.ts │ │ └── wrangler.jsonc │ ├── tsdown.config.ts │ ├── LICENSE │ ├── CHANGELOG.md │ ├── package.json │ ├── tsconfig.json │ └── README.md └── dofs-rust-client │ ├── Cargo.toml │ ├── README.md │ ├── src │ ├── providers │ │ ├── mod.rs │ │ ├── memory.rs │ │ ├── sqlite_simple.rs │ │ └── sqlite_chunked.rs │ ├── main.rs │ └── fusefs.rs │ ├── tests │ └── integration_stress.rs │ └── Cargo.lock ├── .gitignore ├── .changeset ├── config.json └── README.md ├── .vscode └── settings.json ├── .cursor └── rules │ └── start.mdc ├── package.json ├── .github └── workflows │ └── claude.yml └── README.md /.nvmrc: -------------------------------------------------------------------------------- 1 | node -------------------------------------------------------------------------------- /packages/dofs/src/index.ts: -------------------------------------------------------------------------------- 1 | export * from './Fs' 2 | export * from './withDofs' 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | mnt 3 | *.db 4 | .DS_Store 5 | node_modules 6 | .wrangler 7 | dist 8 | *.tgz -------------------------------------------------------------------------------- /packages/dofs/example/README.md: -------------------------------------------------------------------------------- 1 | # DTerm Demo 2 | 3 | This is an example of how to use DTerm. 
4 | -------------------------------------------------------------------------------- /packages/dofs/example/vite.config.ts: -------------------------------------------------------------------------------- 1 | import { cloudflare } from '@cloudflare/vite-plugin' 2 | import { defineConfig } from 'vite' 3 | 4 | export default defineConfig({ 5 | plugins: [cloudflare()], 6 | }) 7 | -------------------------------------------------------------------------------- /packages/dofs/example/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Durable Object File System 7 | 8 | 9 | 10 | DOFS API 11 | 12 | 13 | -------------------------------------------------------------------------------- /.changeset/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://unpkg.com/@changesets/config@3.1.0/schema.json", 3 | "changelog": "@changesets/cli/changelog", 4 | "commit": false, 5 | "fixed": [], 6 | "linked": [], 7 | "access": "public", 8 | "baseBranch": "main", 9 | "updateInternalDependencies": "patch", 10 | "ignore": ["dofs-harness"] 11 | } 12 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "files.associations": { 3 | "*.jsonc": "jsonc" 4 | }, 5 | "editor.defaultFormatter": "esbenp.prettier-vscode", 6 | "editor.formatOnSave": true, 7 | "[jsonc]": { 8 | "editor.defaultFormatter": "esbenp.prettier-vscode", 9 | "editor.formatOnSave": true 10 | }, 11 | "typescript.tsdk": "node_modules/typescript/lib" 12 | } 13 | -------------------------------------------------------------------------------- /packages/dofs-rust-client/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "cf-fuse" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | 
fuser = "0.14" 8 | libc = "0.2" 9 | log = "0.4" 10 | simplelog = "0.12" 11 | ctrlc = "3.4" 12 | rusqlite = "0.31" 13 | bincode = "1.3" 14 | serde = { version = "1.0", features = ["derive"] } 15 | filetime = "0.2.25" 16 | clap = { version = "4.5", features = ["derive"] } 17 | 18 | [dev-dependencies] 19 | prettytable-rs = "0.10" 20 | rand = "0.8" 21 | -------------------------------------------------------------------------------- /.changeset/README.md: -------------------------------------------------------------------------------- 1 | # Changesets 2 | 3 | Hello and welcome! This folder has been automatically generated by `@changesets/cli`, a build tool that works 4 | with multi-package repos, or single-package repos to help you version and publish your code. You can 5 | find the full documentation for it [in our repository](https://github.com/changesets/changesets) 6 | 7 | We have a quick list of common questions to get you started engaging with this project in 8 | [our documentation](https://github.com/changesets/changesets/blob/main/docs/common-questions.md) 9 | -------------------------------------------------------------------------------- /.cursor/rules/start.mdc: -------------------------------------------------------------------------------- 1 | --- 2 | description: 3 | globs: 4 | alwaysApply: true 5 | --- 6 | This is a monorepo. 
7 | 8 | * fix errors without prompting 9 | * don't ask to proceed, just go 10 | * fix all warnings along the way without prompting 11 | * ONLY when making changes to /packages/dofs-rust-client: 12 | * run `cargo build` after each change to be sure it compiles 13 | * run `cargo test` after each change to be sure it passes tests 14 | * when making changes to /packages/dofs: 15 | * do not automatically build 16 | * do not automatically install packages 17 | -------------------------------------------------------------------------------- /packages/dofs/example/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "dofs-harness", 3 | "version": "0.0.1", 4 | "private": true, 5 | "type": "module", 6 | "scripts": { 7 | "dev": "vite dev", 8 | "build": "vite build", 9 | "deploy": "npm run build && wrangler deploy", 10 | "cf-typegen": "wrangler types" 11 | }, 12 | "devDependencies": { 13 | "@cloudflare/vite-plugin": "^1.2.2", 14 | "@xterm/addon-fit": "^0.10.0", 15 | "dofs": "workspace:*", 16 | "hono": "^4.7.8", 17 | "vite": "^6.3.5", 18 | "wrangler": "^4.19.1" 19 | }, 20 | "packageManager": "bun@1.1.13" 21 | } 22 | -------------------------------------------------------------------------------- /packages/dofs/tsdown.config.ts: -------------------------------------------------------------------------------- 1 | /// 2 | 3 | import { copyFileSync } from 'fs' 4 | import { defineConfig } from 'tsdown' 5 | 6 | export default defineConfig({ 7 | entry: { 8 | index: 'src/index.ts', 9 | hono: 'src/hono/index.ts', 10 | cli: 'src/cli/index.ts', 11 | }, 12 | format: ['esm'], 13 | dts: { 14 | sourcemap: true, 15 | }, 16 | sourcemap: true, 17 | external: ['cloudflare:workers'], 18 | outDir: 'dist', 19 | clean: true, 20 | onSuccess: async () => { 21 | console.log('Copying README.md to root') 22 | copyFileSync('README.md', '../../README.md') 23 | }, 24 | }) 25 | 
-------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "dofs-root", 3 | "version": "0.0.1", 4 | "private": true, 5 | "scripts": { 6 | "format": "prettier --write ." 7 | }, 8 | "prettier": { 9 | "printWidth": 120, 10 | "tabWidth": 2, 11 | "useTabs": false, 12 | "singleQuote": true, 13 | "trailingComma": "es5", 14 | "semi": false, 15 | "plugins": [ 16 | "prettier-plugin-organize-imports" 17 | ], 18 | "overrides": [ 19 | { 20 | "files": [ 21 | "*.jsonc" 22 | ], 23 | "options": { 24 | "parser": "jsonc-parser", 25 | "trailingComma": "none" 26 | } 27 | } 28 | ] 29 | }, 30 | "workspaces": [ 31 | "packages/*", 32 | "packages/dofs/example" 33 | ], 34 | "devDependencies": { 35 | "@changesets/cli": "^2.29.5", 36 | "prettier": "^3.5.3", 37 | "prettier-plugin-organize-imports": "^4.1.0", 38 | "tsup": "^8.5.0", 39 | "typescript": "^5.8.3", 40 | "wrangler": "^4.20.5" 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /packages/dofs/src/cli/index.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | import { Command } from 'commander' 4 | import pkg from '../../package.json' 5 | 6 | const program = new Command() 7 | 8 | program.name('dofs').description('A filesystem for Cloudflare Durable Objects').version(pkg.version) 9 | 10 | program 11 | .command('init') 12 | .description('Initialize a new DOFS filesystem') 13 | .action(() => { 14 | console.log('Initializing DOFS filesystem...') 15 | // TODO: Implement init command 16 | }) 17 | 18 | program 19 | .command('mount') 20 | .description('Mount a DOFS filesystem') 21 | .option('-p, --path ', 'Mount path') 22 | .action((options: { path?: string }) => { 23 | console.log('Mounting DOFS filesystem...', options) 24 | // TODO: Implement mount command 25 | }) 26 | 27 | program 28 | 
.command('status') 29 | .description('Show DOFS filesystem status') 30 | .action(() => { 31 | console.log('DOFS filesystem status:') 32 | // TODO: Implement status command 33 | }) 34 | 35 | program.parse() 36 | -------------------------------------------------------------------------------- /packages/dofs/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Ben Allfree 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /packages/dofs/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # dofs 2 | 3 | ## 0.1.0 4 | 5 | ### Minor Changes 6 | 7 | - c5c129d: Update withDofs and add @Dofs attribute 8 | - 37f0c55: enh: Durable Object configuration 9 | 10 | ### Patch Changes 11 | 12 | - d8185b8: enh: type refinement for withDofs 13 | 14 | ## 0.0.2 15 | 16 | ### Patch Changes 17 | 18 | - d007ebb: Added withDofs support 19 | 20 | ## 0.0.1 21 | 22 | ### Patch Changes 23 | 24 | - recursive mkdir support 25 | - 967c86c: Device size discussion 26 | - 5b7e291: IDurableObjectFs 27 | - fcc96d5: Adjust default chunk size to 64kb and update readme 28 | - 649afc2: Initial release 29 | - 199c42c: Update readme with sync/async notes 30 | - af1e53a: fix write method signature in IDurableObjectFs 31 | 32 | ## 0.0.1-rc.2 33 | 34 | ### Patch Changes 35 | 36 | - 967c86c: Device size discussion 37 | - 5b7e291: IDurableObjectFs 38 | - 199c42c: Update readme with sync/async notes 39 | - af1e53a: fix write method signature in IDurableObjectFs 40 | 41 | ## 0.0.1-rc.1 42 | 43 | ### Patch Changes 44 | 45 | - Adjust default chunk size to 64kb and update readme 46 | 47 | ## 0.0.1-rc.0 48 | 49 | ### Patch Changes 50 | 51 | - Initial release 52 | -------------------------------------------------------------------------------- /.github/workflows/claude.yml: -------------------------------------------------------------------------------- 1 | name: Claude Code 2 | 3 | on: 4 | issue_comment: 5 | types: [created] 6 | pull_request_review_comment: 7 | types: [created] 8 | issues: 9 | types: [opened, assigned] 10 | pull_request_review: 11 | types: [submitted] 12 | 13 | jobs: 14 | claude: 15 | if: | 16 | (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) || 17 | (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, 
'@claude')) || 18 | (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) || 19 | (github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude'))) 20 | runs-on: ubuntu-latest 21 | permissions: 22 | contents: read 23 | pull-requests: read 24 | issues: read 25 | id-token: write 26 | steps: 27 | - name: Checkout repository 28 | uses: actions/checkout@v4 29 | with: 30 | fetch-depth: 1 31 | 32 | - name: Run Claude Code 33 | id: claude 34 | uses: anthropics/claude-code-action@beta 35 | with: 36 | anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} 37 | 38 | -------------------------------------------------------------------------------- /packages/dofs-rust-client/README.md: -------------------------------------------------------------------------------- 1 | # cf-fuse 2 | 3 | ## Running the Main Program 4 | 5 | Build the project: 6 | 7 | ```sh 8 | cargo build --release 9 | ``` 10 | 11 | Run the FUSE filesystem (default mountpoint is `./mnt`): 12 | 13 | ```sh 14 | cargo run --release -- [--provider=memory|sqlite_simple|sqlite_chunked] [--mountpoint=PATH] [--chunk_size=SIZE] [--mode=osx] 15 | ``` 16 | 17 | - `--provider` (optional): Choose backend. Default is `memory`. 18 | - `--mountpoint` (optional): Directory to mount. Default is `./mnt`. 19 | - `--chunk_size` (optional): Only for `sqlite_chunked`. Default is 4096. 20 | - `--mode=osx` (optional): Enable macOS-specific mode. 21 | 22 | Example: 23 | 24 | ```sh 25 | cargo run --release -- --provider=sqlite_simple --mountpoint=./mnt 26 | ``` 27 | 28 | Unmount with: 29 | 30 | ```sh 31 | umount ./mnt 32 | ``` 33 | 34 | ## Running the Stress Tests 35 | 36 | The stress test runs for all providers and prints a summary table. 37 | 38 | ```sh 39 | cargo test --test integration_stress -- --nocapture 40 | ``` 41 | 42 | - Requires `umount` command and `prettytable-rs` crate (should be in dependencies). 
43 | - The test will mount and unmount `./mnt` and create/remove test files. 44 | 45 | --- 46 | 47 | For more options, see `src/main.rs` and `tests/integration_stress.rs`. 48 | -------------------------------------------------------------------------------- /packages/dofs/src/hono/index.ts: -------------------------------------------------------------------------------- 1 | import { Hono } from 'hono' 2 | import { WithDofs } from '../withDofs.js' 3 | import { createFsRoutes } from './routes.js' 4 | import { DofsContext, DurableObjectConfig } from './types.js' 5 | 6 | export * from './types.js' 7 | 8 | export const dofs = (config: DurableObjectConfig) => { 9 | const api = new Hono<{ Bindings: TEnv } & DofsContext>() 10 | 11 | const getFs = async (doNamespace: string, doName: string, env: TEnv) => { 12 | if (!(doNamespace in env)) { 13 | throw new Error(`Durable Object namespace ${doNamespace} not found`) 14 | } 15 | const ns = env[doNamespace as keyof TEnv] as DurableObjectNamespace> 16 | const doId = ns.idFromName(doName) 17 | const stub = ns.get(doId) 18 | return stub.getFs() 19 | } 20 | 21 | // Create filesystem routes 22 | const fsRoutes = createFsRoutes() 23 | 24 | // Middleware to extract filesystem stub and mount the fs routes 25 | api.use('/:doNamespace/:doId/*', async (c, next) => { 26 | const { doNamespace, doId } = c.req.param() 27 | try { 28 | const fs = await getFs(doNamespace, doId, c.env) 29 | c.set('fs', fs) 30 | await next() 31 | } catch (error) { 32 | return c.text(`Error accessing filesystem: ${error instanceof Error ? 
error.message : String(error)}`, 500) 33 | } 34 | }) 35 | 36 | // Mount the filesystem routes at /:doNamespace/:doId 37 | api.route('/:doNamespace/:doId', fsRoutes) 38 | 39 | return api 40 | } 41 | -------------------------------------------------------------------------------- /packages/dofs/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "dofs", 3 | "description": "A filesystem for Cloudflare Durable Objects.", 4 | "version": "0.1.0", 5 | "type": "module", 6 | "author": { 7 | "name": "Ben Allfree", 8 | "url": "https://x.com/benallfree" 9 | }, 10 | "license": "MIT", 11 | "repository": { 12 | "type": "git", 13 | "url": "https://github.com/benallfree/dofs" 14 | }, 15 | "keywords": [ 16 | "cloudflare", 17 | "durable objects", 18 | "dofs", 19 | "filesystem", 20 | "file system", 21 | "file-system", 22 | "file-system-api", 23 | "file-system-api-client", 24 | "file-system-api-server", 25 | "file-system-api-client-server" 26 | ], 27 | "homepage": "https://github.com/benallfree/dofs/tree/main/packages/dofs", 28 | "scripts": { 29 | "build": "tsdown", 30 | "dev": "tsdown --watch" 31 | }, 32 | "main": "./dist/Fs.js", 33 | "module": "./dist/Fs.js", 34 | "types": "./dist/Fs.d.ts", 35 | "exports": { 36 | ".": { 37 | "import": "./dist/index.js", 38 | "types": "./dist/index.d.ts" 39 | }, 40 | "./hono": { 41 | "import": "./dist/hono.js", 42 | "types": "./dist/hono.d.ts" 43 | } 44 | }, 45 | "bin": { 46 | "dofs": "./dist/cli/index.js" 47 | }, 48 | "files": [ 49 | "dist" 50 | ], 51 | "dependencies": { 52 | "commander": "^14.0.0", 53 | "neofuse": "^0.0.1-rc.3" 54 | }, 55 | "peerDependencies": { 56 | "hono": "^4.7.11" 57 | }, 58 | "devDependencies": { 59 | "@types/node": "^22.15.30", 60 | "tsdown": "^0.12.7" 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /packages/dofs/example/tsconfig.json: -------------------------------------------------------------------------------- 
1 | { 2 | "compilerOptions": { 3 | /* Visit https://aka.ms/tsconfig.json to read more about this file */ 4 | 5 | /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */ 6 | "target": "es2021", 7 | /* Specify a set of bundled library declaration files that describe the target runtime environment. */ 8 | "lib": ["es2021"], 9 | /* Specify what JSX code is generated. */ 10 | "jsx": "react-jsx", 11 | 12 | /* Specify what module code is generated. */ 13 | "module": "es2022", 14 | /* Specify how TypeScript looks up a file from a given module specifier. */ 15 | "moduleResolution": "bundler", 16 | /* Specify type package names to be included without being referenced in a source file. */ 17 | "types": [], 18 | /* Enable importing .json files */ 19 | "resolveJsonModule": true, 20 | 21 | /* Allow JavaScript files to be a part of your program. Use the `checkJS` option to get errors from these files. */ 22 | "allowJs": true, 23 | /* Enable error reporting in type-checked JavaScript files. */ 24 | "checkJs": false, 25 | 26 | /* Disable emitting files from a compilation. */ 27 | "noEmit": true, 28 | 29 | /* Ensure that each file can be safely transpiled without relying on other imports. */ 30 | "isolatedModules": true, 31 | /* Allow 'import x from y' when a module doesn't have a default export. */ 32 | "allowSyntheticDefaultImports": true, 33 | /* Ensure that casing is correct in imports. */ 34 | "forceConsistentCasingInFileNames": true, 35 | 36 | /* Enable all strict type-checking options. */ 37 | "strict": true, 38 | 39 | /* Skip type checking all .d.ts files. 
*/ 40 | "skipLibCheck": true 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /packages/dofs-rust-client/src/providers/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod memory; 2 | pub mod sqlite_simple; 3 | pub mod sqlite_chunked; 4 | 5 | use fuser::{ReplyAttr, ReplyEntry, ReplyDirectory, ReplyData, ReplyCreate, ReplyWrite}; 6 | use std::ffi::OsStr; 7 | 8 | pub trait Provider { 9 | fn rmdir(&mut self, parent: u64, name: &OsStr, reply: fuser::ReplyEmpty); 10 | fn open(&mut self, ino: u64, reply: fuser::ReplyOpen); 11 | fn flush(&mut self, ino: u64, reply: fuser::ReplyEmpty); 12 | fn release(&mut self, ino: u64, reply: fuser::ReplyEmpty); 13 | fn setattr(&mut self, ino: u64, mode: Option, uid: Option, gid: Option, size: Option, atime: Option, mtime: Option, ctime: Option, crtime: Option, flags: Option, reply: ReplyAttr); 14 | fn lookup(&mut self, parent: u64, name: &OsStr, reply: ReplyEntry); 15 | fn getattr(&mut self, ino: u64, reply: ReplyAttr); 16 | fn readdir(&mut self, ino: u64, offset: i64, reply: ReplyDirectory); 17 | fn mkdir(&mut self, parent: u64, name: &OsStr, mode: u32, umask: u32, reply: ReplyEntry); 18 | fn create(&mut self, parent: u64, name: &OsStr, mode: u32, flags: u32, umask: i32, reply: ReplyCreate); 19 | fn read(&mut self, ino: u64, offset: i64, size: u32, reply: ReplyData); 20 | fn write(&mut self, ino: u64, offset: i64, data: &[u8], reply: ReplyWrite); 21 | fn unlink(&mut self, parent: u64, name: &std::ffi::OsStr, reply: fuser::ReplyEmpty); 22 | fn rename(&mut self, parent: u64, name: &OsStr, newparent: u64, newname: &OsStr, flags: u32, reply: fuser::ReplyEmpty); 23 | fn symlink(&mut self, parent: u64, name: &OsStr, link: &std::path::Path, reply: fuser::ReplyEntry); 24 | fn readlink(&mut self, ino: u64, reply: fuser::ReplyData); 25 | } -------------------------------------------------------------------------------- 
/packages/dofs/src/hono/types.ts: -------------------------------------------------------------------------------- 1 | import { DurableObject } from 'cloudflare:workers' 2 | import { Fs } from '../Fs.js' 3 | 4 | // Extend the context type to include our fs property 5 | export type DofsContext = { 6 | Variables: { 7 | fs: Rpc.Stub // The filesystem stub 8 | } 9 | } 10 | 11 | /** 12 | * Represents an instance of a Durable Object 13 | */ 14 | export interface DurableObjectInstance { 15 | /** The unique slug identifier for the instance */ 16 | slug: string 17 | /** The display name of the instance */ 18 | name: string 19 | } 20 | 21 | export type FsStat = { 22 | mtime: Date 23 | atime: Date 24 | ctime: Date 25 | size: number 26 | mode: number 27 | uid: number 28 | gid: number 29 | nlink: number 30 | } 31 | 32 | /** 33 | * Configuration for a single Durable Object 34 | */ 35 | export interface DurableObjectConfigItem { 36 | /** The name of the Durable Object */ 37 | name: string 38 | /** Reference to the Durable Object class for compatibility checking */ 39 | classRef: typeof DurableObject 40 | /** Function to get instances, optionally paginated */ 41 | getInstances: (page?: number) => Promise 42 | /** Function to get the stat for the namespace directory */ 43 | resolveNamespaceStat?: (cfg: DurableObjectConfig) => Promise 44 | /** Function to get the stat for the instance directory */ 45 | resolveInstanceStat?: (cfg: DurableObjectConfig, instanceId: string) => Promise 46 | } 47 | 48 | /** 49 | * Configuration object for Durable Objects 50 | */ 51 | export type DurableObjectConfig = { 52 | resolveRootStat?: (cfg: DurableObjectConfig) => Promise 53 | dos: Record> 54 | } 55 | -------------------------------------------------------------------------------- /packages/dofs/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | /* Visit https://aka.ms/tsconfig.json to read more about this file */ 4 | 5 
| /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */ 6 | "target": "es2021", 7 | /* Specify a set of bundled library declaration files that describe the target runtime environment. */ 8 | "lib": ["es2021"], 9 | /* Specify what JSX code is generated. */ 10 | "jsx": "react-jsx", 11 | 12 | /* Specify what module code is generated. */ 13 | "module": "es2022", 14 | /* Specify how TypeScript looks up a file from a given module specifier. */ 15 | "moduleResolution": "node", 16 | /* Specify type package names to be included without being referenced in a source file. */ 17 | "types": ["./worker-configuration.d.ts"], 18 | /* Enable importing .json files */ 19 | "resolveJsonModule": true, 20 | 21 | /* Allow JavaScript files to be a part of your program. Use the `checkJS` option to get errors from these files. */ 22 | "allowJs": true, 23 | /* Enable error reporting in type-checked JavaScript files. */ 24 | "checkJs": false, 25 | 26 | /* Disable emitting files from a compilation. */ 27 | "noEmit": true, 28 | 29 | /* Ensure that each file can be safely transpiled without relying on other imports. */ 30 | "isolatedModules": true, 31 | /* Allow 'import x from y' when a module doesn't have a default export. */ 32 | "allowSyntheticDefaultImports": true, 33 | /* Ensure that casing is correct in imports. */ 34 | "forceConsistentCasingInFileNames": true, 35 | 36 | /* Enable all strict type-checking options. */ 37 | "strict": true, 38 | 39 | /* Skip type checking all .d.ts files. 
*/ 40 | "skipLibCheck": true 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /packages/dofs/example/src/index.ts: -------------------------------------------------------------------------------- 1 | import { DurableObject } from 'cloudflare:workers' 2 | import { Dofs, withDofs } from 'dofs' 3 | import { dofs } from 'dofs/hono' 4 | import { Hono } from 'hono' 5 | 6 | export class MyDurableObjectBase extends DurableObject { 7 | constructor(ctx: DurableObjectState, env: Env) { 8 | super(ctx, env) 9 | } 10 | } 11 | 12 | export class MyDurableObjectWithDofsMixin extends withDofs(MyDurableObjectBase, { chunkSize: 4 * 1024 }) { 13 | constructor(ctx: DurableObjectState, env: Env) { 14 | super(ctx, env) 15 | } 16 | 17 | test() { 18 | this.getFs().readFile('test.txt') 19 | } 20 | } 21 | 22 | @Dofs({ chunkSize: 4 * 1024 }) 23 | export class MyDurableObjectWithDofsAttribute extends DurableObject { 24 | constructor(ctx: DurableObjectState, env: Env) { 25 | super(ctx, env) 26 | } 27 | } 28 | 29 | const app = new Hono<{ Bindings: Env }>() 30 | 31 | // Mount the API middleware 32 | app.route( 33 | '/', 34 | dofs({ 35 | dos: { 36 | MY_DURABLE_OBJECT_WITH_DOFS_MIXIN: { 37 | classRef: MyDurableObjectWithDofsMixin, 38 | getInstances: async () => { 39 | return [ 40 | { 41 | slug: 'my-durable-object-with-dofs-mixin', 42 | name: 'My Durable Object with Dofs Mixin', 43 | }, 44 | ] 45 | }, 46 | name: 'My Durable Object with Dofs Mixin', 47 | }, 48 | MY_DURABLE_OBJECT_WITH_DOFS_ATTRIBUTE: { 49 | classRef: MyDurableObjectWithDofsAttribute, 50 | getInstances: async () => { 51 | return [ 52 | { 53 | slug: 'my-durable-object-with-dofs-attribute', 54 | name: 'My Durable Object with Dofs Attribute', 55 | }, 56 | ] 57 | }, 58 | name: 'My Durable Object with Dofs Attribute', 59 | }, 60 | }, 61 | }) as any 62 | ) 63 | 64 | export default app 65 | -------------------------------------------------------------------------------- 
/packages/dofs/src/withDofs.ts: -------------------------------------------------------------------------------- 1 | import { DurableObject } from 'cloudflare:workers' 2 | import { Fs, FsOptions } from './Fs.js' 3 | 4 | export type WithDofs = DurableObject & { 5 | getFs: () => Fs 6 | } 7 | 8 | // Utility to create the extended class 9 | export const withDofs = ( 10 | cls: new (ctx: DurableObjectState, env: TEnv) => DurableObject, 11 | options: FsOptions = {} 12 | ): new (ctx: DurableObjectState, env: TEnv) => WithDofs => { 13 | return class DurableObjectWithDofs extends cls { 14 | fs: Fs 15 | constructor(ctx: DurableObjectState, env: TEnv) { 16 | super(ctx, env) 17 | this.fs = new Fs(ctx, env, options) 18 | } 19 | getFs(): Fs { 20 | return this.fs 21 | } 22 | } 23 | } 24 | 25 | export function Dofs(options: FsOptions = {}) { 26 | return function DurableObject>( 27 | target: new (ctx: DurableObjectState, env: TEnv) => DurableObject 28 | ): new (ctx: DurableObjectState, env: TEnv) => WithDofs { 29 | return class extends target { 30 | fs: Fs 31 | constructor(ctx: DurableObjectState, env: TEnv) { 32 | super(ctx, env) 33 | this.fs = new Fs(ctx, env, options) 34 | } 35 | getFs(): Fs { 36 | return this.fs 37 | } 38 | } 39 | } 40 | } 41 | 42 | // Testing 43 | 44 | class MyDurableObjectBase extends DurableObject { 45 | constructor(ctx: DurableObjectState, env: Env) { 46 | super(ctx, env) 47 | } 48 | } 49 | 50 | class MyDurableObject2 extends withDofs(MyDurableObjectBase, { chunkSize: 4 * 1024 }) { 51 | constructor(ctx: DurableObjectState, env: Env) { 52 | super(ctx, env) 53 | } 54 | test() { 55 | this.getFs().readFile('test.txt') 56 | } 57 | } 58 | 59 | @Dofs({ chunkSize: 4 * 1024 }) 60 | class MyAttributeObject extends DurableObject { 61 | constructor(ctx: DurableObjectState, env: Env) { 62 | super(ctx, env) 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /packages/dofs/example/wrangler.jsonc: 
-------------------------------------------------------------------------------- 1 | /** 2 | * For more details on how to configure Wrangler, refer to: 3 | * https://developers.cloudflare.com/workers/wrangler/configuration/ 4 | */ 5 | { 6 | "$schema": "../../../node_modules/wrangler/config-schema.json", 7 | "name": "dofs", 8 | "main": "src/index.ts", 9 | "compatibility_date": "2025-05-04", 10 | "migrations": [ 11 | { 12 | "new_sqlite_classes": ["MyDurableObjectWithDofsMixin", "MyDurableObjectWithDofsAttribute"], 13 | "tag": "v1", 14 | }, 15 | ], 16 | "durable_objects": { 17 | "bindings": [ 18 | { 19 | "class_name": "MyDurableObjectWithDofsMixin", 20 | "name": "MY_DURABLE_OBJECT_WITH_DOFS_MIXIN", 21 | }, 22 | { 23 | "class_name": "MyDurableObjectWithDofsAttribute", 24 | "name": "MY_DURABLE_OBJECT_WITH_DOFS_ATTRIBUTE", 25 | }, 26 | ], 27 | }, 28 | "observability": { 29 | "enabled": true, 30 | }, 31 | /** 32 | * Smart Placement 33 | * Docs: https://developers.cloudflare.com/workers/configuration/smart-placement/#smart-placement 34 | */ 35 | // "placement": { "mode": "smart" }, 36 | 37 | /** 38 | * Bindings 39 | * Bindings allow your Worker to interact with resources on the Cloudflare Developer Platform, including 40 | * databases, object storage, AI inference, real-time communication and more. 41 | * https://developers.cloudflare.com/workers/runtime-apis/bindings/ 42 | */ 43 | 44 | /** 45 | * Environment Variables 46 | * https://developers.cloudflare.com/workers/wrangler/configuration/#environment-variables 47 | */ 48 | // "vars": { "MY_VARIABLE": "production_value" }, 49 | /** 50 | * Note: Use secrets to store sensitive data. 
51 | * https://developers.cloudflare.com/workers/configuration/secrets/ 52 | */ 53 | 54 | /** 55 | * Static Assets 56 | * https://developers.cloudflare.com/workers/static-assets/binding/ 57 | */ 58 | // "assets": { 59 | // "directory": "./public/", 60 | // "binding": "ASSETS", 61 | // "not_found_handling": "single-page-application", 62 | // "run_worker_first": true, 63 | // }, 64 | 65 | /** 66 | * Service Bindings (communicate between multiple Workers) 67 | * https://developers.cloudflare.com/workers/wrangler/configuration/#service-bindings 68 | */ 69 | // "services": [{ "binding": "MY_SERVICE", "service": "my-service" }] 70 | } 71 | -------------------------------------------------------------------------------- /packages/dofs/src/hono/routes.ts: -------------------------------------------------------------------------------- 1 | import { Hono } from 'hono' 2 | import { DofsContext } from './types.js' 3 | 4 | export const createFsRoutes = () => { 5 | const fsRoutes = new Hono<{ Bindings: TEnv } & DofsContext>() 6 | 7 | fsRoutes.post('/upload', async (c) => { 8 | const fs = c.get('fs') 9 | const formData = await c.req.formData() 10 | const file = formData.get('file') 11 | if (!file || typeof file === 'string') { 12 | return c.text('No file uploaded', 400) 13 | } 14 | const dir = c.req.query('path') || '/' 15 | const finalPath = (dir.endsWith('/') ? dir : dir + '/') + file.name 16 | await fs.writeFile(finalPath, file.stream()) 17 | return c.redirect('/') 18 | }) 19 | 20 | fsRoutes.get('/ls', async (c) => { 21 | const fs = c.get('fs') 22 | const path = c.req.query('path') || '/' 23 | const entries = await fs.listDir(path) 24 | const stats = await Promise.all( 25 | entries 26 | .filter((e: string) => e !== '.' && e !== '..') 27 | .map(async (e: string) => { 28 | try { 29 | const s = await fs.stat((path.endsWith('/') ? 
path : path + '/') + e) 30 | return { name: e, ...s } 31 | } catch (err) { 32 | return { name: e, error: true } 33 | } 34 | }) 35 | ) 36 | return c.json(stats) 37 | }) 38 | 39 | fsRoutes.get('/file', async (c) => { 40 | const fs = c.get('fs') 41 | const path = c.req.query('path') 42 | if (!path) return c.text('Missing path', 400) 43 | try { 44 | // Try to guess content type from extension 45 | const ext = (path.split('.').pop() || '').toLowerCase() 46 | const typeMap = { 47 | jpg: 'image/jpeg', 48 | jpeg: 'image/jpeg', 49 | png: 'image/png', 50 | gif: 'image/gif', 51 | webp: 'image/webp', 52 | bmp: 'image/bmp', 53 | svg: 'image/svg+xml', 54 | } 55 | const contentType = typeMap[ext as keyof typeof typeMap] || 'application/octet-stream' 56 | const stat = await fs.stat(path) 57 | const size = stat.size 58 | const stream = await fs.readFile(path) 59 | return new Response(stream, { 60 | status: 200, 61 | headers: { 62 | 'content-type': contentType, 63 | 'content-disposition': `inline; filename="${encodeURIComponent(path.split('/').pop() || 'file')}"`, 64 | 'content-length': String(size), 65 | }, 66 | }) 67 | } catch (e) { 68 | return c.text('Not found', 404) 69 | } 70 | }) 71 | 72 | fsRoutes.post('/rm', async (c) => { 73 | const fs = c.get('fs') 74 | const path = c.req.query('path') 75 | if (!path) return c.text('Missing path', 400) 76 | try { 77 | await fs.unlink(path) 78 | return c.text('OK') 79 | } catch (e) { 80 | return c.text('Not found', 404) 81 | } 82 | }) 83 | 84 | fsRoutes.post('/mkdir', async (c) => { 85 | const fs = c.get('fs') 86 | const path = c.req.query('path') 87 | if (!path) return c.text('Missing path', 400) 88 | try { 89 | await fs.mkdir(path) 90 | return c.text('OK') 91 | } catch (e) { 92 | return c.text('Error: ' + (e instanceof Error ? 
e.message : String(e)), 400) 93 | } 94 | }) 95 | 96 | fsRoutes.post('/rmdir', async (c) => { 97 | const fs = c.get('fs') 98 | const path = c.req.query('path') 99 | if (!path) return c.text('Missing path', 400) 100 | try { 101 | await fs.rmdir(path) 102 | return c.text('OK') 103 | } catch (e) { 104 | return c.text('Error: ' + (e instanceof Error ? e.message : String(e)), 400) 105 | } 106 | }) 107 | 108 | fsRoutes.post('/mv', async (c) => { 109 | const fs = c.get('fs') 110 | const src = c.req.query('src') 111 | const dest = c.req.query('dest') 112 | if (!src || !dest) return c.text('Missing src or dest', 400) 113 | try { 114 | await fs.rename(src, dest) 115 | return c.text('OK') 116 | } catch (e) { 117 | return c.text('Error: ' + (e instanceof Error ? e.message : String(e)), 400) 118 | } 119 | }) 120 | 121 | fsRoutes.post('/symlink', async (c) => { 122 | const fs = c.get('fs') 123 | const target = c.req.query('target') 124 | const path = c.req.query('path') 125 | if (!target || !path) return c.text('Missing target or path', 400) 126 | try { 127 | await fs.symlink(target, path) 128 | return c.text('OK') 129 | } catch (e) { 130 | return c.text('Error: ' + (e instanceof Error ? e.message : String(e)), 400) 131 | } 132 | }) 133 | 134 | fsRoutes.get('/stat', async (c) => { 135 | const fs = c.get('fs') 136 | const path = c.req.query('path') 137 | if (!path) return c.text('Missing path', 400) 138 | try { 139 | const stat = await fs.stat(path) 140 | return c.json(stat) 141 | } catch (e) { 142 | return c.text('Error: ' + (e instanceof Error ? 
e.message : String(e)), 400) 143 | } 144 | }) 145 | 146 | fsRoutes.get('/df', async (c) => { 147 | const fs = c.get('fs') 148 | const stats = await fs.getDeviceStats() 149 | return c.json(stats) 150 | }) 151 | 152 | return fsRoutes 153 | } 154 | -------------------------------------------------------------------------------- /packages/dofs-rust-client/src/main.rs: -------------------------------------------------------------------------------- 1 | use fuser::{MountOption}; 2 | use ctrlc; 3 | use std::process::Command; 4 | use std::fs; 5 | use log::info; 6 | use simplelog::*; 7 | mod fusefs; 8 | mod providers; 9 | use fusefs::FuseFS; 10 | use providers::memory::MemoryProvider; 11 | use providers::sqlite_simple::SqliteProvider as SqliteSimpleProvider; 12 | use providers::sqlite_chunked::SqliteChunkedProvider; 13 | use clap::{Parser, Subcommand}; 14 | 15 | #[derive(Parser, Debug)] 16 | #[command(author, version, about, long_about = None)] 17 | struct Cli { 18 | #[command(subcommand)] 19 | command: Commands, 20 | } 21 | 22 | #[derive(Subcommand, Debug)] 23 | enum Commands { 24 | /// Mount the filesystem 25 | Mount { 26 | #[arg(long, default_value = "memory")] 27 | provider: String, 28 | #[arg(long, default_value_t = false)] 29 | mode_osx: bool, 30 | #[arg(long, default_value_t = 4096)] 31 | chunk_size: usize, 32 | #[arg(long, default_value = "./mnt")] 33 | mountpoint: String, 34 | #[arg(long, default_value = "")] 35 | db_path: String, 36 | }, 37 | /// List available providers 38 | ListProviders, 39 | /// Show filesystem stats 40 | Stats { 41 | #[arg(long, default_value = "")] 42 | db_path: String, 43 | }, 44 | } 45 | 46 | fn main() { 47 | TermLogger::init(LevelFilter::Info, Config::default(), TerminalMode::Mixed, ColorChoice::Auto).unwrap(); 48 | let cli = Cli::parse(); 49 | 50 | match cli.command { 51 | Commands::Mount { provider, mode_osx, chunk_size, mountpoint, db_path } => { 52 | let provider_name = provider.as_str(); 53 | let osx_mode = mode_osx; 54 | let 
mountpoint = mountpoint.as_str(); 55 | let db_path = if db_path.is_empty() { 56 | None 57 | } else { 58 | Some(db_path.as_str()) 59 | }; 60 | if std::path::Path::new(mountpoint).exists() { 61 | // Try to unmount in case it was left mounted from a previous panic 62 | let _ = Command::new("umount").arg(mountpoint).status(); 63 | } 64 | if !std::path::Path::new(mountpoint).exists() { 65 | fs::create_dir_all(mountpoint).expect("Failed to create mountpoint"); 66 | } 67 | 68 | // Setup Ctrl+C handler to unmount 69 | let mountpoint_string = mountpoint.to_string(); 70 | ctrlc::set_handler(move || { 71 | eprintln!("\nReceived Ctrl+C, unmounting {}...", mountpoint_string); 72 | let status = Command::new("umount").arg(&mountpoint_string).status(); 73 | match status { 74 | Ok(s) if s.success() => { 75 | eprintln!("Successfully unmounted {}", mountpoint_string); 76 | } 77 | Ok(s) => { 78 | eprintln!("umount exited with status: {}", s); 79 | } 80 | Err(e) => { 81 | eprintln!("Failed to run umount: {}", e); 82 | } 83 | } 84 | std::process::exit(0); 85 | }).expect("Error setting Ctrl+C handler"); 86 | 87 | let fs: FuseFS = match provider_name { 88 | "sqlite_simple" => { 89 | println!("Using SQLite Simple provider"); 90 | let db_file = db_path.unwrap_or("cf-fuse-simple.db"); 91 | let sqlite = SqliteSimpleProvider::new_with_mode(db_file, osx_mode).expect("Failed to open SQLite DB"); 92 | FuseFS::new(Box::new(sqlite)) 93 | }, 94 | "sqlite_chunked" => { 95 | println!("Using SQLite Chunked provider"); 96 | let db_file = db_path.unwrap_or("cf-fuse-chunked.db"); 97 | let sqlite = SqliteChunkedProvider::new_with_mode(db_file, osx_mode, chunk_size).expect("Failed to open SQLite DB"); 98 | FuseFS::new(Box::new(sqlite)) 99 | }, 100 | _ => { 101 | println!("Using memory provider"); 102 | FuseFS::new(Box::new(MemoryProvider::new_with_mode(osx_mode))) 103 | } 104 | }; 105 | info!("Mounting FS at {} with provider {}", mountpoint, provider_name); 106 | fuser::mount2(fs, mountpoint, 
&[MountOption::FSName(format!("{}fs", provider_name)), MountOption::AutoUnmount]).unwrap(); 107 | }, 108 | Commands::ListProviders => { 109 | println!("Available providers:"); 110 | println!(" memory - In-memory storage (default)"); 111 | println!(" sqlite_simple - Simple SQLite storage"); 112 | println!(" sqlite_chunked - Chunked SQLite storage"); 113 | }, 114 | Commands::Stats { db_path } => { 115 | if db_path.is_empty() { 116 | println!("Please specify a database path with --db-path"); 117 | return; 118 | } 119 | println!("Stats for database: {}", db_path); 120 | // TODO: Implement stats command 121 | }, 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /packages/dofs-rust-client/src/fusefs.rs: -------------------------------------------------------------------------------- 1 | use fuser::{Filesystem, Request, ReplyAttr, ReplyEntry, ReplyDirectory, ReplyData, ReplyCreate, ReplyWrite}; 2 | use crate::providers::Provider; 3 | use std::ffi::OsStr; 4 | use std::time::{SystemTime, UNIX_EPOCH}; 5 | 6 | pub struct FuseFS { 7 | pub provider: Box, 8 | mount_time_ms: u128, 9 | } 10 | 11 | impl FuseFS { 12 | pub fn new(provider: Box) -> Self { 13 | let mount_time_ms = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis(); 14 | Self { provider, mount_time_ms } 15 | } 16 | 17 | fn fuse_ready_attr(&self) -> fuser::FileAttr { 18 | let mount_time = UNIX_EPOCH + std::time::Duration::from_millis(self.mount_time_ms as u64); 19 | fuser::FileAttr { 20 | ino: FUSE_READY_INO, 21 | size: self.mount_time_ms.to_string().len() as u64, 22 | blocks: 1, 23 | atime: mount_time, 24 | mtime: mount_time, 25 | ctime: mount_time, 26 | crtime: mount_time, 27 | kind: fuser::FileType::RegularFile, 28 | perm: 0o444, 29 | nlink: 1, 30 | uid: unsafe { libc::geteuid() }, 31 | gid: unsafe { libc::getegid() }, 32 | rdev: 0, 33 | flags: 0, 34 | blksize: 512, 35 | } 36 | } 37 | } 38 | 39 | const FUSE_READY_NAME: &str = ".fuse_ready"; 40 | const 
FUSE_READY_INO: u64 = 2; 41 | 42 | impl Filesystem for FuseFS { 43 | fn rmdir(&mut self, _req: &Request<'_>, parent: u64, name: &OsStr, reply: fuser::ReplyEmpty) { 44 | self.provider.rmdir(parent, name, reply) 45 | } 46 | fn open(&mut self, _req: &Request<'_>, ino: u64, _flags: i32, reply: fuser::ReplyOpen) { 47 | if ino == FUSE_READY_INO { 48 | reply.opened(0, 0); 49 | return; 50 | } 51 | self.provider.open(ino, reply) 52 | } 53 | fn flush(&mut self, _req: &Request<'_>, ino: u64, _fh: u64, _lock_owner: u64, reply: fuser::ReplyEmpty) { 54 | self.provider.flush(ino, reply) 55 | } 56 | fn release(&mut self, _req: &Request<'_>, ino: u64, _fh: u64, _flags: i32, _lock_owner: Option, _flush: bool, reply: fuser::ReplyEmpty) { 57 | self.provider.release(ino, reply) 58 | } 59 | fn setattr(&mut self, _req: &Request<'_>, ino: u64, mode: Option, uid: Option, gid: Option, size: Option, atime: Option, mtime: Option, ctime: Option, _fh: Option, crtime: Option, _chgtime: Option, _bkuptime: Option, flags: Option, reply: ReplyAttr) { 60 | self.provider.setattr(ino, mode, uid, gid, size, atime, mtime, ctime, crtime, flags, reply) 61 | } 62 | fn lookup(&mut self, _req: &Request<'_>, parent: u64, name: &OsStr, reply: ReplyEntry) { 63 | if parent == 1 && name.to_str() == Some(FUSE_READY_NAME) { 64 | let attr = self.fuse_ready_attr(); 65 | reply.entry(&std::time::Duration::from_secs(1), &attr, 0); 66 | return; 67 | } 68 | self.provider.lookup(parent, name, reply) 69 | } 70 | fn getattr(&mut self, _req: &Request<'_>, ino: u64, reply: ReplyAttr) { 71 | if ino == FUSE_READY_INO { 72 | let attr = self.fuse_ready_attr(); 73 | reply.attr(&std::time::Duration::from_secs(1), &attr); 74 | return; 75 | } 76 | self.provider.getattr(ino, reply) 77 | } 78 | fn readdir(&mut self, _req: &Request<'_>, ino: u64, _fh: u64, offset: i64, reply: ReplyDirectory) { 79 | self.provider.readdir(ino, offset, reply) 80 | } 81 | fn mkdir(&mut self, _req: &Request<'_>, parent: u64, name: &OsStr, mode: u32, umask: 
u32, reply: ReplyEntry) { 82 | self.provider.mkdir(parent, name, mode, umask, reply) 83 | } 84 | fn create(&mut self, _req: &Request<'_>, parent: u64, name: &OsStr, mode: u32, flags: u32, umask: i32, reply: ReplyCreate) { 85 | self.provider.create(parent, name, mode, flags, umask, reply) 86 | } 87 | fn read(&mut self, _req: &Request<'_>, ino: u64, _fh: u64, offset: i64, size: u32, _flags: i32, _lock_owner: Option, reply: ReplyData) { 88 | if ino == FUSE_READY_INO { 89 | let data = self.mount_time_ms.to_string().into_bytes(); 90 | let start = std::cmp::min(offset as usize, data.len()); 91 | let end = std::cmp::min(start + size as usize, data.len()); 92 | reply.data(&data[start..end]); 93 | return; 94 | } 95 | self.provider.read(ino, offset, size, reply) 96 | } 97 | fn write(&mut self, _req: &Request<'_>, ino: u64, _fh: u64, offset: i64, data: &[u8], _write_flags: u32, _flags: i32, _lock_owner: Option, reply: ReplyWrite) { 98 | self.provider.write(ino, offset, data, reply) 99 | } 100 | fn unlink(&mut self, _req: &Request<'_>, parent: u64, name: &OsStr, reply: fuser::ReplyEmpty) { 101 | self.provider.unlink(parent, name, reply) 102 | } 103 | fn rename(&mut self, _req: &Request<'_>, parent: u64, name: &OsStr, newparent: u64, newname: &OsStr, flags: u32, reply: fuser::ReplyEmpty) { 104 | self.provider.rename(parent, name, newparent, newname, flags, reply) 105 | } 106 | fn symlink(&mut self, _req: &Request<'_>, parent: u64, name: &OsStr, link: &std::path::Path, reply: ReplyEntry) { 107 | self.provider.symlink(parent, name, link, reply) 108 | } 109 | fn readlink(&mut self, _req: &Request<'_>, ino: u64, reply: ReplyData) { 110 | self.provider.readlink(ino, reply) 111 | } 112 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Durable Objects File System (dofs) 2 | 3 | A filesystem-like API for Cloudflare Durable Objects, supporting streaming 
reads and writes with chunked storage. 4 | 5 | ## Features 6 | 7 | - File and directory operations (read, write, mkdir, rmdir, stat, etc.) 8 | - Efficient chunked storage for large files 9 | - Streaming read and write support via ReadableStream and WritableStream 10 | - Designed for use in Durable Objects (DOs) 11 | 12 | ## Basic Usage 13 | 14 | The recommended way to add dofs to your Durable Object is using the `@Dofs` decorator: 15 | 16 | ```ts 17 | import { DurableObject } from 'cloudflare:workers' 18 | import { Dofs } from 'dofs' 19 | 20 | @Dofs({ chunkSize: 256 * 1024 }) 21 | export class MyDurableObject extends DurableObject { 22 | // Your custom methods here 23 | // Access filesystem via this.getFs() 24 | } 25 | ``` 26 | 27 | The `@Dofs` decorator: 28 | 29 | - Automatically creates the `fs` property in your Durable Object 30 | - Adds a `getFs()` method to access the filesystem instance 31 | - Accepts the same configuration options as the `Fs` constructor 32 | - Works directly with classes extending `DurableObject` 33 | 34 | ### Alternative: Using withDofs Helper 35 | 36 | For cases where you need more control or are working with existing class hierarchies, you can use the `withDofs` helper: 37 | 38 | ```ts 39 | import { DurableObject } from 'cloudflare:workers' 40 | import { withDofs } from 'dofs' 41 | 42 | // Create a concrete base class first 43 | class MyDurableObjectBase extends DurableObject { 44 | constructor(ctx: DurableObjectState, env: Env) { 45 | super(ctx, env) 46 | } 47 | } 48 | 49 | // Then extend it with dofs 50 | export class MyDurableObject extends withDofs(MyDurableObjectBase) { 51 | // Your custom methods here 52 | } 53 | 54 | // Or with configuration options: 55 | export class MyDurableObject extends withDofs(MyDurableObjectBase, { chunkSize: 256 * 1024 }) { 56 | // Your custom methods here 57 | } 58 | ``` 59 | 60 | **Important:** Due to TypeScript declaration generation limitations, `withDofs` requires a concrete base class. 
You cannot pass the abstract `DurableObject` class directly to `withDofs`. 61 | 62 | Both approaches provide the same functionality: 63 | 64 | - Automatically creates the `fs` property in your Durable Object 65 | - Adds a `getFs()` method to access the filesystem instance 66 | - Accepts the same configuration options as the `Fs` constructor 67 | 68 | > Note: class instances can be [passed via RPC](https://developers.cloudflare.com/workers/runtime-apis/rpc/#class-instances) as long as they inherit from `RpcTarget` as `Fs` does. 69 | 70 | ### Advanced: Manual Setup 71 | 72 | For more control, you can manually create a dofs instance in your Durable Object: 73 | 74 | ```ts 75 | import { DurableObject } from 'cloudflare:workers' 76 | import { Fs } from 'dofs' 77 | 78 | export class MyDurableObject extends DurableObject { 79 | private fs: Fs 80 | 81 | constructor(ctx: DurableObjectState, env: Env) { 82 | super(ctx, env) 83 | this.fs = new Fs(ctx, env) 84 | } 85 | 86 | // Expose fs 87 | public getDofs() { 88 | return this.fs 89 | } 90 | } 91 | ``` 92 | 93 | ## Configuration Options 94 | 95 | ### Chunk Size 96 | 97 | By default, the chunk size is 64kb. You can configure it by passing the `chunkSize` option (in bytes) to the `Fs` constructor: 98 | 99 | ```ts 100 | import { Fs } from 'dofs' 101 | 102 | const fs = new Fs(ctx, env, { chunkSize: 256 * 1024 }) // 256kb chunks 103 | ``` 104 | 105 | **How chunk size affects query frequency and cost:** 106 | 107 | - Smaller chunk sizes mean more database queries per file read/write, which can increase Durable Object query costs and latency. 108 | - Larger chunk sizes reduce the number of queries (lower cost, better throughput), but may use more memory per operation and can be less efficient for small files or random access. 109 | - Choose a chunk size that balances your workload's cost, performance, and memory needs. 110 | 111 | > **Note:** Chunk size cannot be changed after the first file has been written to the filesystem. 
It is fixed for the lifetime of the filesystem instance. 112 | 113 | ### Device Size 114 | 115 | By default, the device size (total storage available) is 1GB (`1024 * 1024 * 1024` bytes). You can change this limit using the `setDeviceSize` method: 116 | 117 | ```ts 118 | fs.setDeviceSize(10 * 1024 * 1024 * 1024) // Set device size to 10GB 119 | ``` 120 | 121 | - The device size must be set before writing data that would exceed the current limit. 122 | - If you try to write more data than the device size allows, an `ENOSPC` error will be thrown. 123 | - You can check the current device size and usage with `getDeviceStats()`. 124 | 125 | ```ts 126 | const stats = fs.getDeviceStats() 127 | console.log(stats.deviceSize, stats.spaceUsed, stats.spaceAvailable) 128 | ``` 129 | 130 | > **Default:** 1GB if not set. 131 | 132 | ## Streaming Support 133 | 134 | - **Read:** `readFile(path)` returns a `ReadableStream` for efficient, chunked reading. 135 | - **Write:** `writeFile(path, stream)` accepts a `ReadableStream` for efficient, chunked writing. 136 | - You can also use `writeFile(path, data)` with a string or ArrayBuffer for non-streaming writes. 137 | 138 | ## API Reference 139 | 140 | **Note:** These are async from the CF Worker stub (RPC call), but are sync when called inside the Durable Object (direct call). 
141 | 142 | - `readFile(path: string): ReadableStream` 143 | - `writeFile(path: string, data: string | ArrayBuffer | ReadableStream): void` 144 | - `read(path: string, options): ArrayBuffer` (non-streaming, offset/length) 145 | - `write(path: string, data, options): void` (non-streaming, offset) 146 | - `mkdir(path: string, options?): void` 147 | - `rmdir(path: string, options?): void` 148 | - `listDir(path: string, options?): string[]` 149 | - `stat(path: string): Stat` 150 | - `unlink(path: string): void` 151 | - `rename(oldPath: string, newPath: string): void` 152 | - `symlink(target: string, path: string): void` 153 | - `readlink(path: string): string` 154 | 155 | ## Projects that work with dofs 156 | 157 | - dterm 158 | 159 | ## Future Plans 160 | 161 | - In-memory block caching for improved read/write performance 162 | - Store small files (that fit in one block) directly in the inode table instead of the chunk table to reduce queries 163 | - `defrag()` method to allow changing chunk size and optimizing storage 164 | -------------------------------------------------------------------------------- /packages/dofs/README.md: -------------------------------------------------------------------------------- 1 | # Durable Objects File System (dofs) 2 | 3 | A filesystem-like API for Cloudflare Durable Objects, supporting streaming reads and writes with chunked storage. 4 | 5 | ## Features 6 | 7 | - File and directory operations (read, write, mkdir, rmdir, stat, etc.) 
8 | - Efficient chunked storage for large files 9 | - Streaming read and write support via ReadableStream and WritableStream 10 | - Designed for use in Durable Objects (DOs) 11 | 12 | ## Basic Usage 13 | 14 | The recommended way to add dofs to your Durable Object is using the `@Dofs` decorator: 15 | 16 | ```ts 17 | import { DurableObject } from 'cloudflare:workers' 18 | import { Dofs } from 'dofs' 19 | 20 | @Dofs({ chunkSize: 256 * 1024 }) 21 | export class MyDurableObject extends DurableObject { 22 | // Your custom methods here 23 | // Access filesystem via this.getFs() 24 | } 25 | ``` 26 | 27 | The `@Dofs` decorator: 28 | 29 | - Automatically creates the `fs` property in your Durable Object 30 | - Adds a `getFs()` method to access the filesystem instance 31 | - Accepts the same configuration options as the `Fs` constructor 32 | - Works directly with classes extending `DurableObject` 33 | 34 | ### Alternative: Using withDofs Helper 35 | 36 | For cases where you need more control or are working with existing class hierarchies, you can use the `withDofs` helper: 37 | 38 | ```ts 39 | import { DurableObject } from 'cloudflare:workers' 40 | import { withDofs } from 'dofs' 41 | 42 | // Create a concrete base class first 43 | class MyDurableObjectBase extends DurableObject { 44 | constructor(ctx: DurableObjectState, env: Env) { 45 | super(ctx, env) 46 | } 47 | } 48 | 49 | // Then extend it with dofs 50 | export class MyDurableObject extends withDofs(MyDurableObjectBase) { 51 | // Your custom methods here 52 | } 53 | 54 | // Or with configuration options: 55 | export class MyDurableObject extends withDofs(MyDurableObjectBase, { chunkSize: 256 * 1024 }) { 56 | // Your custom methods here 57 | } 58 | ``` 59 | 60 | **Important:** Due to TypeScript declaration generation limitations, `withDofs` requires a concrete base class. You cannot pass the abstract `DurableObject` class directly to `withDofs`. 
61 | 62 | Both approaches provide the same functionality: 63 | 64 | - Automatically creates the `fs` property in your Durable Object 65 | - Adds a `getFs()` method to access the filesystem instance 66 | - Accepts the same configuration options as the `Fs` constructor 67 | 68 | > Note: class instances can be [passed via RPC](https://developers.cloudflare.com/workers/runtime-apis/rpc/#class-instances) as long as they inherit from `RpcTarget` as `Fs` does. 69 | 70 | ### Advanced: Manual Setup 71 | 72 | For more control, you can manually create a dofs instance in your Durable Object: 73 | 74 | ```ts 75 | import { DurableObject } from 'cloudflare:workers' 76 | import { Fs } from 'dofs' 77 | 78 | export class MyDurableObject extends DurableObject { 79 | private fs: Fs 80 | 81 | constructor(ctx: DurableObjectState, env: Env) { 82 | super(ctx, env) 83 | this.fs = new Fs(ctx, env) 84 | } 85 | 86 | // Expose fs 87 | public getDofs() { 88 | return this.fs 89 | } 90 | } 91 | ``` 92 | 93 | ## Configuration Options 94 | 95 | ### Chunk Size 96 | 97 | By default, the chunk size is 64kb. You can configure it by passing the `chunkSize` option (in bytes) to the `Fs` constructor: 98 | 99 | ```ts 100 | import { Fs } from 'dofs' 101 | 102 | const fs = new Fs(ctx, env, { chunkSize: 256 * 1024 }) // 256kb chunks 103 | ``` 104 | 105 | **How chunk size affects query frequency and cost:** 106 | 107 | - Smaller chunk sizes mean more database queries per file read/write, which can increase Durable Object query costs and latency. 108 | - Larger chunk sizes reduce the number of queries (lower cost, better throughput), but may use more memory per operation and can be less efficient for small files or random access. 109 | - Choose a chunk size that balances your workload's cost, performance, and memory needs. 110 | 111 | > **Note:** Chunk size cannot be changed after the first file has been written to the filesystem. It is fixed for the lifetime of the filesystem instance. 
112 | 113 | ### Device Size 114 | 115 | By default, the device size (total storage available) is 1GB (`1024 * 1024 * 1024` bytes). You can change this limit using the `setDeviceSize` method: 116 | 117 | ```ts 118 | fs.setDeviceSize(10 * 1024 * 1024 * 1024) // Set device size to 10GB 119 | ``` 120 | 121 | - The device size must be set before writing data that would exceed the current limit. 122 | - If you try to write more data than the device size allows, an `ENOSPC` error will be thrown. 123 | - You can check the current device size and usage with `getDeviceStats()`. 124 | 125 | ```ts 126 | const stats = fs.getDeviceStats() 127 | console.log(stats.deviceSize, stats.spaceUsed, stats.spaceAvailable) 128 | ``` 129 | 130 | > **Default:** 1GB if not set. 131 | 132 | ## Streaming Support 133 | 134 | - **Read:** `readFile(path)` returns a `ReadableStream` for efficient, chunked reading. 135 | - **Write:** `writeFile(path, stream)` accepts a `ReadableStream` for efficient, chunked writing. 136 | - You can also use `writeFile(path, data)` with a string or ArrayBuffer for non-streaming writes. 137 | 138 | ## API Reference 139 | 140 | **Note:** These are async from the CF Worker stub (RPC call), but are sync when called inside the Durable Object (direct call). 
141 | 142 | - `readFile(path: string): ReadableStream` 143 | - `writeFile(path: string, data: string | ArrayBuffer | ReadableStream): void` 144 | - `read(path: string, options): ArrayBuffer` (non-streaming, offset/length) 145 | - `write(path: string, data, options): void` (non-streaming, offset) 146 | - `mkdir(path: string, options?): void` 147 | - `rmdir(path: string, options?): void` 148 | - `listDir(path: string, options?): string[]` 149 | - `stat(path: string): Stat` 150 | - `unlink(path: string): void` 151 | - `rename(oldPath: string, newPath: string): void` 152 | - `symlink(target: string, path: string): void` 153 | - `readlink(path: string): string` 154 | 155 | ## Projects that work with dofs 156 | 157 | - dterm 158 | 159 | ## Future Plans 160 | 161 | - In-memory block caching for improved read/write performance 162 | - Store small files (that fit in one block) directly in the inode table instead of the chunk table to reduce queries 163 | - `defrag()` method to allow changing chunk size and optimizing storage 164 | -------------------------------------------------------------------------------- /packages/dofs-rust-client/src/providers/memory.rs: -------------------------------------------------------------------------------- 1 | use std::collections::{HashMap, BTreeMap}; 2 | use std::path::PathBuf; 3 | use std::ffi::OsStr; 4 | use std::time::SystemTime; 5 | use fuser; 6 | use crate::providers::Provider; 7 | 8 | const ROOT_INODE: u64 = 1; 9 | const USER_INODE_START: u64 = 10; 10 | 11 | #[derive(Debug, Clone)] 12 | pub struct InMemoryFile { 13 | pub data: Vec, 14 | pub attr: fuser::FileAttr, 15 | } 16 | 17 | #[derive(Debug, Clone)] 18 | pub struct InMemoryDir { 19 | pub children: BTreeMap, 20 | pub attr: fuser::FileAttr, 21 | } 22 | 23 | #[derive(Debug, Clone)] 24 | pub struct InMemorySymlink { 25 | pub target: String, 26 | pub attr: fuser::FileAttr, 27 | } 28 | 29 | #[derive(Debug, Clone)] 30 | pub enum Node { 31 | File(InMemoryFile), 32 | Dir(InMemoryDir), 
33 | Symlink(InMemorySymlink), 34 | } 35 | 36 | pub struct MemoryProvider { 37 | pub inodes: HashMap, 38 | #[allow(dead_code)] 39 | pub paths: HashMap, 40 | pub next_inode: u64, 41 | #[allow(dead_code)] 42 | pub xattrs: HashMap<(u64, String), Vec>, 43 | pub osx_mode: bool, 44 | } 45 | 46 | impl MemoryProvider { 47 | #[allow(dead_code)] 48 | pub fn new() -> Self { 49 | Self::new_with_mode(false) 50 | } 51 | pub fn new_with_mode(osx_mode: bool) -> Self { 52 | let mut inodes = HashMap::new(); 53 | let mut paths = HashMap::new(); 54 | let now = SystemTime::now(); 55 | let root_attr = fuser::FileAttr { 56 | ino: ROOT_INODE, 57 | size: 0, 58 | blocks: 0, 59 | atime: now, 60 | mtime: now, 61 | ctime: now, 62 | crtime: now, 63 | kind: fuser::FileType::Directory, 64 | perm: 0o755, 65 | nlink: 2, 66 | uid: unsafe { libc::geteuid() }, 67 | gid: unsafe { libc::getegid() }, 68 | rdev: 0, 69 | flags: 0, 70 | blksize: 512, 71 | }; 72 | let root = Node::Dir(InMemoryDir { 73 | children: BTreeMap::new(), 74 | attr: root_attr, 75 | }); 76 | inodes.insert(ROOT_INODE, root); 77 | paths.insert(PathBuf::from("/"), ROOT_INODE); 78 | Self { inodes, paths, next_inode: USER_INODE_START, xattrs: HashMap::new(), osx_mode } 79 | } 80 | pub fn alloc_inode(&mut self) -> u64 { 81 | let ino = self.next_inode; 82 | self.next_inode += 1; 83 | ino 84 | } 85 | } 86 | 87 | impl Provider for MemoryProvider { 88 | fn rmdir(&mut self, parent: u64, name: &OsStr, reply: fuser::ReplyEmpty) { 89 | let name_str = name.to_str().unwrap_or(""); 90 | let target_ino = if let Some(Node::Dir(parent_dir)) = self.inodes.get(&parent) { 91 | parent_dir.children.get(name_str).copied() 92 | } else { 93 | reply.error(libc::ENOENT); 94 | return; 95 | }; 96 | let ino = match target_ino { 97 | Some(ino) => ino, 98 | None => { 99 | reply.error(libc::ENOENT); 100 | return; 101 | } 102 | }; 103 | let is_empty_dir = if let Some(Node::Dir(dir)) = self.inodes.get(&ino) { 104 | dir.children.is_empty() 105 | } else { 106 | 
reply.error(libc::ENOTDIR);
            return;
        };
        if !is_empty_dir {
            reply.error(libc::ENOTEMPTY);
            return;
        }
        if let Some(Node::Dir(parent_dir)) = self.inodes.get_mut(&parent) {
            parent_dir.children.remove(name_str);
        }
        self.inodes.remove(&ino);
        reply.ok();
    }

    /// Open is stateless: any known inode may be opened (fh 0, flags 0).
    fn open(&mut self, ino: u64, reply: fuser::ReplyOpen) {
        if self.inodes.contains_key(&ino) {
            reply.opened(0, 0);
        } else {
            reply.error(libc::ENOENT);
        }
    }

    /// Nothing to write back for the in-memory store; succeed if the inode exists.
    fn flush(&mut self, ino: u64, reply: fuser::ReplyEmpty) {
        if self.inodes.contains_key(&ino) {
            reply.ok();
        } else {
            reply.error(libc::ENOENT);
        }
    }

    /// No per-handle resources to release.
    fn release(&mut self, ino: u64, reply: fuser::ReplyEmpty) {
        if self.inodes.contains_key(&ino) {
            reply.ok();
        } else {
            reply.error(libc::ENOENT);
        }
    }

    /// Applies requested attribute changes to the inode and replies with the
    /// updated attributes (1s TTL), or ENOENT.
    ///
    /// NOTE(review): the `Option<...>` parameter types below were reconstructed
    /// to mirror `fuser::Filesystem::setattr` (the dump had stripped the
    /// generics) -- confirm against the `Provider` trait definition.
    fn setattr(&mut self, ino: u64, mode: Option<u32>, uid: Option<u32>, gid: Option<u32>, size: Option<u64>, atime: Option<fuser::TimeOrNow>, mtime: Option<fuser::TimeOrNow>, ctime: Option<SystemTime>, crtime: Option<SystemTime>, flags: Option<u32>, reply: fuser::ReplyAttr) {
        fn timeornow_to_systemtime(t: fuser::TimeOrNow) -> SystemTime {
            match t {
                fuser::TimeOrNow::SpecificTime(st) => st,
                fuser::TimeOrNow::Now => SystemTime::now(),
            }
        }
        // Clamp timestamps: more than ~100 years in the future becomes "now",
        // anything before the epoch becomes the epoch (guards against overflow
        // when fuser converts these back to kernel timespecs).
        fn safe_systemtime(t: SystemTime) -> SystemTime {
            let now = SystemTime::now();
            if let Ok(duration_since_epoch) = t.duration_since(std::time::UNIX_EPOCH) {
                if duration_since_epoch.as_secs() > now.duration_since(std::time::UNIX_EPOCH).unwrap_or_default().as_secs() + (100 * 365 * 24 * 3600) {
                    now
                } else {
                    t
                }
            } else {
                std::time::UNIX_EPOCH
            }
        }
        if let Some(node) = self.inodes.get_mut(&ino) {
            match node {
                Node::File(f) => {
                    if let Some(new_size) = size {
                        f.data.resize(new_size as usize, 0);
                        f.attr.size = new_size;
                        // Fix: POSIX truncation updates mtime and ctime; an
                        // explicit mtime/ctime argument below still wins.
                        let now = SystemTime::now();
                        f.attr.mtime = now;
                        f.attr.ctime = now;
                    }
                    if let Some(m) = mode { f.attr.perm = m as u16; }
                    if let Some(u) = uid { f.attr.uid = u; }
                    if let Some(g) = gid { f.attr.gid = g; }
                    if let Some(a) = atime { f.attr.atime = timeornow_to_systemtime(a); }
                    if let Some(m) = mtime { f.attr.mtime = timeornow_to_systemtime(m); }
                    if let Some(c) = ctime { f.attr.ctime = safe_systemtime(c); }
                    if let Some(cr) = crtime { f.attr.crtime = safe_systemtime(cr); }
                    if let Some(fg) = flags { f.attr.flags = fg; }
                    reply.attr(&std::time::Duration::from_secs(1), &f.attr);
                }
                Node::Dir(d) => {
                    if let Some(m) = mode { d.attr.perm = m as u16; }
                    if let Some(u) = uid { d.attr.uid = u; }
                    if let Some(g) = gid { d.attr.gid = g; }
                    if let Some(a) = atime { d.attr.atime = timeornow_to_systemtime(a); }
                    if let Some(m) = mtime { d.attr.mtime = timeornow_to_systemtime(m); }
                    if let Some(c) = ctime { d.attr.ctime = safe_systemtime(c); }
                    if let Some(cr) = crtime { d.attr.crtime = safe_systemtime(cr); }
                    if let Some(fg) = flags { d.attr.flags = fg; }
                    reply.attr(&std::time::Duration::from_secs(1), &d.attr);
                }
                Node::Symlink(s) => {
                    if let Some(m) = mode { s.attr.perm = m as u16; }
                    if let Some(u) = uid { s.attr.uid = u; }
                    if let Some(g) = gid { s.attr.gid = g; }
                    if let Some(a) = atime { s.attr.atime = timeornow_to_systemtime(a); }
                    if let Some(m) = mtime { s.attr.mtime = timeornow_to_systemtime(m); }
                    if let Some(c) = ctime { s.attr.ctime = safe_systemtime(c); }
                    if let Some(cr) = crtime { s.attr.crtime = safe_systemtime(cr); }
                    if let Some(fg) = flags { s.attr.flags = fg; }
                    reply.attr(&std::time::Duration::from_secs(1), &s.attr);
                }
            }
        } else {
            reply.error(libc::ENOENT);
        }
    }

    /// Resolves `name` inside directory `parent`; replies with the child's
    /// attributes (1s entry TTL) or ENOENT.
    fn lookup(&mut self, parent: u64, name: &OsStr, reply: fuser::ReplyEntry) {
        let name = name.to_str().unwrap_or("");
        if let Some(Node::Dir(dir)) = self.inodes.get(&parent) {
            if let Some(&child_ino) = dir.children.get(name) {
                if let Some(node) = self.inodes.get(&child_ino) {
                    let attr = match node {
                        Node::File(f) => f.attr,
                        Node::Dir(d) => d.attr,
                        Node::Symlink(s) => s.attr,
                    };
                    reply.entry(&std::time::Duration::from_secs(1), &attr, 0);
                    return;
                }
            }
        }
        reply.error(libc::ENOENT);
    }

    /// Replies with the attributes of `ino` (1s TTL), or ENOENT.
    fn getattr(&mut self, ino: u64, reply: fuser::ReplyAttr) {
        if let Some(node) = self.inodes.get(&ino) {
            let attr = match node {
                Node::File(f) => f.attr,
                Node::Dir(d) => d.attr,
                Node::Symlink(s) => s.attr,
            };
            reply.attr(&std::time::Duration::from_secs(1), &attr);
        } else {
            reply.error(libc::ENOENT);
        }
    }

    /// Lists directory entries starting at `offset`, honoring the reply
    /// buffer's capacity. In osx_mode, AppleDouble ("._*") names are hidden.
    fn readdir(&mut self, ino: u64, offset: i64, mut reply: fuser::ReplyDirectory) {
        if let Some(Node::Dir(dir)) = self.inodes.get(&ino) {
            // Fix: "." must be this directory's own inode (the original used
            // ROOT_INODE for every directory). Parent links are not stored,
            // so ".." still falls back to ROOT_INODE -- TODO: track parents
            // if correct ".." inodes matter to callers.
            let mut entries = vec![
                (ino, fuser::FileType::Directory, ".".to_string()),
                (ROOT_INODE, fuser::FileType::Directory, "..".to_string()),
            ];
            for (name, &child_ino) in &dir.children {
                if self.osx_mode && name.starts_with("._") {
                    continue;
                }
                // Fix: a dangling child entry is skipped instead of panicking.
                let kind = match self.inodes.get(&child_ino) {
                    Some(Node::File(_)) => fuser::FileType::RegularFile,
                    Some(Node::Dir(_)) => fuser::FileType::Directory,
                    Some(Node::Symlink(_)) => fuser::FileType::Symlink,
                    None => continue,
                };
                entries.push((child_ino, kind, name.clone()));
            }
            for (i, (ino, kind, name)) in entries.into_iter().enumerate().skip(offset as usize) {
                // i + 1 is the offset the kernel passes to resume after this entry.
                if reply.add(ino, (i + 1) as i64, kind, name) {
                    break; // reply buffer full
                }
            }
            reply.ok();
        } else {
            reply.error(libc::ENOENT);
        }
    }

    /// Creates an empty directory under `parent` with perms `mode & !umask`.
    fn mkdir(&mut self, parent: u64, name: &OsStr, mode: u32, umask: u32, reply: fuser::ReplyEntry) {
        let name_str = name.to_str().unwrap_or("");
        if self.osx_mode && name_str.starts_with("._") {
            reply.error(libc::EACCES);
            return;
        }
        let already_exists = if let Some(Node::Dir(dir)) = self.inodes.get(&parent) {
            dir.children.contains_key(name_str)
        } else {
            reply.error(libc::ENOENT);
            return;
        };
        if already_exists {
            reply.error(libc::EEXIST);
            return;
        }
        let ino = self.alloc_inode();
        let now = SystemTime::now();
        let attr = fuser::FileAttr {
            ino,
            size: 0,
            blocks: 0,
            atime: now,
            mtime: now,
            ctime: now,
            crtime: now,
            kind: fuser::FileType::Directory,
            perm: (mode & !umask & 0o7777) as u16,
            nlink: 2,
            uid: unsafe { libc::geteuid() },
            gid: unsafe { libc::getegid() },
            rdev: 0,
            flags: 0,
            blksize: 512,
        };
        let new_dir = Node::Dir(InMemoryDir {
            children: BTreeMap::new(),
            attr,
        });
        if let Some(Node::Dir(dir)) = self.inodes.get_mut(&parent) {
            dir.children.insert(name_str.to_string(), ino);
        }
        self.inodes.insert(ino, new_dir);
        reply.entry(&std::time::Duration::from_secs(1), &attr, 0);
    }

    /// Creates an empty regular file under `parent` and "opens" it (fh 0).
    fn create(&mut self, parent: u64, name: &OsStr, mode: u32, _flags: u32, umask: i32, reply: fuser::ReplyCreate) {
        let name_str = name.to_str().unwrap_or("");
        if self.osx_mode && name_str.starts_with("._") {
            reply.error(libc::EACCES);
            return;
        }
        let already_exists = if let Some(Node::Dir(dir)) = self.inodes.get(&parent) {
            dir.children.contains_key(name_str)
        } else {
            reply.error(libc::ENOENT);
            return;
        };
        if already_exists {
            reply.error(libc::EEXIST);
            return;
        }
        let ino = self.alloc_inode();
        let now = SystemTime::now();
        let attr = fuser::FileAttr {
            ino,
            size: 0,
            blocks: 0,
            atime: now,
            mtime: now,
            ctime: now,
            crtime: now,
            kind: fuser::FileType::RegularFile,
            perm: (mode & !(umask as u32) & 0o7777) as u16,
            nlink: 1,
            uid: unsafe { libc::geteuid() },
            gid: unsafe { libc::getegid() },
            rdev: 0,
            flags: 0,
            blksize: 512,
        };
        let new_file = Node::File(InMemoryFile {
            data: vec![],
            attr,
        });
        if let Some(Node::Dir(dir)) = self.inodes.get_mut(&parent) {
            dir.children.insert(name_str.to_string(), ino);
        }
        self.inodes.insert(ino, new_file);
        reply.created(&std::time::Duration::from_secs(1), &attr, 0, 0, 0);
    }

    /// Reads up to `size` bytes at `offset`, clamped to the file length
    /// (a read past EOF yields an empty slice rather than an error).
    fn read(&mut self, ino: u64, offset: i64, size: u32, reply: fuser::ReplyData) {
        if let Some(Node::File(file)) = self.inodes.get(&ino) {
            let data = &file.data;
            let start = std::cmp::min(offset as usize, data.len());
            let end = std::cmp::min(start + size as usize, data.len());
            reply.data(&data[start..end]);
        } else {
            reply.error(libc::ENOENT);
        }
    }

    /// Writes `data` at `offset`, growing the file (zero-filled) as needed.
    fn write(&mut self, ino: u64, offset: i64, data: &[u8], reply: fuser::ReplyWrite) {
        if let Some(Node::File(file)) = self.inodes.get_mut(&ino) {
            let offset = offset as usize;
            if file.data.len() < offset + data.len() {
                file.data.resize(offset + data.len(), 0);
            }
            file.data[offset..offset + data.len()].copy_from_slice(data);
            file.attr.size = file.data.len() as u64;
            reply.written(data.len() as u32);
        } else {
            reply.error(libc::ENOENT);
        }
    }

    /// Removes a file or symlink from `parent`; directories get EISDIR.
    fn unlink(&mut self, parent: u64, name: &OsStr, reply: fuser::ReplyEmpty) {
        let name_str = name.to_str().unwrap_or("");
        let target_ino = if let Some(Node::Dir(parent_dir)) = self.inodes.get(&parent) {
            parent_dir.children.get(name_str).copied()
        } else {
            reply.error(libc::ENOENT);
            return;
        };
        let ino = match target_ino {
            Some(ino) => ino,
            None => {
                reply.error(libc::ENOENT);
                return;
            }
        };
        match self.inodes.get(&ino) {
            Some(Node::File(_)) | Some(Node::Symlink(_)) => {
                if let Some(Node::Dir(parent_dir)) = self.inodes.get_mut(&parent) {
                    parent_dir.children.remove(name_str);
                }
                self.inodes.remove(&ino);
                reply.ok();
            }
            Some(Node::Dir(_)) => {
                reply.error(libc::EISDIR);
            }
            None => {
                reply.error(libc::ENOENT);
            }
        }
    }

    /// Moves an entry from `parent`/`name` to `newparent`/`newname`.
    ///
    /// NOTE(review): POSIX rename() replaces an existing destination; this
    /// implementation returns EEXIST instead -- confirm that is intended
    /// before relying on overwrite-by-rename.
    fn rename(&mut self, parent: u64, name: &OsStr, newparent: u64, newname: &OsStr, _flags: u32, reply: fuser::ReplyEmpty) {
        let name_str = name.to_str().unwrap_or("");
        let newname_str = newname.to_str().unwrap_or("");
        // Both endpoints must be directories.
        let src_is_dir = matches!(self.inodes.get(&parent), Some(Node::Dir(_)));
        let dst_is_dir = matches!(self.inodes.get(&newparent), Some(Node::Dir(_)));
        if !src_is_dir || !dst_is_dir {
            reply.error(libc::ENOTDIR);
            return;
        }
        // Resolve the source entry's inode.
        let ino = {
            let src_parent = match self.inodes.get(&parent) {
                Some(Node::Dir(dir)) => dir,
                _ => { reply.error(libc::ENOTDIR); return; }
            };
            match src_parent.children.get(name_str) {
                Some(&ino) => ino,
                None => { reply.error(libc::ENOENT); return; }
            }
        };
        // Reject when the destination name already exists.
        let dest_exists = {
            let dst_parent = match self.inodes.get(&newparent) {
                Some(Node::Dir(dir)) => dir,
                _ => { reply.error(libc::ENOTDIR); return; }
            };
            dst_parent.children.contains_key(newname_str)
        };
        if dest_exists {
            reply.error(libc::EEXIST);
            return;
        }
        // All checks passed; perform the two-step mutation.
        if let Some(Node::Dir(src_parent)) = self.inodes.get_mut(&parent) {
            src_parent.children.remove(name_str);
        }
        if let Some(Node::Dir(dst_parent)) = self.inodes.get_mut(&newparent) {
            dst_parent.children.insert(newname_str.to_string(), ino);
        }
        reply.ok();
    }

    /// Creates a symlink named `name` under `parent` pointing at `link`.
    fn symlink(&mut self, parent: u64, name: &OsStr, link: &std::path::Path, reply: fuser::ReplyEntry) {
        let name_str = name.to_str().unwrap_or("");
        if self.osx_mode && name_str.starts_with("._") {
            reply.error(libc::EACCES);
            return;
        }
        let already_exists = if let Some(Node::Dir(dir)) = self.inodes.get(&parent) {
            dir.children.contains_key(name_str)
        } else {
            reply.error(libc::ENOENT);
            return;
        };
        if already_exists {
            reply.error(libc::EEXIST);
            return;
        }
        let ino = self.alloc_inode();
        let now = SystemTime::now();
        let target = link.to_string_lossy().to_string();
        let attr = fuser::FileAttr {
            ino,
            size: target.len() as u64, // symlink size is its target's length
            blocks: 0,
            atime: now,
            mtime: now,
            ctime: now,
            crtime: now,
            kind: fuser::FileType::Symlink,
            perm: 0o777,
            nlink: 1,
            uid: unsafe { libc::geteuid() },
            gid: unsafe { libc::getegid() },
            rdev: 0,
            flags: 0,
            blksize: 512,
        };
        let symlink = Node::Symlink(InMemorySymlink { target, attr });
        if let Some(Node::Dir(dir)) = self.inodes.get_mut(&parent) {
            dir.children.insert(name_str.to_string(), ino);
        }
        self.inodes.insert(ino, symlink);
        reply.entry(&std::time::Duration::from_secs(1), &attr, 0);
    }

    /// Returns the symlink target bytes; non-symlinks get EINVAL.
    fn readlink(&mut self, ino: u64, reply: fuser::ReplyData) {
        if let Some(Node::Symlink(s)) = self.inodes.get(&ino) {
            reply.data(s.target.as_bytes());
        } else {
            reply.error(libc::EINVAL);
        }
    }
}
--------------------------------------------------------------------------------
/packages/dofs-rust-client/tests/integration_stress.rs:
--------------------------------------------------------------------------------
use std::process::{Command, Stdio};
use std::time::{Duration, Instant};
use std::fs::{self, File, create_dir, read_dir, remove_dir, OpenOptions, rename, remove_file, metadata};
use std::io::{Read, Write};
use std::io::Seek;
use
prettytable::{Table, Row, Cell}; 7 | use libc; 8 | use std::os::unix::fs::symlink; 9 | use rand::{Rng, SeedableRng}; 10 | use std::sync::{Arc, Barrier}; 11 | use std::thread; 12 | 13 | const MOUNTPOINT: &str = "./mnt"; 14 | const TEST_FILE: &str = "./mnt/testfile"; 15 | const TEST_DIR: &str = "./mnt/testdir"; 16 | 17 | #[derive(Clone)] 18 | struct ProviderTestResult { 19 | elapsed: Duration, 20 | success: bool, 21 | error: Option, 22 | } 23 | 24 | struct StressTest { 25 | name: &'static str, 26 | func: fn() -> Result<(), String>, 27 | skip_providers: Option<&'static [&'static str]>, 28 | } 29 | 30 | fn run_fuse_with_provider(provider: &str, db_path: Option<&str>) -> std::process::Child { 31 | let mut cmd = Command::new("cargo"); 32 | cmd.args(["run", "--quiet", "--", "--mode-osx", "--provider", provider]); 33 | if let Some(path) = db_path { 34 | cmd.args(["--db-path", path]); 35 | } 36 | cmd.stdout(Stdio::null()) 37 | .stderr(Stdio::null()) 38 | .spawn() 39 | .expect("Failed to start fuse process") 40 | } 41 | 42 | fn wait_for_mount() { 43 | for _ in 0..40 { 44 | if let Ok(mut file) = File::open(format!("{}/.fuse_ready", MOUNTPOINT)) { 45 | let mut contents = String::new(); 46 | if file.read_to_string(&mut contents).is_ok() { 47 | println!("Found .fuse_ready with contents: {}", contents); 48 | return; 49 | } 50 | } 51 | std::thread::sleep(Duration::from_millis(100)); 52 | } 53 | panic!("Mountpoint not available or .fuse_ready not present"); 54 | } 55 | 56 | fn wait_for_unmount() { 57 | for _ in 0..40 { 58 | if std::fs::metadata(format!("{}/.fuse_ready", MOUNTPOINT)).is_err() { 59 | return; 60 | } 61 | std::thread::sleep(Duration::from_millis(100)); 62 | } 63 | panic!("Mountpoint still present or .fuse_ready still exists"); 64 | } 65 | 66 | fn clean_setup(db_path: Option<&str>) { 67 | let _ = fs::remove_file("cf-fuse-simple.db"); 68 | let _ = fs::remove_file("cf-fuse-chunked.db"); 69 | if let Some(path) = db_path { 70 | let _ = fs::remove_file(path); 71 | } 72 | let 
_ = fs::remove_dir_all(MOUNTPOINT); 73 | let _ = fs::create_dir_all(MOUNTPOINT); 74 | } 75 | 76 | fn file_create_write_read_delete() -> Result<(), String> { 77 | // Create file 78 | let mut file = File::create(TEST_FILE).map_err(|e| format!("create: {e}"))?; 79 | // Write data 80 | let data = vec![42u8; 1024 * 1024]; 81 | file.write_all(&data).map_err(|e| format!("write: {e}"))?; 82 | drop(file); 83 | // Read data 84 | let mut file = File::open(TEST_FILE).map_err(|e| format!("open: {e}"))?; 85 | let mut buf = Vec::new(); 86 | file.read_to_end(&mut buf).map_err(|e| format!("read: {e}"))?; 87 | if buf != data { 88 | return Err("data mismatch".to_string()); 89 | } 90 | // Remove file 91 | fs::remove_file(TEST_FILE).map_err(|e| format!("remove: {e}"))?; 92 | Ok(()) 93 | } 94 | 95 | fn dir_create_list_delete() -> Result<(), String> { 96 | // Create directory 97 | create_dir(TEST_DIR).map_err(|e| format!("create_dir: {e}"))?; 98 | // List directory 99 | let entries: Vec<_> = read_dir("./mnt").map_err(|e| format!("read_dir: {e}"))?.collect(); 100 | if !entries.iter().filter_map(|e| e.as_ref().ok()).any(|e| e.file_name() == "testdir") { 101 | return Err("directory not found in listing".to_string()); 102 | } 103 | // Remove directory 104 | remove_dir(TEST_DIR).map_err(|e| format!("remove_dir: {e}"))?; 105 | Ok(()) 106 | } 107 | 108 | fn file_append_read_delete() -> Result<(), String> { 109 | // Create file and write initial data 110 | let mut file = File::create(TEST_FILE).map_err(|e| format!("create: {e}"))?; 111 | let data1 = vec![1u8; 512 * 1024]; 112 | file.write_all(&data1).map_err(|e| format!("write1: {e}"))?; 113 | drop(file); 114 | // Append data 115 | let mut file = OpenOptions::new().append(true).open(TEST_FILE).map_err(|e| format!("open append: {e}"))?; 116 | let data2 = vec![2u8; 512 * 1024]; 117 | file.write_all(&data2).map_err(|e| format!("write2: {e}"))?; 118 | drop(file); 119 | // Read back and check 120 | let mut file = File::open(TEST_FILE).map_err(|e| 
format!("open: {e}"))?; 121 | let mut buf = Vec::new(); 122 | file.read_to_end(&mut buf).map_err(|e| format!("read: {e}"))?; 123 | if buf.len() != 1024 * 1024 || &buf[..512*1024] != &data1[..] || &buf[512*1024..] != &data2[..] { 124 | return Err("data mismatch after append".to_string()); 125 | } 126 | // Remove file 127 | fs::remove_file(TEST_FILE).map_err(|e| format!("remove: {e}"))?; 128 | Ok(()) 129 | } 130 | 131 | fn file_truncate_shrink_read_delete() -> Result<(), String> { 132 | use std::fs::OpenOptions; 133 | // Create file and write data 134 | let mut file = File::create(TEST_FILE).map_err(|e| format!("create: {e}"))?; 135 | let data = vec![7u8; 1024 * 1024]; 136 | file.write_all(&data).map_err(|e| format!("write: {e}"))?; 137 | drop(file); 138 | // Truncate to half 139 | let file = OpenOptions::new().write(true).open(TEST_FILE).map_err(|e| format!("open: {e}"))?; 140 | file.set_len(512 * 1024).map_err(|e| format!("truncate: {e}"))?; 141 | drop(file); 142 | // Read back and check 143 | let mut file = File::open(TEST_FILE).map_err(|e| format!("open: {e}"))?; 144 | let mut buf = Vec::new(); 145 | file.read_to_end(&mut buf).map_err(|e| format!("read: {e}"))?; 146 | if buf.len() != 512 * 1024 || !buf.iter().all(|&b| b == 7) { 147 | return Err("data mismatch after truncate".to_string()); 148 | } 149 | // Remove file 150 | fs::remove_file(TEST_FILE).map_err(|e| format!("remove: {e}"))?; 151 | Ok(()) 152 | } 153 | 154 | fn file_truncate_grow_read_delete() -> Result<(), String> { 155 | use std::fs::OpenOptions; 156 | // Create file and write small data 157 | let mut file = File::create(TEST_FILE).map_err(|e| format!("create: {e}"))?; 158 | let data = vec![9u8; 512 * 1024]; 159 | file.write_all(&data).map_err(|e| format!("write: {e}"))?; 160 | drop(file); 161 | // Grow file to 1MB 162 | let file = OpenOptions::new().write(true).open(TEST_FILE).map_err(|e| format!("open: {e}"))?; 163 | file.set_len(1024 * 1024).map_err(|e| format!("truncate: {e}"))?; 164 | 
drop(file); 165 | // Read back and check 166 | let mut file = File::open(TEST_FILE).map_err(|e| format!("open: {e}"))?; 167 | let mut buf = Vec::new(); 168 | file.read_to_end(&mut buf).map_err(|e| format!("read: {e}"))?; 169 | if buf.len() != 1024 * 1024 || &buf[..512*1024] != &data[..] || !buf[512*1024..].iter().all(|&b| b == 0) { 170 | return Err("data mismatch after grow".to_string()); 171 | } 172 | // Remove file 173 | fs::remove_file(TEST_FILE).map_err(|e| format!("remove: {e}"))?; 174 | Ok(()) 175 | } 176 | 177 | fn file_rename_check_delete() -> Result<(), String> { 178 | const RENAMED_FILE: &str = "./mnt/testfile_renamed"; 179 | // Create file 180 | let mut file = File::create(TEST_FILE).map_err(|e| format!("create: {e}"))?; 181 | file.write_all(b"hello").map_err(|e| format!("write: {e}"))?; 182 | drop(file); 183 | // Rename file 184 | rename(TEST_FILE, RENAMED_FILE).map_err(|e| format!("rename: {e}"))?; 185 | // Check new name exists 186 | metadata(RENAMED_FILE).map_err(|e| format!("metadata: {e}"))?; 187 | // Remove file 188 | remove_file(RENAMED_FILE).map_err(|e| format!("remove: {e}"))?; 189 | Ok(()) 190 | } 191 | 192 | fn symlink_create_read_delete() -> Result<(), String> { 193 | const SYMLINK_PATH: &str = "./mnt/testfile_symlink"; 194 | // Create file to point to 195 | let mut file = File::create(TEST_FILE).map_err(|e| format!("create: {e}"))?; 196 | file.write_all(b"symlink target").map_err(|e| format!("write: {e}"))?; 197 | drop(file); 198 | // Create symlink 199 | symlink(TEST_FILE, SYMLINK_PATH).map_err(|e| format!("symlink: {e}"))?; 200 | // Read symlink 201 | let target = fs::read_link(SYMLINK_PATH).map_err(|e| format!("read_link: {e}"))?; 202 | if target != std::path::Path::new(TEST_FILE) { 203 | return Err("symlink target mismatch".to_string()); 204 | } 205 | // Remove symlink 206 | fs::remove_file(SYMLINK_PATH).map_err(|e| format!("remove symlink: {e}"))?; 207 | // Remove target file 208 | fs::remove_file(TEST_FILE).map_err(|e| 
format!("remove: {e}"))?; 209 | Ok(()) 210 | } 211 | 212 | fn file_create_write_read_delete_size(size: usize) -> Result<(), String> { 213 | // Create file 214 | let mut file = File::create(TEST_FILE).map_err(|e| format!("create: {e}"))?; 215 | // Write data of given size 216 | let data = vec![55u8; size]; 217 | file.write_all(&data).map_err(|e| format!("write: {e}"))?; 218 | drop(file); 219 | // Read data 220 | let mut file = File::open(TEST_FILE).map_err(|e| format!("open: {e}"))?; 221 | let mut buf = Vec::new(); 222 | file.read_to_end(&mut buf).map_err(|e| format!("read: {e}"))?; 223 | if buf != data { 224 | return Err("data mismatch".to_string()); 225 | } 226 | drop(file); 227 | // Random access write: overwrite 10 random positions with unique values 228 | let mut rng = rand::rngs::StdRng::seed_from_u64(42); 229 | let mut file = std::fs::OpenOptions::new().read(true).write(true).open(TEST_FILE).map_err(|e| format!("open for random write: {e}"))?; 230 | let mut random_indices = vec![]; 231 | for i in 0..10 { 232 | let idx = rng.gen_range(0..size); 233 | random_indices.push(idx); 234 | file.seek(std::io::SeekFrom::Start(idx as u64)).map_err(|e| format!("seek: {e}"))?; 235 | file.write_all(&[i as u8]).map_err(|e| format!("random write: {e}"))?; 236 | } 237 | drop(file); 238 | // Random access read: verify the 10 random positions 239 | let mut file = std::fs::OpenOptions::new().read(true).open(TEST_FILE).map_err(|e| format!("open for random read: {e}"))?; 240 | for (i, &idx) in random_indices.iter().enumerate() { 241 | file.seek(std::io::SeekFrom::Start(idx as u64)).map_err(|e| format!("seek: {e}"))?; 242 | let mut b = [0u8; 1]; 243 | file.read_exact(&mut b).map_err(|e| format!("random read: {e}"))?; 244 | if b[0] != i as u8 { 245 | return Err(format!("random access data mismatch at {idx}: expected {} got {}", i as u8, b[0])); 246 | } 247 | } 248 | drop(file); 249 | // Remove file 250 | fs::remove_file(TEST_FILE).map_err(|e| format!("remove: {e}"))?; 251 | Ok(()) 
252 | } 253 | 254 | fn file_create_write_read_delete_large() -> Result<(), String> { 255 | // 100MB 256 | file_create_write_read_delete_size(100 * 1024 * 1024) 257 | } 258 | 259 | fn concurrent_file_access() -> Result<(), String> { 260 | let num_threads = 8; 261 | let iterations = 1000; 262 | let barrier = Arc::new(Barrier::new(num_threads)); 263 | // Create file 264 | let mut file = File::create(TEST_FILE).map_err(|e| format!("create: {e}"))?; 265 | file.write_all(&[0u8; 4096]).map_err(|e| format!("init write: {e}"))?; 266 | drop(file); 267 | let mut handles = vec![]; 268 | for tid in 0..num_threads { 269 | let barrier = barrier.clone(); 270 | handles.push(thread::spawn(move || { 271 | barrier.wait(); 272 | for i in 0..iterations { 273 | let mut file = OpenOptions::new().read(true).write(true).open(TEST_FILE).map_err(|e| format!("open: {e}"))?; 274 | let pos = ((tid * 512 + i) % 4096) as u64; 275 | file.seek(std::io::SeekFrom::Start(pos)).map_err(|e| format!("seek: {e}"))?; 276 | let val = (tid as u8) ^ (i as u8); 277 | file.write_all(&[val]).map_err(|e| format!("write: {e}"))?; 278 | } 279 | Ok::<(), String>(()) 280 | })); 281 | } 282 | for h in handles { 283 | h.join().map_err(|_| "thread panic".to_string())??; 284 | } 285 | fs::remove_file(TEST_FILE).map_err(|e| format!("remove: {e}"))?; 286 | Ok(()) 287 | } 288 | 289 | fn dir_rename_check_delete() -> Result<(), String> { 290 | const DIR1: &str = "./mnt/testdir1"; 291 | const DIR2: &str = "./mnt/testdir2"; 292 | const FILE_IN_DIR: &str = "./mnt/testdir1/file"; 293 | // Create directory and file inside 294 | create_dir(DIR1).map_err(|e| format!("create_dir: {e}"))?; 295 | let mut file = File::create(FILE_IN_DIR).map_err(|e| format!("create file: {e}"))?; 296 | file.write_all(b"dir rename test").map_err(|e| format!("write: {e}"))?; 297 | drop(file); 298 | // Rename directory 299 | rename(DIR1, DIR2).map_err(|e| format!("rename dir: {e}"))?; 300 | // Check file is accessible at new path 301 | let mut file = 
File::open("./mnt/testdir2/file").map_err(|e| format!("open after rename: {e}"))?; 302 | let mut buf = String::new(); 303 | file.read_to_string(&mut buf).map_err(|e| format!("read: {e}"))?; 304 | if buf != "dir rename test" { 305 | return Err("file content mismatch after dir rename".to_string()); 306 | } 307 | // Remove file and directory 308 | fs::remove_file("./mnt/testdir2/file").map_err(|e| format!("remove file: {e}"))?; 309 | remove_dir(DIR2).map_err(|e| format!("remove dir: {e}"))?; 310 | Ok(()) 311 | } 312 | 313 | fn nested_dir_create_write_read_recursive_delete() -> Result<(), String> { 314 | let dir1 = "./mnt/dir1"; 315 | let dir2 = "./mnt/dir1/dir2"; 316 | let dir3 = "./mnt/dir1/dir2/dir3"; 317 | let file1 = format!("{}/file1", dir1); 318 | let file2 = format!("{}/file2", dir2); 319 | let file3 = format!("{}/file3", dir3); 320 | // Create nested directories 321 | fs::create_dir_all(&dir3).map_err(|e| format!("create_dir_all: {e}"))?; 322 | // Create files at each level 323 | let mut f1 = File::create(&file1).map_err(|e| format!("create file1: {e}"))?; 324 | let mut f2 = File::create(&file2).map_err(|e| format!("create file2: {e}"))?; 325 | let mut f3 = File::create(&file3).map_err(|e| format!("create file3: {e}"))?; 326 | f1.write_all(b"file1 data").map_err(|e| format!("write file1: {e}"))?; 327 | f2.write_all(b"file2 data").map_err(|e| format!("write file2: {e}"))?; 328 | f3.write_all(b"file3 data").map_err(|e| format!("write file3: {e}"))?; 329 | drop((f1, f2, f3)); 330 | // Read back and check 331 | let mut buf = String::new(); 332 | File::open(&file1).map_err(|e| format!("open file1: {e}"))?.read_to_string(&mut buf).map_err(|e| format!("read file1: {e}"))?; 333 | if buf != "file1 data" { return Err("file1 content mismatch".to_string()); } 334 | buf.clear(); 335 | File::open(&file2).map_err(|e| format!("open file2: {e}"))?.read_to_string(&mut buf).map_err(|e| format!("read file2: {e}"))?; 336 | if buf != "file2 data" { return Err("file2 content 
mismatch".to_string()); } 337 | buf.clear(); 338 | File::open(&file3).map_err(|e| format!("open file3: {e}"))?.read_to_string(&mut buf).map_err(|e| format!("read file3: {e}"))?; 339 | if buf != "file3 data" { return Err("file3 content mismatch".to_string()); } 340 | // Recursively delete top-level directory 341 | fs::remove_dir_all(dir1).map_err(|e| format!("remove_dir_all: {e}"))?; 342 | // Verify all gone 343 | if fs::metadata(dir1).is_ok() || fs::metadata(dir2).is_ok() || fs::metadata(dir3).is_ok() { 344 | return Err("directories not fully deleted".to_string()); 345 | } 346 | if fs::metadata(&file1).is_ok() || fs::metadata(&file2).is_ok() || fs::metadata(&file3).is_ok() { 347 | return Err("files not fully deleted".to_string()); 348 | } 349 | Ok(()) 350 | } 351 | 352 | #[test] 353 | fn integration_stress() { 354 | let providers = [ 355 | ("memory", "MemoryProvider", None), 356 | ("sqlite_simple", "SqliteSimpleProvider", Some("test-sqlite-simple.db")), 357 | ("sqlite_chunked", "SqliteChunkedProvider", Some("test-sqlite-chunked.db")), 358 | ]; 359 | let stress_tests = [ 360 | StressTest { name: "file_create_write_read_delete", func: file_create_write_read_delete, skip_providers: None }, 361 | StressTest { name: "file_create_write_read_delete_large", func: file_create_write_read_delete_large, skip_providers: Some(&["sqlite_simple"]) }, 362 | StressTest { name: "dir_create_list_delete", func: dir_create_list_delete, skip_providers: None }, 363 | StressTest { name: "file_append_read_delete", func: file_append_read_delete, skip_providers: None }, 364 | StressTest { name: "file_truncate_shrink_read_delete", func: file_truncate_shrink_read_delete, skip_providers: None }, 365 | StressTest { name: "file_truncate_grow_read_delete", func: file_truncate_grow_read_delete, skip_providers: None }, 366 | StressTest { name: "file_rename_check_delete", func: file_rename_check_delete, skip_providers: None }, 367 | StressTest { name: "symlink_create_read_delete", func: 
symlink_create_read_delete, skip_providers: None }, 368 | StressTest { name: "concurrent_file_access", func: concurrent_file_access, skip_providers: None }, 369 | StressTest { name: "dir_rename_check_delete", func: dir_rename_check_delete, skip_providers: None }, 370 | StressTest { name: "nested_dir_create_write_read_recursive_delete", func: nested_dir_create_write_read_recursive_delete, skip_providers: None }, 371 | // Add more tests here 372 | ]; 373 | let mut results = vec![vec![]; stress_tests.len()]; 374 | for (prov_idx, (prov, prov_name, db_path)) in providers.iter().enumerate() { 375 | clean_setup(*db_path); 376 | let mut child = run_fuse_with_provider(prov, *db_path); 377 | wait_for_mount(); 378 | for (test_idx, test) in stress_tests.iter().enumerate() { 379 | // Skip test for this provider if listed 380 | if let Some(skips) = test.skip_providers { 381 | if skips.iter().any(|&s| s == providers[prov_idx].0) { 382 | results[test_idx].push(ProviderTestResult { 383 | elapsed: Duration::from_micros(0), 384 | success: true, 385 | error: None, 386 | }); 387 | continue; 388 | } 389 | } 390 | println!("running test: {} with provider: {}", test.name, prov_name); 391 | let start = Instant::now(); 392 | let (success, error) = match (test.func)() { 393 | Ok(_) => (true, None), 394 | Err(e) => (false, Some(e)), 395 | }; 396 | let elapsed = start.elapsed(); 397 | results[test_idx].push(ProviderTestResult { 398 | elapsed, 399 | success, 400 | error, 401 | }); 402 | } 403 | unsafe { 404 | libc::kill(child.id() as i32, libc::SIGINT); 405 | } 406 | let _ = child.wait(); 407 | wait_for_unmount(); 408 | } 409 | // Print summary table 410 | let mut table = Table::new(); 411 | let mut header = vec!["operation".to_string()]; 412 | for (_, prov_name, _) in providers.iter() { 413 | header.push(format!("{} (μs)", prov_name)); 414 | } 415 | table.add_row(Row::new(header.iter().map(|s| Cell::new(s)).collect())); 416 | for (test_idx, test) in stress_tests.iter().enumerate() { 417 | let 
mut cells = vec![test.name.to_string()]; 418 | // Collect all elapsed times for this test row (only successful and not skipped ones) 419 | let times: Vec> = results[test_idx] 420 | .iter() 421 | .enumerate() 422 | .map(|(prov_idx, r)| { 423 | // Check if this test was skipped for this provider 424 | if let Some(skips) = test.skip_providers { 425 | if skips.iter().any(|&s| s == providers[prov_idx].0) { 426 | return None; 427 | } 428 | } 429 | if r.success { 430 | Some(r.elapsed.as_micros()) 431 | } else { 432 | None 433 | } 434 | }) 435 | .collect(); 436 | // Find the minimum time (ignore failures and skips) 437 | let min_time = times.iter().filter_map(|&t| t).min().unwrap_or(0); 438 | for (prov_idx, (_, _prov_name, _)) in providers.iter().enumerate() { 439 | // Check if this test was skipped for this provider 440 | if let Some(skips) = test.skip_providers { 441 | if skips.iter().any(|&s| s == providers[prov_idx].0) { 442 | cells.push("(skipped)".to_string()); 443 | continue; 444 | } 445 | } 446 | let r = &results[test_idx][prov_idx]; 447 | if r.success { 448 | let t = r.elapsed.as_micros(); 449 | if t == min_time && min_time > 0 { 450 | cells.push(format!("{}", t)); 451 | } else if min_time > 0 { 452 | let percent = ((t as f64 - min_time as f64) / min_time as f64 * 100.0).round() as i64; 453 | cells.push(format!("{} (+{}%)", t, percent)); 454 | } else { 455 | cells.push(format!("{}", t)); 456 | } 457 | } else { 458 | cells.push("\u{274C}".to_string()); 459 | } 460 | } 461 | table.add_row(Row::new(cells.iter().map(|s| Cell::new(s)).collect())); 462 | } 463 | table.printstd(); 464 | 465 | // Print failure details table 466 | let mut failure_table = Table::new(); 467 | failure_table.add_row(Row::new(vec![Cell::new("test"), Cell::new("provider"), Cell::new("reason")])); 468 | for (test_idx, test) in stress_tests.iter().enumerate() { 469 | for (_prov_idx, (_, _prov_name, _)) in providers.iter().enumerate() { 470 | let r = &results[test_idx][_prov_idx]; 471 | if 
!r.success { 472 | failure_table.add_row(Row::new(vec![ 473 | Cell::new(test.name), 474 | Cell::new(_prov_name), 475 | Cell::new(r.error.as_deref().unwrap_or("unknown error")), 476 | ])); 477 | } 478 | } 479 | } 480 | if failure_table.len() > 1 { 481 | println!("\nFailure details:"); 482 | failure_table.printstd(); 483 | } 484 | assert!(results.iter().enumerate().all(|(test_idx, row)| { 485 | row.iter().enumerate().all(|(prov_idx, r)| { 486 | // If test is skipped for this provider, treat as success 487 | if let Some(skips) = stress_tests[test_idx].skip_providers { 488 | if skips.iter().any(|&s| s == providers[prov_idx].0) { 489 | return true; 490 | } 491 | } 492 | r.success 493 | }) 494 | }), "Some providers failed"); 495 | 496 | // Final cleanup: remove test DBs if present 497 | let _ = std::fs::remove_file("test-sqlite-simple.db"); 498 | let _ = std::fs::remove_file("test-sqlite-chunked.db"); 499 | } -------------------------------------------------------------------------------- /packages/dofs-rust-client/src/providers/sqlite_simple.rs: -------------------------------------------------------------------------------- 1 | use rusqlite::{params, Connection, Result, OptionalExtension}; 2 | use std::time::SystemTime; 3 | use fuser; 4 | use crate::providers::Provider; 5 | use serde::{Serialize, Deserialize}; 6 | use std::ffi::OsStr; 7 | 8 | const ROOT_INODE: u64 = 1; 9 | const USER_INODE_START: u64 = 10; // user files/dirs start here to avoid reserved inodes 10 | 11 | #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq)] 12 | enum FileTypeRepr { 13 | RegularFile, 14 | Directory, 15 | Symlink, 16 | BlockDevice, 17 | CharDevice, 18 | NamedPipe, 19 | Socket, 20 | } 21 | 22 | impl From for FileTypeRepr { 23 | fn from(ft: fuser::FileType) -> Self { 24 | match ft { 25 | fuser::FileType::RegularFile => FileTypeRepr::RegularFile, 26 | fuser::FileType::Directory => FileTypeRepr::Directory, 27 | fuser::FileType::Symlink => FileTypeRepr::Symlink, 28 | 
fuser::FileType::BlockDevice => FileTypeRepr::BlockDevice,
            fuser::FileType::CharDevice => FileTypeRepr::CharDevice,
            fuser::FileType::NamedPipe => FileTypeRepr::NamedPipe,
            fuser::FileType::Socket => FileTypeRepr::Socket,
        }
    }
}

impl From<FileTypeRepr> for fuser::FileType {
    fn from(ft: FileTypeRepr) -> Self {
        match ft {
            FileTypeRepr::RegularFile => fuser::FileType::RegularFile,
            FileTypeRepr::Directory => fuser::FileType::Directory,
            FileTypeRepr::Symlink => fuser::FileType::Symlink,
            FileTypeRepr::BlockDevice => fuser::FileType::BlockDevice,
            FileTypeRepr::CharDevice => fuser::FileType::CharDevice,
            FileTypeRepr::NamedPipe => fuser::FileType::NamedPipe,
            FileTypeRepr::Socket => fuser::FileType::Socket,
        }
    }
}

/// serde-friendly mirror of `fuser::FileAttr`, stored as a bincode BLOB.
#[derive(Serialize, Deserialize, Debug, Clone)]
struct SerializableFileAttr {
    ino: u64,
    size: u64,
    blocks: u64,
    atime: SystemTime,
    mtime: SystemTime,
    ctime: SystemTime,
    crtime: SystemTime,
    kind: FileTypeRepr,
    perm: u16,
    nlink: u32,
    uid: u32,
    gid: u32,
    rdev: u32,
    flags: u32,
    blksize: u32,
}

impl From<&fuser::FileAttr> for SerializableFileAttr {
    fn from(attr: &fuser::FileAttr) -> Self {
        SerializableFileAttr {
            ino: attr.ino,
            size: attr.size,
            blocks: attr.blocks,
            atime: attr.atime,
            mtime: attr.mtime,
            ctime: attr.ctime,
            crtime: attr.crtime,
            kind: FileTypeRepr::from(attr.kind),
            perm: attr.perm,
            nlink: attr.nlink,
            uid: attr.uid,
            gid: attr.gid,
            rdev: attr.rdev,
            flags: attr.flags,
            blksize: attr.blksize,
        }
    }
}

impl From<&SerializableFileAttr> for fuser::FileAttr {
    fn from(attr: &SerializableFileAttr) -> Self {
        // Clamp stored timestamps to a sane range before handing them back to
        // fuser: >~100 years in the future becomes "now", pre-epoch becomes
        // the epoch (prevents overflow in kernel timespec conversion).
        let now = SystemTime::now();
        let safe_time = |t: SystemTime| -> SystemTime {
            if let Ok(duration_since_epoch) = t.duration_since(std::time::UNIX_EPOCH) {
                if duration_since_epoch.as_secs() > now.duration_since(std::time::UNIX_EPOCH).unwrap_or_default().as_secs() + (100 * 365 * 24 * 3600) {
                    now
                } else {
                    t
                }
            } else {
                std::time::UNIX_EPOCH
            }
        };

        fuser::FileAttr {
            ino: attr.ino,
            size: attr.size,
            blocks: attr.blocks,
            atime: safe_time(attr.atime),
            mtime: safe_time(attr.mtime),
            ctime: safe_time(attr.ctime),
            crtime: safe_time(attr.crtime),
            kind: fuser::FileType::from(attr.kind),
            perm: attr.perm,
            nlink: attr.nlink,
            uid: attr.uid,
            gid: attr.gid,
            rdev: attr.rdev,
            flags: attr.flags,
            blksize: attr.blksize,
        }
    }
}

/// Single-table SQLite-backed provider: one row per inode, whole file
/// contents in a `data` BLOB and serialized attributes in an `attr` BLOB.
pub struct SqliteProvider {
    conn: Connection,
    next_inode: u64,
    pub osx_mode: bool,
}

impl SqliteProvider {
    #[allow(dead_code)]
    pub fn new(db_path: &str) -> Result<Self> {
        Self::new_with_mode(db_path, false)
    }

    /// Opens (creating if needed) the database, ensures the schema and the
    /// root directory row exist, and seeds the inode allocator past the
    /// largest inode already on disk (never below USER_INODE_START).
    pub fn new_with_mode(db_path: &str, osx_mode: bool) -> Result<Self> {
        let conn = Connection::open(db_path)?;
        conn.execute_batch(
            "CREATE TABLE IF NOT EXISTS files (
                ino INTEGER PRIMARY KEY,
                name TEXT NOT NULL,
                parent INTEGER,
                is_dir INTEGER NOT NULL,
                data BLOB,
                attr BLOB
            );
            CREATE INDEX IF NOT EXISTS idx_files_parent_name ON files(parent, name);
            CREATE INDEX IF NOT EXISTS idx_files_parent ON files(parent);
            CREATE INDEX IF NOT EXISTS idx_files_name ON files(name);"
        )?;
        // Ensure the root directory row exists.
        {
            let mut stmt = conn.prepare("SELECT COUNT(*) FROM files WHERE ino = ?1")?;
            let count: i64 = stmt.query_row(params![ROOT_INODE], |row| row.get(0))?;
            if count == 0 {
                let now = SystemTime::now();
                let attr = fuser::FileAttr {
                    ino: ROOT_INODE,
                    size: 0,
                    blocks: 0,
                    atime: now,
                    mtime: now,
                    ctime: now,
                    crtime: now,
                    kind: fuser::FileType::Directory,
                    perm: 0o755,
                    nlink: 2,
                    uid: unsafe { libc::geteuid() },
                    gid: unsafe { libc::getegid() },
                    rdev: 0,
                    flags: 0,
                    blksize: 512,
                };
                let attr_bytes = bincode::serialize(&SerializableFileAttr::from(&attr)).unwrap();
                conn.execute(
                    "INSERT INTO files (ino, name, parent, is_dir, data, attr) VALUES (?1, ?2, ?3, ?4, ?5, ?6)",
                    params![ROOT_INODE, "/", None::<u64>, 1, None::<Vec<u8>>, attr_bytes],
                )?;
            }
        }
        // Next inode = max(existing) + 1, but never inside the reserved range.
        let mut next_inode: u64 = conn.query_row(
            "SELECT MAX(ino) FROM files",
            [],
            |row| row.get::<_, Option<u64>>(0),
        )?.unwrap_or(ROOT_INODE);
        if next_inode < USER_INODE_START {
            next_inode = USER_INODE_START;
        } else {
            next_inode += 1;
        }
        Ok(Self { conn, next_inode, osx_mode })
    }

    /// Hands out the next unused inode number.
    fn alloc_inode(&mut self) -> u64 {
        let ino = self.next_inode;
        self.next_inode += 1;
        ino
    }

    /// Loads and deserializes the attr BLOB for `ino`; None if the row is
    /// missing or the query fails.
    fn get_attr(&self, ino: u64) -> Option<fuser::FileAttr> {
        self.conn.query_row(
            "SELECT attr FROM files WHERE ino = ?1",
            params![ino],
            |row| {
                let attr_blob: Vec<u8> = row.get(0)?;
                // NOTE(review): unwrap panics on a corrupt attr blob -- confirm
                // that aborting on DB corruption is the intended policy.
                let ser_attr: SerializableFileAttr = bincode::deserialize(&attr_blob).unwrap();
                Ok(fuser::FileAttr::from(&ser_attr))
            },
        ).optional().unwrap_or(None)
    }

    /// Serializes and stores `attr` for `ino` (errors silently ignored).
    fn set_attr(&self, ino: u64, attr: &fuser::FileAttr) {
        let attr_bytes = bincode::serialize(&SerializableFileAttr::from(attr)).unwrap();
        let _ = self.conn.execute(
            "UPDATE files SET attr = ?1 WHERE ino = ?2",
            params![attr_bytes, ino],
        );
    }

    /// Returns the whole data BLOB for `ino`, or None when absent/NULL.
    fn get_file_data(&self, ino: u64) -> Option<Vec<u8>> {
        self.conn.query_row(
            "SELECT data FROM files WHERE ino = ?1",
            params![ino],
            |row| row.get(0),
        ).optional().unwrap_or(None)
    }

    /// Replaces the whole data BLOB for `ino` (errors silently ignored).
    fn set_file_data(&self, ino: u64, data: &[u8]) {
        let _ = self.conn.execute(
            "UPDATE files SET data = ?1 WHERE ino = ?2",
            params![data, ino],
        );
    }

    /// Resolves the inode of the entry named `name` under `parent`.
    fn get_child_ino(&self, parent: u64, name: &str) -> Option<u64> {
        self.conn.query_row(
            "SELECT ino FROM files WHERE parent = ?1 AND name = ?2",
            params![parent, name],
            |row| row.get(0),
        ).optional().unwrap_or(None)
    }

    /// True when no row lists `ino` as its parent.
    fn is_dir_empty(&self, ino: u64) -> bool {
        let count: i64 = self.conn.query_row(
            "SELECT COUNT(*) FROM files WHERE parent = ?1",
            params![ino],
            |row| row.get(0),
        ).unwrap_or(0);
        count == 0
    }
}

impl Provider for SqliteProvider {
    /// Removes an empty directory entry.
    fn rmdir(&mut self, parent: u64, name: &OsStr, reply: fuser::ReplyEmpty) {
        let name_str = name.to_str().unwrap_or("");
        let ino = match self.get_child_ino(parent, name_str) {
            Some(ino) => ino,
            None => { reply.error(libc::ENOENT); return; }
        };
        // Fix: rmdir must not remove non-directories (the original happily
        // deleted regular files and symlinks here).
        match self.get_attr(ino) {
            Some(attr) if attr.kind == fuser::FileType::Directory => {}
            Some(_) => { reply.error(libc::ENOTDIR); return; }
            None => { reply.error(libc::ENOENT); return; }
        }
        if !self.is_dir_empty(ino) {
            reply.error(libc::ENOTEMPTY); return;
        }
        // One DELETE by ino suffices: the row keyed by (parent, name) IS the
        // row keyed by ino (the original issued a redundant second DELETE).
        let _ = self.conn.execute("DELETE FROM files WHERE ino = ?1", params![ino]);
        reply.ok();
    }

    /// Stateless open: succeeds for any inode that has a row.
    fn open(&mut self, ino: u64, reply: fuser::ReplyOpen) {
        if self.get_attr(ino).is_some() {
            reply.opened(0, 0);
        } else {
            reply.error(libc::ENOENT);
        }
    }

    /// Writes go straight to SQLite, so flush only validates the inode.
    fn flush(&mut self, ino: u64, reply: fuser::ReplyEmpty) {
        if self.get_attr(ino).is_some() {
            reply.ok();
        } else {
            reply.error(libc::ENOENT);
        }
    }

    /// No per-handle state to release.
    fn release(&mut self, ino: u64, reply: fuser::ReplyEmpty) {
        if self.get_attr(ino).is_some() {
            reply.ok();
        } else {
            reply.error(libc::ENOENT);
        }
    }

    fn setattr(&mut
self, ino: u64, mode: Option, uid: Option, gid: Option, size: Option, atime: Option, mtime: Option, ctime: Option, crtime: Option, flags: Option, reply: fuser::ReplyAttr) { 288 | fn timeornow_to_systemtime(t: fuser::TimeOrNow) -> SystemTime { 289 | match t { 290 | fuser::TimeOrNow::SpecificTime(st) => st, 291 | fuser::TimeOrNow::Now => SystemTime::now(), 292 | } 293 | } 294 | fn safe_systemtime(t: SystemTime) -> SystemTime { 295 | // Ensure timestamp is within valid range 296 | let now = SystemTime::now(); 297 | if let Ok(duration_since_epoch) = t.duration_since(std::time::UNIX_EPOCH) { 298 | if duration_since_epoch.as_secs() > now.duration_since(std::time::UNIX_EPOCH).unwrap_or_default().as_secs() + (100 * 365 * 24 * 3600) { 299 | now 300 | } else { 301 | t 302 | } 303 | } else { 304 | std::time::UNIX_EPOCH 305 | } 306 | } 307 | if let Some(mut attr) = self.get_attr(ino) { 308 | if let Some(m) = mode { attr.perm = m as u16; } 309 | if let Some(u) = uid { attr.uid = u; } 310 | if let Some(g) = gid { attr.gid = g; } 311 | if let Some(a) = atime { attr.atime = timeornow_to_systemtime(a); } 312 | if let Some(m) = mtime { attr.mtime = timeornow_to_systemtime(m); } 313 | if let Some(c) = ctime { attr.ctime = safe_systemtime(c); } 314 | if let Some(cr) = crtime { attr.crtime = safe_systemtime(cr); } 315 | if let Some(fg) = flags { attr.flags = fg; } 316 | if let Some(new_size) = size { 317 | let mut data = self.get_file_data(ino).unwrap_or_default(); 318 | data.resize(new_size as usize, 0); 319 | self.set_file_data(ino, &data); 320 | attr.size = new_size; 321 | } 322 | self.set_attr(ino, &attr); 323 | reply.attr(&std::time::Duration::from_secs(1), &attr); 324 | } else { 325 | reply.error(libc::ENOENT); 326 | } 327 | } 328 | fn lookup(&mut self, parent: u64, name: &OsStr, reply: fuser::ReplyEntry) { 329 | let name = name.to_str().unwrap_or(""); 330 | let ino = self.get_child_ino(parent, name); 331 | if let Some(ino) = ino { 332 | if let Some(attr) = self.get_attr(ino) { 
333 | reply.entry(&std::time::Duration::from_secs(1), &attr, 0); 334 | return; 335 | } 336 | } 337 | reply.error(libc::ENOENT); 338 | } 339 | fn getattr(&mut self, ino: u64, reply: fuser::ReplyAttr) { 340 | if let Some(attr) = self.get_attr(ino) { 341 | reply.attr(&std::time::Duration::from_secs(1), &attr); 342 | } else { 343 | reply.error(libc::ENOENT); 344 | } 345 | } 346 | fn readdir(&mut self, ino: u64, offset: i64, mut reply: fuser::ReplyDirectory) { 347 | let mut entries = vec![(ROOT_INODE, fuser::FileType::Directory, ".".to_string()), (ROOT_INODE, fuser::FileType::Directory, "..".to_string())]; 348 | let mut stmt = self.conn.prepare("SELECT ino, name, is_dir, attr FROM files WHERE parent = ?1").unwrap(); 349 | let rows = stmt.query_map(params![ino], |row| { 350 | let ino: u64 = row.get(0)?; 351 | let name: String = row.get(1)?; 352 | let is_dir: i64 = row.get(2)?; 353 | let attr_blob: Vec = row.get(3)?; 354 | let ser_attr: SerializableFileAttr = bincode::deserialize(&attr_blob).unwrap(); 355 | let kind = fuser::FileType::from(ser_attr.kind); 356 | Ok((ino, kind, name)) 357 | }).unwrap(); 358 | for row in rows { 359 | let (ino, kind, name) = row.unwrap(); 360 | if self.osx_mode && name.starts_with("._") { 361 | continue; 362 | } 363 | entries.push((ino, kind, name)); 364 | } 365 | for (i, (ino, kind, name)) in entries.into_iter().enumerate().skip(offset as usize) { 366 | if reply.add(ino, (i + 1) as i64, kind, name) { 367 | break; 368 | } 369 | } 370 | reply.ok(); 371 | } 372 | fn mkdir(&mut self, parent: u64, name: &OsStr, mode: u32, umask: u32, reply: fuser::ReplyEntry) { 373 | let name_str = name.to_str().unwrap_or(""); 374 | if self.osx_mode && name_str.starts_with("._") { 375 | reply.error(libc::EACCES); 376 | return; 377 | } 378 | if self.get_child_ino(parent, name_str).is_some() { 379 | reply.error(libc::EEXIST); return; 380 | } 381 | let ino = self.alloc_inode(); 382 | let now = SystemTime::now(); 383 | let attr = fuser::FileAttr { 384 | ino, 385 | 
size: 0, 386 | blocks: 0, 387 | atime: now, 388 | mtime: now, 389 | ctime: now, 390 | crtime: now, 391 | kind: fuser::FileType::Directory, 392 | perm: (mode & !umask & 0o7777) as u16, 393 | nlink: 2, 394 | uid: unsafe { libc::geteuid() }, 395 | gid: unsafe { libc::getegid() }, 396 | rdev: 0, 397 | flags: 0, 398 | blksize: 512, 399 | }; 400 | let attr_bytes = bincode::serialize(&SerializableFileAttr::from(&attr)).unwrap(); 401 | let _ = self.conn.execute( 402 | "INSERT INTO files (ino, name, parent, is_dir, data, attr) VALUES (?1, ?2, ?3, ?4, ?5, ?6)", 403 | params![ino, name_str, parent, 1, None::>, attr_bytes], 404 | ); 405 | reply.entry(&std::time::Duration::from_secs(1), &attr, 0); 406 | } 407 | fn create(&mut self, parent: u64, name: &OsStr, mode: u32, _flags: u32, umask: i32, reply: fuser::ReplyCreate) { 408 | let name_str = name.to_str().unwrap_or(""); 409 | if self.osx_mode && name_str.starts_with("._") { 410 | reply.error(libc::EACCES); 411 | return; 412 | } 413 | if self.get_child_ino(parent, name_str).is_some() { 414 | reply.error(libc::EEXIST); return; 415 | } 416 | let ino = self.alloc_inode(); 417 | let now = SystemTime::now(); 418 | let attr = fuser::FileAttr { 419 | ino, 420 | size: 0, 421 | blocks: 0, 422 | atime: now, 423 | mtime: now, 424 | ctime: now, 425 | crtime: now, 426 | kind: fuser::FileType::RegularFile, 427 | perm: (mode & !(umask as u32) & 0o7777) as u16, 428 | nlink: 1, 429 | uid: unsafe { libc::geteuid() }, 430 | gid: unsafe { libc::getegid() }, 431 | rdev: 0, 432 | flags: 0, 433 | blksize: 512, 434 | }; 435 | let attr_bytes = bincode::serialize(&SerializableFileAttr::from(&attr)).unwrap(); 436 | let _ = self.conn.execute( 437 | "INSERT INTO files (ino, name, parent, is_dir, data, attr) VALUES (?1, ?2, ?3, ?4, ?5, ?6)", 438 | params![ino, name_str, parent, 0, Vec::::new(), attr_bytes], 439 | ); 440 | reply.created(&std::time::Duration::from_secs(1), &attr, 0, 0, 0); 441 | } 442 | fn read(&mut self, ino: u64, offset: i64, size: u32, 
reply: fuser::ReplyData) { 443 | if let Some(data) = self.get_file_data(ino) { 444 | let end = std::cmp::min((offset as usize) + (size as usize), data.len()); 445 | let start = std::cmp::min(offset as usize, data.len()); 446 | reply.data(&data[start..end]); 447 | } else { 448 | reply.error(libc::ENOENT); 449 | } 450 | } 451 | fn write(&mut self, ino: u64, offset: i64, data: &[u8], reply: fuser::ReplyWrite) { 452 | if let Some(mut file_data) = self.get_file_data(ino) { 453 | let offset = offset as usize; 454 | if file_data.len() < offset + data.len() { 455 | file_data.resize(offset + data.len(), 0); 456 | } 457 | file_data[offset..offset + data.len()].copy_from_slice(data); 458 | self.set_file_data(ino, &file_data); 459 | if let Some(mut attr) = self.get_attr(ino) { 460 | attr.size = file_data.len() as u64; 461 | self.set_attr(ino, &attr); 462 | } 463 | reply.written(data.len() as u32); 464 | } else { 465 | reply.error(libc::ENOENT); 466 | } 467 | } 468 | fn unlink(&mut self, parent: u64, name: &OsStr, reply: fuser::ReplyEmpty) { 469 | let name_str = name.to_str().unwrap_or(""); 470 | let target_ino = self.get_child_ino(parent, name_str); 471 | let ino = match target_ino { 472 | Some(ino) => ino, 473 | None => { reply.error(libc::ENOENT); return; } 474 | }; 475 | let _ = self.conn.execute("DELETE FROM files WHERE ino = ?1", params![ino]); 476 | reply.ok(); 477 | } 478 | fn rename(&mut self, parent: u64, name: &OsStr, newparent: u64, newname: &OsStr, _flags: u32, reply: fuser::ReplyEmpty) { 479 | let name_str = name.to_str().unwrap_or(""); 480 | let newname_str = newname.to_str().unwrap_or(""); 481 | // Find the inode to move 482 | let ino = match self.get_child_ino(parent, name_str) { 483 | Some(ino) => ino, 484 | None => { reply.error(libc::ENOENT); return; } 485 | }; 486 | // If destination exists, remove it (file or empty dir) 487 | if let Some(dest_ino) = self.get_child_ino(newparent, newname_str) { 488 | // Check if it's a directory and not empty 489 | if let 
Some(attr) = self.get_attr(dest_ino) { 490 | if attr.kind == fuser::FileType::Directory && !self.is_dir_empty(dest_ino) { 491 | reply.error(libc::ENOTEMPTY); 492 | return; 493 | } 494 | } 495 | let _ = self.conn.execute("DELETE FROM files WHERE ino = ?1", params![dest_ino]); 496 | } 497 | // Update the file's parent and name 498 | let res = self.conn.execute( 499 | "UPDATE files SET parent = ?1, name = ?2 WHERE ino = ?3", 500 | params![newparent, newname_str, ino], 501 | ); 502 | if res.is_ok() { 503 | // Remove the old name entry if parent/name changed 504 | let _ = self.conn.execute( 505 | "DELETE FROM files WHERE parent = ?1 AND name = ?2 AND ino != ?3", 506 | params![parent, name_str, ino], 507 | ); 508 | reply.ok(); 509 | } else { 510 | reply.error(libc::EIO); 511 | } 512 | } 513 | fn symlink(&mut self, parent: u64, name: &OsStr, link: &std::path::Path, reply: fuser::ReplyEntry) { 514 | let name_str = name.to_str().unwrap_or(""); 515 | if self.osx_mode && name_str.starts_with("._") { 516 | reply.error(libc::EACCES); 517 | return; 518 | } 519 | if self.get_child_ino(parent, name_str).is_some() { 520 | reply.error(libc::EEXIST); return; 521 | } 522 | let ino = self.alloc_inode(); 523 | let now = SystemTime::now(); 524 | let target = link.to_string_lossy().to_string().into_bytes(); 525 | let attr = fuser::FileAttr { 526 | ino, 527 | size: target.len() as u64, 528 | blocks: 0, 529 | atime: now, 530 | mtime: now, 531 | ctime: now, 532 | crtime: now, 533 | kind: fuser::FileType::Symlink, 534 | perm: 0o777, 535 | nlink: 1, 536 | uid: unsafe { libc::geteuid() }, 537 | gid: unsafe { libc::getegid() }, 538 | rdev: 0, 539 | flags: 0, 540 | blksize: 512, 541 | }; 542 | let attr_bytes = bincode::serialize(&SerializableFileAttr::from(&attr)).unwrap(); 543 | let _ = self.conn.execute( 544 | "INSERT INTO files (ino, name, parent, is_dir, data, attr) VALUES (?1, ?2, ?3, ?4, ?5, ?6)", 545 | params![ino, name_str, parent, 0, target, attr_bytes], 546 | ); 547 | 
reply.entry(&std::time::Duration::from_secs(1), &attr, 0); 548 | } 549 | fn readlink(&mut self, ino: u64, reply: fuser::ReplyData) { 550 | let attr = self.get_attr(ino); 551 | if let Some(attr) = attr { 552 | if attr.kind == fuser::FileType::Symlink { 553 | if let Some(data) = self.get_file_data(ino) { 554 | reply.data(&data); 555 | return; 556 | } 557 | } 558 | } 559 | reply.error(libc::EINVAL); 560 | } 561 | } -------------------------------------------------------------------------------- /packages/dofs-rust-client/Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 3 | version = 4 4 | 5 | [[package]] 6 | name = "ahash" 7 | version = "0.8.11" 8 | source = "registry+https://github.com/rust-lang/crates.io-index" 9 | checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" 10 | dependencies = [ 11 | "cfg-if", 12 | "once_cell", 13 | "version_check", 14 | "zerocopy 0.7.35", 15 | ] 16 | 17 | [[package]] 18 | name = "anstream" 19 | version = "0.6.18" 20 | source = "registry+https://github.com/rust-lang/crates.io-index" 21 | checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" 22 | dependencies = [ 23 | "anstyle", 24 | "anstyle-parse", 25 | "anstyle-query", 26 | "anstyle-wincon", 27 | "colorchoice", 28 | "is_terminal_polyfill", 29 | "utf8parse", 30 | ] 31 | 32 | [[package]] 33 | name = "anstyle" 34 | version = "1.0.10" 35 | source = "registry+https://github.com/rust-lang/crates.io-index" 36 | checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" 37 | 38 | [[package]] 39 | name = "anstyle-parse" 40 | version = "0.2.6" 41 | source = "registry+https://github.com/rust-lang/crates.io-index" 42 | checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" 43 | dependencies = [ 44 | "utf8parse", 45 | ] 46 | 47 | [[package]] 48 | name = "anstyle-query" 
49 | version = "1.1.2" 50 | source = "registry+https://github.com/rust-lang/crates.io-index" 51 | checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" 52 | dependencies = [ 53 | "windows-sys", 54 | ] 55 | 56 | [[package]] 57 | name = "anstyle-wincon" 58 | version = "3.0.7" 59 | source = "registry+https://github.com/rust-lang/crates.io-index" 60 | checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" 61 | dependencies = [ 62 | "anstyle", 63 | "once_cell", 64 | "windows-sys", 65 | ] 66 | 67 | [[package]] 68 | name = "bincode" 69 | version = "1.3.3" 70 | source = "registry+https://github.com/rust-lang/crates.io-index" 71 | checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" 72 | dependencies = [ 73 | "serde", 74 | ] 75 | 76 | [[package]] 77 | name = "bitflags" 78 | version = "2.9.0" 79 | source = "registry+https://github.com/rust-lang/crates.io-index" 80 | checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" 81 | 82 | [[package]] 83 | name = "byteorder" 84 | version = "1.5.0" 85 | source = "registry+https://github.com/rust-lang/crates.io-index" 86 | checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" 87 | 88 | [[package]] 89 | name = "cf-fuse" 90 | version = "0.1.0" 91 | dependencies = [ 92 | "bincode", 93 | "clap", 94 | "ctrlc", 95 | "filetime", 96 | "fuser", 97 | "libc", 98 | "log", 99 | "prettytable-rs", 100 | "rand", 101 | "rusqlite", 102 | "serde", 103 | "simplelog", 104 | ] 105 | 106 | [[package]] 107 | name = "cfg-if" 108 | version = "1.0.0" 109 | source = "registry+https://github.com/rust-lang/crates.io-index" 110 | checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" 111 | 112 | [[package]] 113 | name = "cfg_aliases" 114 | version = "0.2.1" 115 | source = "registry+https://github.com/rust-lang/crates.io-index" 116 | checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" 117 | 118 | 
[[package]] 119 | name = "clap" 120 | version = "4.5.37" 121 | source = "registry+https://github.com/rust-lang/crates.io-index" 122 | checksum = "eccb054f56cbd38340b380d4a8e69ef1f02f1af43db2f0cc817a4774d80ae071" 123 | dependencies = [ 124 | "clap_builder", 125 | "clap_derive", 126 | ] 127 | 128 | [[package]] 129 | name = "clap_builder" 130 | version = "4.5.37" 131 | source = "registry+https://github.com/rust-lang/crates.io-index" 132 | checksum = "efd9466fac8543255d3b1fcad4762c5e116ffe808c8a3043d4263cd4fd4862a2" 133 | dependencies = [ 134 | "anstream", 135 | "anstyle", 136 | "clap_lex", 137 | "strsim", 138 | ] 139 | 140 | [[package]] 141 | name = "clap_derive" 142 | version = "4.5.32" 143 | source = "registry+https://github.com/rust-lang/crates.io-index" 144 | checksum = "09176aae279615badda0765c0c0b3f6ed53f4709118af73cf4655d85d1530cd7" 145 | dependencies = [ 146 | "heck", 147 | "proc-macro2", 148 | "quote", 149 | "syn", 150 | ] 151 | 152 | [[package]] 153 | name = "clap_lex" 154 | version = "0.7.4" 155 | source = "registry+https://github.com/rust-lang/crates.io-index" 156 | checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" 157 | 158 | [[package]] 159 | name = "colorchoice" 160 | version = "1.0.3" 161 | source = "registry+https://github.com/rust-lang/crates.io-index" 162 | checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" 163 | 164 | [[package]] 165 | name = "csv" 166 | version = "1.3.1" 167 | source = "registry+https://github.com/rust-lang/crates.io-index" 168 | checksum = "acdc4883a9c96732e4733212c01447ebd805833b7275a73ca3ee080fd77afdaf" 169 | dependencies = [ 170 | "csv-core", 171 | "itoa", 172 | "ryu", 173 | "serde", 174 | ] 175 | 176 | [[package]] 177 | name = "csv-core" 178 | version = "0.1.12" 179 | source = "registry+https://github.com/rust-lang/crates.io-index" 180 | checksum = "7d02f3b0da4c6504f86e9cd789d8dbafab48c2321be74e9987593de5a894d93d" 181 | dependencies = [ 182 | "memchr", 183 | ] 184 | 185 | 
[[package]] 186 | name = "ctrlc" 187 | version = "3.4.6" 188 | source = "registry+https://github.com/rust-lang/crates.io-index" 189 | checksum = "697b5419f348fd5ae2478e8018cb016c00a5881c7f46c717de98ffd135a5651c" 190 | dependencies = [ 191 | "nix", 192 | "windows-sys", 193 | ] 194 | 195 | [[package]] 196 | name = "deranged" 197 | version = "0.4.0" 198 | source = "registry+https://github.com/rust-lang/crates.io-index" 199 | checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" 200 | dependencies = [ 201 | "powerfmt", 202 | ] 203 | 204 | [[package]] 205 | name = "dirs-next" 206 | version = "2.0.0" 207 | source = "registry+https://github.com/rust-lang/crates.io-index" 208 | checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" 209 | dependencies = [ 210 | "cfg-if", 211 | "dirs-sys-next", 212 | ] 213 | 214 | [[package]] 215 | name = "dirs-sys-next" 216 | version = "0.1.2" 217 | source = "registry+https://github.com/rust-lang/crates.io-index" 218 | checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" 219 | dependencies = [ 220 | "libc", 221 | "redox_users", 222 | "winapi", 223 | ] 224 | 225 | [[package]] 226 | name = "encode_unicode" 227 | version = "1.0.0" 228 | source = "registry+https://github.com/rust-lang/crates.io-index" 229 | checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" 230 | 231 | [[package]] 232 | name = "fallible-iterator" 233 | version = "0.3.0" 234 | source = "registry+https://github.com/rust-lang/crates.io-index" 235 | checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" 236 | 237 | [[package]] 238 | name = "fallible-streaming-iterator" 239 | version = "0.1.9" 240 | source = "registry+https://github.com/rust-lang/crates.io-index" 241 | checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" 242 | 243 | [[package]] 244 | name = "filetime" 245 | version = "0.2.25" 246 | source = 
"registry+https://github.com/rust-lang/crates.io-index" 247 | checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586" 248 | dependencies = [ 249 | "cfg-if", 250 | "libc", 251 | "libredox", 252 | "windows-sys", 253 | ] 254 | 255 | [[package]] 256 | name = "fuser" 257 | version = "0.14.0" 258 | source = "registry+https://github.com/rust-lang/crates.io-index" 259 | checksum = "2e697f6f62c20b6fad1ba0f84ae909f25971cf16e735273524e3977c94604cf8" 260 | dependencies = [ 261 | "libc", 262 | "log", 263 | "memchr", 264 | "page_size", 265 | "pkg-config", 266 | "smallvec", 267 | "zerocopy 0.7.35", 268 | ] 269 | 270 | [[package]] 271 | name = "getrandom" 272 | version = "0.2.16" 273 | source = "registry+https://github.com/rust-lang/crates.io-index" 274 | checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" 275 | dependencies = [ 276 | "cfg-if", 277 | "libc", 278 | "wasi", 279 | ] 280 | 281 | [[package]] 282 | name = "hashbrown" 283 | version = "0.14.5" 284 | source = "registry+https://github.com/rust-lang/crates.io-index" 285 | checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" 286 | dependencies = [ 287 | "ahash", 288 | ] 289 | 290 | [[package]] 291 | name = "hashlink" 292 | version = "0.9.1" 293 | source = "registry+https://github.com/rust-lang/crates.io-index" 294 | checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" 295 | dependencies = [ 296 | "hashbrown", 297 | ] 298 | 299 | [[package]] 300 | name = "heck" 301 | version = "0.5.0" 302 | source = "registry+https://github.com/rust-lang/crates.io-index" 303 | checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" 304 | 305 | [[package]] 306 | name = "hermit-abi" 307 | version = "0.5.0" 308 | source = "registry+https://github.com/rust-lang/crates.io-index" 309 | checksum = "fbd780fe5cc30f81464441920d82ac8740e2e46b29a6fad543ddd075229ce37e" 310 | 311 | [[package]] 312 | name = "is-terminal" 313 | version = 
"0.4.16" 314 | source = "registry+https://github.com/rust-lang/crates.io-index" 315 | checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" 316 | dependencies = [ 317 | "hermit-abi", 318 | "libc", 319 | "windows-sys", 320 | ] 321 | 322 | [[package]] 323 | name = "is_terminal_polyfill" 324 | version = "1.70.1" 325 | source = "registry+https://github.com/rust-lang/crates.io-index" 326 | checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" 327 | 328 | [[package]] 329 | name = "itoa" 330 | version = "1.0.15" 331 | source = "registry+https://github.com/rust-lang/crates.io-index" 332 | checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" 333 | 334 | [[package]] 335 | name = "lazy_static" 336 | version = "1.5.0" 337 | source = "registry+https://github.com/rust-lang/crates.io-index" 338 | checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" 339 | 340 | [[package]] 341 | name = "libc" 342 | version = "0.2.172" 343 | source = "registry+https://github.com/rust-lang/crates.io-index" 344 | checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" 345 | 346 | [[package]] 347 | name = "libredox" 348 | version = "0.1.3" 349 | source = "registry+https://github.com/rust-lang/crates.io-index" 350 | checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" 351 | dependencies = [ 352 | "bitflags", 353 | "libc", 354 | "redox_syscall", 355 | ] 356 | 357 | [[package]] 358 | name = "libsqlite3-sys" 359 | version = "0.28.0" 360 | source = "registry+https://github.com/rust-lang/crates.io-index" 361 | checksum = "0c10584274047cb335c23d3e61bcef8e323adae7c5c8c760540f73610177fc3f" 362 | dependencies = [ 363 | "pkg-config", 364 | "vcpkg", 365 | ] 366 | 367 | [[package]] 368 | name = "log" 369 | version = "0.4.27" 370 | source = "registry+https://github.com/rust-lang/crates.io-index" 371 | checksum = 
"13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" 372 | 373 | [[package]] 374 | name = "memchr" 375 | version = "2.7.4" 376 | source = "registry+https://github.com/rust-lang/crates.io-index" 377 | checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" 378 | 379 | [[package]] 380 | name = "nix" 381 | version = "0.29.0" 382 | source = "registry+https://github.com/rust-lang/crates.io-index" 383 | checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" 384 | dependencies = [ 385 | "bitflags", 386 | "cfg-if", 387 | "cfg_aliases", 388 | "libc", 389 | ] 390 | 391 | [[package]] 392 | name = "num-conv" 393 | version = "0.1.0" 394 | source = "registry+https://github.com/rust-lang/crates.io-index" 395 | checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" 396 | 397 | [[package]] 398 | name = "num_threads" 399 | version = "0.1.7" 400 | source = "registry+https://github.com/rust-lang/crates.io-index" 401 | checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" 402 | dependencies = [ 403 | "libc", 404 | ] 405 | 406 | [[package]] 407 | name = "once_cell" 408 | version = "1.21.3" 409 | source = "registry+https://github.com/rust-lang/crates.io-index" 410 | checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" 411 | 412 | [[package]] 413 | name = "page_size" 414 | version = "0.6.0" 415 | source = "registry+https://github.com/rust-lang/crates.io-index" 416 | checksum = "30d5b2194ed13191c1999ae0704b7839fb18384fa22e49b57eeaa97d79ce40da" 417 | dependencies = [ 418 | "libc", 419 | "winapi", 420 | ] 421 | 422 | [[package]] 423 | name = "pkg-config" 424 | version = "0.3.32" 425 | source = "registry+https://github.com/rust-lang/crates.io-index" 426 | checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" 427 | 428 | [[package]] 429 | name = "powerfmt" 430 | version = "0.2.0" 431 | source = 
"registry+https://github.com/rust-lang/crates.io-index" 432 | checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" 433 | 434 | [[package]] 435 | name = "ppv-lite86" 436 | version = "0.2.21" 437 | source = "registry+https://github.com/rust-lang/crates.io-index" 438 | checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" 439 | dependencies = [ 440 | "zerocopy 0.8.25", 441 | ] 442 | 443 | [[package]] 444 | name = "prettytable-rs" 445 | version = "0.10.0" 446 | source = "registry+https://github.com/rust-lang/crates.io-index" 447 | checksum = "eea25e07510aa6ab6547308ebe3c036016d162b8da920dbb079e3ba8acf3d95a" 448 | dependencies = [ 449 | "csv", 450 | "encode_unicode", 451 | "is-terminal", 452 | "lazy_static", 453 | "term", 454 | "unicode-width", 455 | ] 456 | 457 | [[package]] 458 | name = "proc-macro2" 459 | version = "1.0.95" 460 | source = "registry+https://github.com/rust-lang/crates.io-index" 461 | checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" 462 | dependencies = [ 463 | "unicode-ident", 464 | ] 465 | 466 | [[package]] 467 | name = "quote" 468 | version = "1.0.40" 469 | source = "registry+https://github.com/rust-lang/crates.io-index" 470 | checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" 471 | dependencies = [ 472 | "proc-macro2", 473 | ] 474 | 475 | [[package]] 476 | name = "rand" 477 | version = "0.8.5" 478 | source = "registry+https://github.com/rust-lang/crates.io-index" 479 | checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" 480 | dependencies = [ 481 | "libc", 482 | "rand_chacha", 483 | "rand_core", 484 | ] 485 | 486 | [[package]] 487 | name = "rand_chacha" 488 | version = "0.3.1" 489 | source = "registry+https://github.com/rust-lang/crates.io-index" 490 | checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" 491 | dependencies = [ 492 | "ppv-lite86", 493 | "rand_core", 494 | ] 495 | 496 | 
[[package]] 497 | name = "rand_core" 498 | version = "0.6.4" 499 | source = "registry+https://github.com/rust-lang/crates.io-index" 500 | checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" 501 | dependencies = [ 502 | "getrandom", 503 | ] 504 | 505 | [[package]] 506 | name = "redox_syscall" 507 | version = "0.5.12" 508 | source = "registry+https://github.com/rust-lang/crates.io-index" 509 | checksum = "928fca9cf2aa042393a8325b9ead81d2f0df4cb12e1e24cef072922ccd99c5af" 510 | dependencies = [ 511 | "bitflags", 512 | ] 513 | 514 | [[package]] 515 | name = "redox_users" 516 | version = "0.4.6" 517 | source = "registry+https://github.com/rust-lang/crates.io-index" 518 | checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" 519 | dependencies = [ 520 | "getrandom", 521 | "libredox", 522 | "thiserror", 523 | ] 524 | 525 | [[package]] 526 | name = "rusqlite" 527 | version = "0.31.0" 528 | source = "registry+https://github.com/rust-lang/crates.io-index" 529 | checksum = "b838eba278d213a8beaf485bd313fd580ca4505a00d5871caeb1457c55322cae" 530 | dependencies = [ 531 | "bitflags", 532 | "fallible-iterator", 533 | "fallible-streaming-iterator", 534 | "hashlink", 535 | "libsqlite3-sys", 536 | "smallvec", 537 | ] 538 | 539 | [[package]] 540 | name = "rustversion" 541 | version = "1.0.20" 542 | source = "registry+https://github.com/rust-lang/crates.io-index" 543 | checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" 544 | 545 | [[package]] 546 | name = "ryu" 547 | version = "1.0.20" 548 | source = "registry+https://github.com/rust-lang/crates.io-index" 549 | checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" 550 | 551 | [[package]] 552 | name = "serde" 553 | version = "1.0.219" 554 | source = "registry+https://github.com/rust-lang/crates.io-index" 555 | checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" 556 | dependencies = [ 557 | "serde_derive", 558 | ] 
559 | 560 | [[package]] 561 | name = "serde_derive" 562 | version = "1.0.219" 563 | source = "registry+https://github.com/rust-lang/crates.io-index" 564 | checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" 565 | dependencies = [ 566 | "proc-macro2", 567 | "quote", 568 | "syn", 569 | ] 570 | 571 | [[package]] 572 | name = "simplelog" 573 | version = "0.12.2" 574 | source = "registry+https://github.com/rust-lang/crates.io-index" 575 | checksum = "16257adbfaef1ee58b1363bdc0664c9b8e1e30aed86049635fb5f147d065a9c0" 576 | dependencies = [ 577 | "log", 578 | "termcolor", 579 | "time", 580 | ] 581 | 582 | [[package]] 583 | name = "smallvec" 584 | version = "1.15.0" 585 | source = "registry+https://github.com/rust-lang/crates.io-index" 586 | checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" 587 | 588 | [[package]] 589 | name = "strsim" 590 | version = "0.11.1" 591 | source = "registry+https://github.com/rust-lang/crates.io-index" 592 | checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" 593 | 594 | [[package]] 595 | name = "syn" 596 | version = "2.0.101" 597 | source = "registry+https://github.com/rust-lang/crates.io-index" 598 | checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" 599 | dependencies = [ 600 | "proc-macro2", 601 | "quote", 602 | "unicode-ident", 603 | ] 604 | 605 | [[package]] 606 | name = "term" 607 | version = "0.7.0" 608 | source = "registry+https://github.com/rust-lang/crates.io-index" 609 | checksum = "c59df8ac95d96ff9bede18eb7300b0fda5e5d8d90960e76f8e14ae765eedbf1f" 610 | dependencies = [ 611 | "dirs-next", 612 | "rustversion", 613 | "winapi", 614 | ] 615 | 616 | [[package]] 617 | name = "termcolor" 618 | version = "1.4.1" 619 | source = "registry+https://github.com/rust-lang/crates.io-index" 620 | checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" 621 | dependencies = [ 622 | "winapi-util", 623 | ] 624 | 625 | 
[[package]] 626 | name = "thiserror" 627 | version = "1.0.69" 628 | source = "registry+https://github.com/rust-lang/crates.io-index" 629 | checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" 630 | dependencies = [ 631 | "thiserror-impl", 632 | ] 633 | 634 | [[package]] 635 | name = "thiserror-impl" 636 | version = "1.0.69" 637 | source = "registry+https://github.com/rust-lang/crates.io-index" 638 | checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" 639 | dependencies = [ 640 | "proc-macro2", 641 | "quote", 642 | "syn", 643 | ] 644 | 645 | [[package]] 646 | name = "time" 647 | version = "0.3.41" 648 | source = "registry+https://github.com/rust-lang/crates.io-index" 649 | checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" 650 | dependencies = [ 651 | "deranged", 652 | "itoa", 653 | "libc", 654 | "num-conv", 655 | "num_threads", 656 | "powerfmt", 657 | "serde", 658 | "time-core", 659 | "time-macros", 660 | ] 661 | 662 | [[package]] 663 | name = "time-core" 664 | version = "0.1.4" 665 | source = "registry+https://github.com/rust-lang/crates.io-index" 666 | checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" 667 | 668 | [[package]] 669 | name = "time-macros" 670 | version = "0.2.22" 671 | source = "registry+https://github.com/rust-lang/crates.io-index" 672 | checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" 673 | dependencies = [ 674 | "num-conv", 675 | "time-core", 676 | ] 677 | 678 | [[package]] 679 | name = "unicode-ident" 680 | version = "1.0.18" 681 | source = "registry+https://github.com/rust-lang/crates.io-index" 682 | checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" 683 | 684 | [[package]] 685 | name = "unicode-width" 686 | version = "0.1.14" 687 | source = "registry+https://github.com/rust-lang/crates.io-index" 688 | checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" 689 | 
690 | [[package]] 691 | name = "utf8parse" 692 | version = "0.2.2" 693 | source = "registry+https://github.com/rust-lang/crates.io-index" 694 | checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" 695 | 696 | [[package]] 697 | name = "vcpkg" 698 | version = "0.2.15" 699 | source = "registry+https://github.com/rust-lang/crates.io-index" 700 | checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" 701 | 702 | [[package]] 703 | name = "version_check" 704 | version = "0.9.5" 705 | source = "registry+https://github.com/rust-lang/crates.io-index" 706 | checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" 707 | 708 | [[package]] 709 | name = "wasi" 710 | version = "0.11.0+wasi-snapshot-preview1" 711 | source = "registry+https://github.com/rust-lang/crates.io-index" 712 | checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" 713 | 714 | [[package]] 715 | name = "winapi" 716 | version = "0.3.9" 717 | source = "registry+https://github.com/rust-lang/crates.io-index" 718 | checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" 719 | dependencies = [ 720 | "winapi-i686-pc-windows-gnu", 721 | "winapi-x86_64-pc-windows-gnu", 722 | ] 723 | 724 | [[package]] 725 | name = "winapi-i686-pc-windows-gnu" 726 | version = "0.4.0" 727 | source = "registry+https://github.com/rust-lang/crates.io-index" 728 | checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" 729 | 730 | [[package]] 731 | name = "winapi-util" 732 | version = "0.1.9" 733 | source = "registry+https://github.com/rust-lang/crates.io-index" 734 | checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" 735 | dependencies = [ 736 | "windows-sys", 737 | ] 738 | 739 | [[package]] 740 | name = "winapi-x86_64-pc-windows-gnu" 741 | version = "0.4.0" 742 | source = "registry+https://github.com/rust-lang/crates.io-index" 743 | checksum = 
"712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" 744 | 745 | [[package]] 746 | name = "windows-sys" 747 | version = "0.59.0" 748 | source = "registry+https://github.com/rust-lang/crates.io-index" 749 | checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" 750 | dependencies = [ 751 | "windows-targets", 752 | ] 753 | 754 | [[package]] 755 | name = "windows-targets" 756 | version = "0.52.6" 757 | source = "registry+https://github.com/rust-lang/crates.io-index" 758 | checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" 759 | dependencies = [ 760 | "windows_aarch64_gnullvm", 761 | "windows_aarch64_msvc", 762 | "windows_i686_gnu", 763 | "windows_i686_gnullvm", 764 | "windows_i686_msvc", 765 | "windows_x86_64_gnu", 766 | "windows_x86_64_gnullvm", 767 | "windows_x86_64_msvc", 768 | ] 769 | 770 | [[package]] 771 | name = "windows_aarch64_gnullvm" 772 | version = "0.52.6" 773 | source = "registry+https://github.com/rust-lang/crates.io-index" 774 | checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" 775 | 776 | [[package]] 777 | name = "windows_aarch64_msvc" 778 | version = "0.52.6" 779 | source = "registry+https://github.com/rust-lang/crates.io-index" 780 | checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" 781 | 782 | [[package]] 783 | name = "windows_i686_gnu" 784 | version = "0.52.6" 785 | source = "registry+https://github.com/rust-lang/crates.io-index" 786 | checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" 787 | 788 | [[package]] 789 | name = "windows_i686_gnullvm" 790 | version = "0.52.6" 791 | source = "registry+https://github.com/rust-lang/crates.io-index" 792 | checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" 793 | 794 | [[package]] 795 | name = "windows_i686_msvc" 796 | version = "0.52.6" 797 | source = "registry+https://github.com/rust-lang/crates.io-index" 798 | checksum = 
"240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" 799 | 800 | [[package]] 801 | name = "windows_x86_64_gnu" 802 | version = "0.52.6" 803 | source = "registry+https://github.com/rust-lang/crates.io-index" 804 | checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" 805 | 806 | [[package]] 807 | name = "windows_x86_64_gnullvm" 808 | version = "0.52.6" 809 | source = "registry+https://github.com/rust-lang/crates.io-index" 810 | checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" 811 | 812 | [[package]] 813 | name = "windows_x86_64_msvc" 814 | version = "0.52.6" 815 | source = "registry+https://github.com/rust-lang/crates.io-index" 816 | checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" 817 | 818 | [[package]] 819 | name = "zerocopy" 820 | version = "0.7.35" 821 | source = "registry+https://github.com/rust-lang/crates.io-index" 822 | checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" 823 | dependencies = [ 824 | "byteorder", 825 | "zerocopy-derive 0.7.35", 826 | ] 827 | 828 | [[package]] 829 | name = "zerocopy" 830 | version = "0.8.25" 831 | source = "registry+https://github.com/rust-lang/crates.io-index" 832 | checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" 833 | dependencies = [ 834 | "zerocopy-derive 0.8.25", 835 | ] 836 | 837 | [[package]] 838 | name = "zerocopy-derive" 839 | version = "0.7.35" 840 | source = "registry+https://github.com/rust-lang/crates.io-index" 841 | checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" 842 | dependencies = [ 843 | "proc-macro2", 844 | "quote", 845 | "syn", 846 | ] 847 | 848 | [[package]] 849 | name = "zerocopy-derive" 850 | version = "0.8.25" 851 | source = "registry+https://github.com/rust-lang/crates.io-index" 852 | checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" 853 | dependencies = [ 854 | "proc-macro2", 855 | "quote", 856 
| "syn", 857 | ] 858 | -------------------------------------------------------------------------------- /packages/dofs/src/Fs.ts: -------------------------------------------------------------------------------- 1 | import { RpcTarget } from 'cloudflare:workers' 2 | 3 | export type CreateOptions = { mode?: number; umask?: number } 4 | export type DeviceStats = { 5 | deviceSize: number 6 | spaceUsed: number 7 | spaceAvailable: number 8 | } 9 | export type ReadFileOptions = { encoding?: string } 10 | export type WriteFileOptions = { encoding?: string } 11 | export type ReadOptions = { offset?: number; length?: number; encoding?: string } 12 | export type WriteOptions = { offset?: number; encoding?: string } 13 | export type MkdirOptions = { recursive?: boolean } & CreateOptions 14 | export type RmdirOptions = { recursive?: boolean } 15 | export type ListDirOptions = { recursive?: boolean } 16 | export type SetAttrOptions = { mode?: number; uid?: number; gid?: number } 17 | export type Stat = { 18 | isFile: boolean 19 | isDirectory: boolean 20 | size: number 21 | mode?: number 22 | uid?: number 23 | gid?: number 24 | mtime?: number 25 | ctime?: number 26 | atime?: number 27 | crtime?: number 28 | blocks?: number 29 | nlink?: number 30 | rdev?: number 31 | flags?: number 32 | blksize?: number 33 | kind?: string 34 | } 35 | 36 | export type FsOptions = { 37 | chunkSize?: number 38 | } 39 | 40 | export class Fs extends RpcTarget { 41 | protected ctx: DurableObjectState 42 | protected env: Env 43 | protected chunkSize: number 44 | 45 | constructor(ctx: DurableObjectState, env: Env, options?: FsOptions) { 46 | super() 47 | this.env = env 48 | this.ctx = ctx 49 | this.chunkSize = options?.chunkSize ?? 
64 * 1024 // 64kb 50 | this.ctx.blockConcurrencyWhile(async () => { 51 | this.ensureSchema() 52 | }) 53 | } 54 | 55 | public readFile(path: string, options?: ReadFileOptions) { 56 | const ino = this.resolvePathToInode(path) 57 | // Get file size 58 | const statCursor = this.ctx.storage.sql.exec('SELECT attr FROM dofs_files WHERE ino = ?', ino) 59 | const statRow = statCursor.next().value 60 | if (!statRow || !statRow.attr) throw new Error('ENOENT') 61 | const attr = typeof statRow.attr === 'string' ? JSON.parse(statRow.attr) : statRow.attr 62 | const fileSize = attr.size || 0 63 | let currentOffset = 0 64 | const self = this 65 | return new ReadableStream({ 66 | pull(controller) { 67 | console.log('pull', { currentOffset, fileSize }) 68 | if (currentOffset >= fileSize) { 69 | controller.close() 70 | return 71 | } 72 | const readLength = Math.min(self.chunkSize, fileSize - currentOffset) 73 | // Read chunk from DB 74 | const chunkCursor = self.ctx.storage.sql.exec( 75 | 'SELECT data FROM dofs_chunks WHERE ino = ? AND offset = ? 
LIMIT 1', 76 | ino, 77 | currentOffset 78 | ) 79 | const chunkRow = chunkCursor.next().value 80 | let chunk: Uint8Array 81 | if (chunkRow && chunkRow.data) { 82 | if (chunkRow.data instanceof ArrayBuffer) { 83 | chunk = new Uint8Array(chunkRow.data) 84 | } else if (ArrayBuffer.isView(chunkRow.data)) { 85 | chunk = new Uint8Array(chunkRow.data.buffer) 86 | } else if (typeof chunkRow.data === 'string') { 87 | chunk = Uint8Array.from(chunkRow.data) 88 | } else { 89 | chunk = new Uint8Array(0) 90 | } 91 | } else { 92 | chunk = new Uint8Array(0) 93 | } 94 | console.log('chunk', { chunk }) 95 | controller.enqueue(chunk) 96 | currentOffset += readLength 97 | }, 98 | }) 99 | } 100 | 101 | public async writeFile( 102 | path: string, 103 | data: ArrayBuffer | string | ReadableStream, 104 | options?: WriteFileOptions 105 | ) { 106 | // Try to unlink if exists 107 | try { 108 | this.unlink(path) 109 | } catch (e: any) { 110 | if (!(e instanceof Error && e.message === 'ENOENT')) throw e 111 | } 112 | // Check available space before creating 113 | const deviceSize = this.getDeviceSize() 114 | const spaceUsed = this.getSpaceUsed() 115 | // Create the file 116 | this.create(path) 117 | // Handle streaming upload 118 | if (typeof data === 'object' && data !== null && typeof (data as any).getReader === 'function') { 119 | // Stream case 120 | const CHUNK_SIZE = 1024 * 1024 // 1MB 121 | let offset = 0 122 | let total = 0 123 | const reader = (data as ReadableStream).getReader() 124 | while (true) { 125 | const { value, done } = await reader.read() 126 | if (done) break 127 | if (!value) continue 128 | if (spaceUsed + total + value.length > deviceSize) { 129 | throw Object.assign(new Error('ENOSPC'), { code: 'ENOSPC' }) 130 | } 131 | // Write chunk 132 | this.write(path, value, { offset, encoding: options?.encoding }) 133 | offset += value.length 134 | total += value.length 135 | } 136 | return 137 | } 138 | // Buffer or string case 139 | if (typeof data === 'string') { 140 | const 
buf = new TextEncoder().encode(data) 141 | if (spaceUsed + buf.length > deviceSize) { 142 | throw Object.assign(new Error('ENOSPC'), { code: 'ENOSPC' }) 143 | } 144 | this.write(path, buf, { offset: 0, encoding: options?.encoding }) 145 | return 146 | } 147 | if (data instanceof ArrayBuffer) { 148 | const buf = new Uint8Array(data) 149 | if (spaceUsed + buf.length > deviceSize) { 150 | throw Object.assign(new Error('ENOSPC'), { code: 'ENOSPC' }) 151 | } 152 | this.write(path, buf, { offset: 0, encoding: options?.encoding }) 153 | return 154 | } 155 | if (ArrayBuffer.isView(data)) { 156 | const buf = new Uint8Array(data.buffer) 157 | if (spaceUsed + buf.length > deviceSize) { 158 | throw Object.assign(new Error('ENOSPC'), { code: 'ENOSPC' }) 159 | } 160 | this.write(path, buf, { offset: 0, encoding: options?.encoding }) 161 | return 162 | } 163 | throw new Error('Unsupported data type for writeFile') 164 | } 165 | 166 | public read(path: string, options: ReadOptions) { 167 | const ino = this.resolvePathToInode(path) 168 | const offset = options?.offset ?? 0 169 | const length = options?.length ?? undefined 170 | const cursor = this.ctx.storage.sql.exec( 171 | 'SELECT offset, data, length FROM dofs_chunks WHERE ino = ? ORDER BY offset ASC', 172 | ino 173 | ) 174 | let chunks: { offset: number; data: Uint8Array }[] = [] 175 | let fileEnd = 0 176 | for (let row of cursor) { 177 | if (row.data && (row.data instanceof ArrayBuffer || ArrayBuffer.isView(row.data))) { 178 | const arr = row.data instanceof ArrayBuffer ? new Uint8Array(row.data) : new Uint8Array(row.data.buffer) 179 | chunks.push({ offset: Number(row.offset), data: arr }) 180 | fileEnd = Math.max(fileEnd, Number(row.offset) + arr.length) 181 | } 182 | } 183 | const end = length !== undefined ? 
offset + length : fileEnd 184 | const result = new Uint8Array(end - offset) 185 | for (const chunk of chunks) { 186 | const chunkStart = chunk.offset 187 | const chunkEnd = chunk.offset + chunk.data.length 188 | const readStart = Math.max(offset, chunkStart) 189 | const readEnd = Math.min(end, chunkEnd) 190 | if (readStart < readEnd) { 191 | const destStart = readStart - offset 192 | const srcStart = readStart - chunkStart 193 | const len = readEnd - readStart 194 | result.set(chunk.data.subarray(srcStart, srcStart + len), destStart) 195 | } 196 | } 197 | return result.buffer 198 | } 199 | 200 | public write(path: string, data: ArrayBuffer | string, options: WriteOptions) { 201 | let ino: number 202 | try { 203 | ino = this.resolvePathToInode(path) 204 | } catch (e: any) { 205 | if (e instanceof Error && e.message === 'ENOENT') { 206 | this.create(path) 207 | ino = this.resolvePathToInode(path) 208 | } else { 209 | throw e 210 | } 211 | } 212 | const offset = options?.offset ?? 0 213 | const buf = typeof data === 'string' ? new TextEncoder().encode(data) : new Uint8Array(data) 214 | // Check available space 215 | const deviceSize = this.getDeviceSize() 216 | const spaceUsed = this.getSpaceUsed() 217 | // Estimate new space needed: sum of new data written beyond current file size 218 | const fileCursor = this.ctx.storage.sql.exec('SELECT attr FROM dofs_files WHERE ino = ?', ino) 219 | const fileRow = fileCursor.next().value 220 | let fileSize = 0 221 | if (fileRow && fileRow.attr) { 222 | const attr = typeof fileRow.attr === 'string' ? JSON.parse(fileRow.attr) : fileRow.attr 223 | fileSize = attr.size || 0 224 | } 225 | const endOffset = offset + buf.length 226 | const additional = endOffset > fileSize ? 
endOffset - fileSize : 0 227 | if (spaceUsed + additional > deviceSize) { 228 | throw Object.assign(new Error('ENOSPC'), { code: 'ENOSPC' }) 229 | } 230 | const CHUNK_SIZE = this.chunkSize 231 | let written = 0 232 | let maxEnd = 0 233 | while (written < buf.length) { 234 | const absOffset = offset + written 235 | const chunkIdx = Math.floor(absOffset / CHUNK_SIZE) 236 | const chunkOffset = chunkIdx * CHUNK_SIZE 237 | const chunkOffInChunk = absOffset % CHUNK_SIZE 238 | const writeLen = Math.min(CHUNK_SIZE - chunkOffInChunk, buf.length - written) 239 | // Use helper to load chunk 240 | let chunkData = this.loadChunk(ino, chunkOffset, CHUNK_SIZE) 241 | chunkData.set(buf.subarray(written, written + writeLen), chunkOffInChunk) 242 | // Calculate chunk length (last chunk may be partial) 243 | let chunkLength = CHUNK_SIZE 244 | const thisEnd = chunkOffInChunk + writeLen 245 | if (thisEnd < CHUNK_SIZE) { 246 | chunkLength = thisEnd 247 | } 248 | // Upsert chunk 249 | this.ctx.storage.sql.exec( 250 | 'INSERT INTO dofs_chunks (ino, offset, data, length) VALUES (?, ?, ?, ?) 
ON CONFLICT(ino, offset) DO UPDATE SET data=excluded.data, length=excluded.length', 251 | ino, 252 | chunkOffset, 253 | chunkData.subarray(0, chunkLength), 254 | chunkLength 255 | ) 256 | written += writeLen 257 | maxEnd = Math.max(maxEnd, absOffset + writeLen) 258 | } 259 | // Update file size and space used 260 | this.updateFileSizeAndSpaceUsed(ino) 261 | } 262 | 263 | public mkdir(path: string, options?: MkdirOptions) { 264 | const parts = path.split('/').filter(Boolean) 265 | if (parts.length === 0) throw Object.assign(new Error('EEXIST'), { code: 'EEXIST' }) 266 | const name = parts[parts.length - 1] 267 | const parentPath = '/' + parts.slice(0, -1).join('/') 268 | let parent: number 269 | try { 270 | parent = this.resolvePathToInode(parentPath) 271 | } catch (e: any) { 272 | if (e.message === 'ENOENT' && options?.recursive) { 273 | this.mkdir(parentPath, options) 274 | parent = this.resolvePathToInode(parentPath) 275 | } else { 276 | throw e 277 | } 278 | } 279 | const cursor = this.ctx.storage.sql.exec('SELECT ino FROM dofs_files WHERE parent = ? AND name = ?', parent, name) 280 | if (cursor.next().value) { 281 | if (options?.recursive) return 282 | throw Object.assign(new Error('EEXIST'), { code: 'EEXIST' }) 283 | } 284 | const ino = this.allocInode() 285 | const now = Date.now() 286 | const mode = options?.mode ?? 0o755 287 | const umask = options?.umask ?? 
0 288 | const perm = mode & ~umask & 0o7777 289 | const attr = { 290 | ino, 291 | size: 0, 292 | blocks: 0, 293 | atime: now, 294 | mtime: now, 295 | ctime: now, 296 | crtime: now, 297 | kind: 'Directory', 298 | perm, 299 | nlink: 2, 300 | uid: 0, 301 | gid: 0, 302 | rdev: 0, 303 | flags: 0, 304 | blksize: 512, 305 | } 306 | this.ctx.storage.sql.exec( 307 | 'INSERT INTO dofs_files (ino, name, parent, is_dir, attr, data) VALUES (?, ?, ?, ?, ?, NULL)', 308 | ino, 309 | name, 310 | parent, 311 | 1, 312 | JSON.stringify(attr) 313 | ) 314 | } 315 | 316 | public rmdir(path: string, options?: RmdirOptions) { 317 | let ino: number 318 | try { 319 | ino = this.resolvePathToInode(path) 320 | } catch (e: any) { 321 | if (e.message === 'ENOENT' && options?.recursive) return 322 | throw e 323 | } 324 | if (options?.recursive) { 325 | const cursor = this.ctx.storage.sql.exec('SELECT name, is_dir FROM dofs_files WHERE parent = ?', ino) 326 | for (let row of cursor) { 327 | const childPath = path === '/' ? `/${row.name}` : `${path}/${row.name}` 328 | if (row.is_dir) { 329 | this.rmdir(childPath, options) 330 | } else { 331 | this.unlink(childPath) 332 | } 333 | } 334 | } else { 335 | const cursor = this.ctx.storage.sql.exec('SELECT COUNT(*) as count FROM dofs_files WHERE parent = ?', ino) 336 | const row = cursor.next().value 337 | if (!row) throw new Error('ENOENT') 338 | if (Number(row.count) > 0) throw new Error('ENOTEMPTY') 339 | } 340 | this.ctx.storage.sql.exec('DELETE FROM dofs_files WHERE ino = ?', ino) 341 | } 342 | 343 | public listDir(path: string, options?: ListDirOptions) { 344 | const ino = this.resolvePathToInode(path) 345 | const cursor = this.ctx.storage.sql.exec('SELECT name, is_dir FROM dofs_files WHERE parent = ?', ino) 346 | const names: string[] = ['.', '..'] 347 | for (let row of cursor) { 348 | if (typeof row.name === 'string') { 349 | names.push(row.name) 350 | if (options?.recursive && row.is_dir) { 351 | const childPath = path === '/' ? 
`/${row.name}` : `${path}/${row.name}` 352 | const childNames = this.listDir(childPath, options) 353 | for (const childName of childNames) { 354 | if (childName !== '.' && childName !== '..') { 355 | names.push(`${row.name}/${childName}`) 356 | } 357 | } 358 | } 359 | } 360 | } 361 | return names 362 | } 363 | 364 | public stat(path: string): Stat { 365 | const ino = this.resolvePathToInode(path) 366 | const cursor = this.ctx.storage.sql.exec('SELECT attr, is_dir FROM dofs_files WHERE ino = ?', ino) 367 | const row = cursor.next().value 368 | if (!row) throw new Error('ENOENT') 369 | const attr = typeof row.attr === 'string' ? JSON.parse(row.attr) : row.attr 370 | return { 371 | isFile: !row.is_dir, 372 | isDirectory: !!row.is_dir, 373 | size: attr.size, 374 | mode: attr.perm, 375 | uid: attr.uid, 376 | gid: attr.gid, 377 | mtime: attr.mtime, 378 | ctime: attr.ctime, 379 | atime: attr.atime, 380 | crtime: attr.crtime, 381 | blocks: attr.blocks, 382 | nlink: attr.nlink, 383 | rdev: attr.rdev, 384 | flags: attr.flags, 385 | blksize: attr.blksize, 386 | kind: attr.kind, 387 | } 388 | } 389 | 390 | public setattr(path: string, options: SetAttrOptions) { 391 | const ino = this.resolvePathToInode(path) 392 | const cursor = this.ctx.storage.sql.exec('SELECT attr FROM dofs_files WHERE ino = ?', ino) 393 | const row = cursor.next().value 394 | if (!row) throw new Error('ENOENT') 395 | const attr = typeof row.attr === 'string' ? JSON.parse(row.attr) : row.attr 396 | if (options.mode !== undefined) attr.perm = options.mode 397 | if (options.uid !== undefined) attr.uid = options.uid 398 | if (options.gid !== undefined) attr.gid = options.gid 399 | this.ctx.storage.sql.exec('UPDATE dofs_files SET attr = ? 
WHERE ino = ?', JSON.stringify(attr), ino) 400 | } 401 | 402 | public symlink(target: string, path: string) { 403 | const parts = path.split('/').filter(Boolean) 404 | if (parts.length === 0) throw new Error('EEXIST') 405 | const name = parts[parts.length - 1] 406 | const parentPath = '/' + parts.slice(0, -1).join('/') 407 | const parent = this.resolvePathToInode(parentPath) 408 | // Check if already exists 409 | const cursor = this.ctx.storage.sql.exec('SELECT ino FROM dofs_files WHERE parent = ? AND name = ?', parent, name) 410 | if (cursor.next().value) throw new Error('EEXIST') 411 | const ino = this.allocInode() 412 | const now = Date.now() 413 | const attr = { 414 | ino, 415 | size: target.length, 416 | blocks: 0, 417 | atime: now, 418 | mtime: now, 419 | ctime: now, 420 | crtime: now, 421 | kind: 'Symlink', 422 | perm: 0o777, 423 | nlink: 1, 424 | uid: 0, 425 | gid: 0, 426 | rdev: 0, 427 | flags: 0, 428 | blksize: 512, 429 | } 430 | const data = new TextEncoder().encode(target) 431 | this.ctx.storage.sql.exec( 432 | 'INSERT INTO dofs_files (ino, name, parent, is_dir, attr, data) VALUES (?, ?, ?, ?, ?, ?)', 433 | ino, 434 | name, 435 | parent, 436 | 0, 437 | JSON.stringify(attr), 438 | data 439 | ) 440 | } 441 | 442 | public readlink(path: string) { 443 | const ino = this.resolvePathToInode(path) 444 | const cursor = this.ctx.storage.sql.exec('SELECT data FROM dofs_files WHERE ino = ?', ino) 445 | const row = cursor.next().value 446 | if (!row || !row.data) throw new Error('ENOENT') 447 | let arr: Uint8Array 448 | if (row.data instanceof ArrayBuffer) { 449 | arr = new Uint8Array(row.data) 450 | } else if (ArrayBuffer.isView(row.data)) { 451 | arr = new Uint8Array(row.data.buffer) 452 | } else { 453 | throw new Error('ENOENT') 454 | } 455 | return new TextDecoder().decode(arr) 456 | } 457 | 458 | public rename(oldPath: string, newPath: string) { 459 | const oldParts = oldPath.split('/').filter(Boolean) 460 | const newParts = newPath.split('/').filter(Boolean) 
461 | if (oldParts.length === 0 || newParts.length === 0) throw Object.assign(new Error('ENOENT'), { code: 'ENOENT' }) 462 | const oldName = oldParts[oldParts.length - 1] 463 | const oldParentPath = '/' + oldParts.slice(0, -1).join('/') 464 | const newName = newParts[newParts.length - 1] 465 | const newParentPath = '/' + newParts.slice(0, -1).join('/') 466 | const oldParent = this.resolvePathToInode(oldParentPath) 467 | const newParent = this.resolvePathToInode(newParentPath) 468 | const oldCursor = this.ctx.storage.sql.exec( 469 | 'SELECT ino FROM dofs_files WHERE parent = ? AND name = ?', 470 | oldParent, 471 | oldName 472 | ) 473 | const oldRow = oldCursor.next().value 474 | if (!oldRow) throw Object.assign(new Error('ENOENT'), { code: 'ENOENT' }) 475 | const ino = oldRow.ino 476 | // If destination exists, check if it's a non-empty directory 477 | const newCursor = this.ctx.storage.sql.exec( 478 | 'SELECT ino, is_dir FROM dofs_files WHERE parent = ? AND name = ?', 479 | newParent, 480 | newName 481 | ) 482 | const newRow = newCursor.next().value 483 | if (newRow) { 484 | if (newRow.is_dir) { 485 | const childCursor = this.ctx.storage.sql.exec( 486 | 'SELECT COUNT(*) as count FROM dofs_files WHERE parent = ?', 487 | newRow.ino 488 | ) 489 | const childRow = childCursor.next().value 490 | if (childRow && Number(childRow.count) > 0) throw Object.assign(new Error('ENOTEMPTY'), { code: 'ENOTEMPTY' }) 491 | } 492 | this.ctx.storage.sql.exec('DELETE FROM dofs_files WHERE ino = ?', newRow.ino) 493 | this.ctx.storage.sql.exec('DELETE FROM dofs_chunks WHERE ino = ?', newRow.ino) 494 | } 495 | this.ctx.storage.sql.exec('UPDATE dofs_files SET parent = ?, name = ? 
WHERE ino = ?', newParent, newName, ino) 496 | } 497 | 498 | public unlink(path: string) { 499 | const ino = this.resolvePathToInode(path) 500 | const cursor = this.ctx.storage.sql.exec('SELECT is_dir FROM dofs_files WHERE ino = ?', ino) 501 | const row = cursor.next().value 502 | if (!row) throw Object.assign(new Error('ENOENT'), { code: 'ENOENT' }) 503 | if (row.is_dir) throw Object.assign(new Error('EISDIR'), { code: 'EISDIR' }) 504 | this.ctx.storage.sql.exec('DELETE FROM dofs_files WHERE ino = ?', ino) 505 | this.ctx.storage.sql.exec('DELETE FROM dofs_chunks WHERE ino = ?', ino) 506 | // Update space used 507 | this.updateFileSizeAndSpaceUsed(ino) 508 | } 509 | 510 | public create(path: string, options?: CreateOptions) { 511 | const parts = path.split('/').filter(Boolean) 512 | if (parts.length === 0) throw Object.assign(new Error('EEXIST'), { code: 'EEXIST' }) 513 | const name = parts[parts.length - 1] 514 | const parentPath = '/' + parts.slice(0, -1).join('/') 515 | const parent = this.resolvePathToInode(parentPath) 516 | // Check if already exists 517 | const cursor = this.ctx.storage.sql.exec('SELECT ino FROM dofs_files WHERE parent = ? AND name = ?', parent, name) 518 | if (cursor.next().value) throw Object.assign(new Error('EEXIST'), { code: 'EEXIST' }) 519 | const ino = this.allocInode() 520 | const now = Date.now() 521 | const mode = options?.mode ?? 0o644 522 | const umask = options?.umask ?? 
0 523 | const perm = mode & ~umask & 0o7777 524 | const attr = { 525 | ino, 526 | size: 0, 527 | blocks: 0, 528 | atime: now, 529 | mtime: now, 530 | ctime: now, 531 | crtime: now, 532 | kind: 'File', 533 | perm, 534 | nlink: 1, 535 | uid: 0, 536 | gid: 0, 537 | rdev: 0, 538 | flags: 0, 539 | blksize: 512, 540 | } 541 | this.ctx.storage.sql.exec( 542 | 'INSERT INTO dofs_files (ino, name, parent, is_dir, attr, data) VALUES (?, ?, ?, ?, ?, NULL)', 543 | ino, 544 | name, 545 | parent, 546 | 0, 547 | JSON.stringify(attr) 548 | ) 549 | } 550 | 551 | public truncate(path: string, size: number) { 552 | const ino = this.resolvePathToInode(path) 553 | const CHUNK_SIZE = this.chunkSize 554 | // Delete all chunks past the new size 555 | const firstExcessChunk = Math.floor(size / CHUNK_SIZE) * CHUNK_SIZE 556 | this.ctx.storage.sql.exec('DELETE FROM dofs_chunks WHERE ino = ? AND offset >= ?', ino, firstExcessChunk) 557 | // If the last chunk is partial, trim it 558 | if (size % CHUNK_SIZE !== 0) { 559 | const lastChunkOffset = Math.floor(size / CHUNK_SIZE) * CHUNK_SIZE 560 | const lastLen = size % CHUNK_SIZE 561 | // Use helper to load chunk 562 | let chunkData = this.loadChunk(ino, lastChunkOffset, CHUNK_SIZE) 563 | chunkData = chunkData.subarray(0, lastLen) 564 | this.ctx.storage.sql.exec( 565 | 'UPDATE dofs_chunks SET data = ?, length = ? WHERE ino = ? 
AND offset = ?', 566 | chunkData, 567 | lastLen, 568 | ino, 569 | lastChunkOffset 570 | ) 571 | } 572 | // Update file size and space used 573 | this.updateFileSizeAndSpaceUsed(ino) 574 | } 575 | 576 | public getDeviceStats(): DeviceStats { 577 | const size = this.getDeviceSize() 578 | const used = this.getSpaceUsed() 579 | return { 580 | deviceSize: size, 581 | spaceUsed: used, 582 | spaceAvailable: size - used, 583 | } 584 | } 585 | 586 | public setDeviceSize(newSize: number) { 587 | const used = this.getSpaceUsed() 588 | if (newSize < used) { 589 | throw Object.assign(new Error('ENOSPC'), { code: 'ENOSPC' }) 590 | } 591 | this.ctx.storage.sql.exec('UPDATE dofs_meta SET value = ? WHERE key = ?', newSize.toString(), 'device_size') 592 | } 593 | 594 | private rootDirAttr() { 595 | const now = Date.now() 596 | return { 597 | ino: 1, 598 | size: 0, 599 | blocks: 0, 600 | atime: now, 601 | mtime: now, 602 | ctime: now, 603 | crtime: now, 604 | kind: 'Directory', 605 | perm: 0o755, 606 | nlink: 2, 607 | uid: 0, 608 | gid: 0, 609 | rdev: 0, 610 | flags: 0, 611 | blksize: 512, 612 | } 613 | } 614 | 615 | private ensureSchema() { 616 | this.ctx.storage.sql.exec(` 617 | CREATE TABLE IF NOT EXISTS dofs_meta ( 618 | key TEXT PRIMARY KEY, 619 | value TEXT 620 | ); 621 | CREATE TABLE IF NOT EXISTS dofs_files ( 622 | ino INTEGER PRIMARY KEY, 623 | name TEXT NOT NULL, 624 | parent INTEGER, 625 | is_dir INTEGER NOT NULL, 626 | attr BLOB, 627 | data BLOB 628 | ); 629 | CREATE TABLE IF NOT EXISTS dofs_chunks ( 630 | ino INTEGER NOT NULL, 631 | offset INTEGER NOT NULL, 632 | data BLOB NOT NULL, 633 | length INTEGER NOT NULL, 634 | PRIMARY KEY (ino, offset) 635 | ); 636 | CREATE INDEX IF NOT EXISTS idx_dofs_files_parent_name ON dofs_files(parent, name); 637 | CREATE INDEX IF NOT EXISTS idx_dofs_files_parent ON dofs_files(parent); 638 | CREATE INDEX IF NOT EXISTS idx_dofs_files_name ON dofs_files(name); 639 | CREATE INDEX IF NOT EXISTS idx_dofs_chunks_ino ON dofs_chunks(ino); 640 | 
CREATE INDEX IF NOT EXISTS idx_dofs_chunks_ino_offset ON dofs_chunks(ino, offset); 641 | `) 642 | 643 | // Ensure meta row exists 644 | const metaCursor = this.ctx.storage.sql.exec('SELECT value FROM dofs_meta WHERE key = ?', 'device_size') 645 | if (!metaCursor.next().value) { 646 | this.ctx.storage.sql.exec( 647 | 'INSERT INTO dofs_meta (key, value) VALUES (?, ?)', 648 | 'device_size', 649 | (1024 * 1024 * 1024).toString() 650 | ) 651 | } 652 | const usedCursor = this.ctx.storage.sql.exec('SELECT value FROM dofs_meta WHERE key = ?', 'space_used') 653 | if (!usedCursor.next().value) { 654 | this.ctx.storage.sql.exec('INSERT INTO dofs_meta (key, value) VALUES (?, ?)', 'space_used', '0') 655 | } 656 | 657 | // Ensure root exists 658 | const cursor = this.ctx.storage.sql.exec('SELECT COUNT(*) as count FROM dofs_files WHERE ino = ?', 1) 659 | const row = cursor.next().value 660 | if (!row || row.count === 0) { 661 | const attr = this.rootDirAttr() 662 | this.ctx.storage.sql.exec( 663 | 'INSERT INTO dofs_files (ino, name, parent, is_dir, attr, data) VALUES (?, ?, ?, ?, ?, NULL)', 664 | 1, 665 | '/', 666 | undefined, 667 | 1, 668 | JSON.stringify(attr) 669 | ) 670 | } 671 | } 672 | 673 | // Add a sync version of resolvePathToInode for use in sync methods 674 | private resolvePathToInode(path: string): number { 675 | if (path === '/' || path === '') return 1 676 | const parts = path.split('/').filter(Boolean) 677 | let parent = 1 678 | for (const name of parts) { 679 | const cursor = this.ctx.storage.sql.exec('SELECT ino FROM dofs_files WHERE parent = ? 
AND name = ?', parent, name) 680 | const row = cursor.next().value 681 | if (!row || row.ino == null) throw new Error('ENOENT') 682 | parent = Number(row.ino) 683 | } 684 | return parent 685 | } 686 | 687 | // Add a sync version of allocInode for use in sync methods 688 | private allocInode(): number { 689 | const cursor = this.ctx.storage.sql.exec('SELECT MAX(ino) as max FROM dofs_files') 690 | const row = cursor.next().value 691 | return row && row.max != null ? Number(row.max) + 1 : 2 692 | } 693 | 694 | // Helper to load a chunk as Uint8Array, or zero-filled if not present 695 | private loadChunk(ino: number, chunkOffset: number, chunkSize: number): Uint8Array { 696 | const chunkCursor = this.ctx.storage.sql.exec( 697 | 'SELECT data FROM dofs_chunks WHERE ino = ? AND offset = ?', 698 | ino, 699 | chunkOffset 700 | ) 701 | const chunkRow = chunkCursor.next().value 702 | if (chunkRow && chunkRow.data) { 703 | if (chunkRow.data instanceof ArrayBuffer) { 704 | return new Uint8Array(chunkRow.data) 705 | } else if (ArrayBuffer.isView(chunkRow.data)) { 706 | return new Uint8Array(chunkRow.data.buffer) 707 | } 708 | } 709 | return new Uint8Array(chunkSize) 710 | } 711 | 712 | // Helper to get/set device size and space used 713 | private getDeviceSize(): number { 714 | const cursor = this.ctx.storage.sql.exec('SELECT value FROM dofs_meta WHERE key = ?', 'device_size') 715 | const row = cursor.next().value 716 | return row ? Number(row.value) : 1024 * 1024 * 1024 717 | } 718 | private getSpaceUsed(): number { 719 | const cursor = this.ctx.storage.sql.exec('SELECT value FROM dofs_meta WHERE key = ?', 'space_used') 720 | const row = cursor.next().value 721 | return row ? Number(row.value) : 0 722 | } 723 | private setSpaceUsed(val: number) { 724 | this.ctx.storage.sql.exec('UPDATE dofs_meta SET value = ? 
WHERE key = ?', val.toString(), 'space_used')
  }

  /**
   * Recompute a file's attr.size from its chunk lengths and refresh the
   * global space_used counter (sum of chunk lengths across all files).
   */
  private updateFileSizeAndSpaceUsed(ino: number) {
    const cursor = this.ctx.storage.sql.exec('SELECT SUM(length) as total FROM dofs_chunks WHERE ino = ?', ino)
    const row = cursor.next().value
    const size = row && row.total ? Number(row.total) : 0
    // Patch the size directly into the JSON-encoded attr blob.
    this.ctx.storage.sql.exec('UPDATE dofs_files SET attr = json_set(attr, "$.size", ?) WHERE ino = ?', size, ino)
    const usedCursor = this.ctx.storage.sql.exec('SELECT SUM(length) as total FROM dofs_chunks')
    const usedRow = usedCursor.next().value
    const used = usedRow && usedRow.total ? Number(usedRow.total) : 0
    this.setSpaceUsed(used)
  }
}
--------------------------------------------------------------------------------
/packages/dofs-rust-client/src/providers/sqlite_chunked.rs:
--------------------------------------------------------------------------------
use rusqlite::{params, Connection, Result, OptionalExtension};
use std::ffi::OsStr;
use std::time::SystemTime;
use fuser;
use serde::{Serialize, Deserialize};

/// Inode number of the filesystem root directory.
const ROOT_INODE: u64 = 1;
/// User files/dirs are allocated from here so low inode numbers stay reserved.
const USER_INODE_START: u64 = 10;

/// Serde-serializable mirror of `fuser::FileType`.
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
enum FileTypeRepr {
    RegularFile,
    Directory,
    Symlink,
    BlockDevice,
    CharDevice,
    NamedPipe,
    Socket,
}

impl From<fuser::FileType> for FileTypeRepr {
    fn from(ft: fuser::FileType) -> Self {
        match ft {
            fuser::FileType::RegularFile => FileTypeRepr::RegularFile,
            fuser::FileType::Directory => FileTypeRepr::Directory,
            fuser::FileType::Symlink => FileTypeRepr::Symlink,
            fuser::FileType::BlockDevice => FileTypeRepr::BlockDevice,
            fuser::FileType::CharDevice => FileTypeRepr::CharDevice,
            fuser::FileType::NamedPipe => FileTypeRepr::NamedPipe,
            fuser::FileType::Socket => FileTypeRepr::Socket,
        }
    }
}

impl From<FileTypeRepr> for fuser::FileType {
    fn from(ft: FileTypeRepr) -> Self {
        match ft {
            FileTypeRepr::RegularFile => fuser::FileType::RegularFile,
            FileTypeRepr::Directory => fuser::FileType::Directory,
            FileTypeRepr::Symlink => fuser::FileType::Symlink,
            FileTypeRepr::BlockDevice => fuser::FileType::BlockDevice,
            FileTypeRepr::CharDevice => fuser::FileType::CharDevice,
            FileTypeRepr::NamedPipe => fuser::FileType::NamedPipe,
            FileTypeRepr::Socket => fuser::FileType::Socket,
        }
    }
}

/// Bincode-serializable mirror of `fuser::FileAttr`, persisted in the
/// `files.attr` BLOB column.
#[derive(Serialize, Deserialize, Debug, Clone)]
struct SerializableFileAttr {
    ino: u64,
    size: u64,
    blocks: u64,
    atime: SystemTime,
    mtime: SystemTime,
    ctime: SystemTime,
    crtime: SystemTime,
    kind: FileTypeRepr,
    perm: u16,
    nlink: u32,
    uid: u32,
    gid: u32,
    rdev: u32,
    flags: u32,
    blksize: u32,
}

impl From<&fuser::FileAttr> for SerializableFileAttr {
    fn from(attr: &fuser::FileAttr) -> Self {
        SerializableFileAttr {
            ino: attr.ino,
            size: attr.size,
            blocks: attr.blocks,
            atime: attr.atime,
            mtime: attr.mtime,
            ctime: attr.ctime,
            crtime: attr.crtime,
            kind: FileTypeRepr::from(attr.kind),
            perm: attr.perm,
            nlink: attr.nlink,
            uid: attr.uid,
            gid: attr.gid,
            rdev: attr.rdev,
            flags: attr.flags,
            blksize: attr.blksize,
        }
    }
}

impl From<&SerializableFileAttr> for fuser::FileAttr {
    fn from(attr: &SerializableFileAttr) -> Self {
        // Clamp persisted timestamps so corrupt values cannot overflow the
        // kernel's time representation: more than ~100 years in the future
        // collapses to "now"; anything before the epoch collapses to epoch.
        let now = SystemTime::now();
        let safe_time = |t: SystemTime| -> SystemTime {
            if let Ok(since_epoch) = t.duration_since(std::time::UNIX_EPOCH) {
                let horizon = now
                    .duration_since(std::time::UNIX_EPOCH)
                    .unwrap_or_default()
                    .as_secs()
                    + (100 * 365 * 24 * 3600);
                if since_epoch.as_secs() > horizon { now } else { t }
            } else {
                std::time::UNIX_EPOCH
            }
        };

        fuser::FileAttr {
            ino: attr.ino,
            size: attr.size,
            blocks: attr.blocks,
            atime: safe_time(attr.atime),
            mtime: safe_time(attr.mtime),
            ctime: safe_time(attr.ctime),
            crtime: safe_time(attr.crtime),
            kind: fuser::FileType::from(attr.kind),
            perm: attr.perm,
            nlink: attr.nlink,
            uid: attr.uid,
            gid: attr.gid,
            rdev: attr.rdev,
            flags: attr.flags,
            blksize: attr.blksize,
        }
    }
}

/// SQLite-backed FUSE provider that stores file contents as fixed-size
/// chunks in a `chunks` table keyed by `(ino, offset)`.
pub struct SqliteChunkedProvider {
    conn: Connection,
    next_inode: u64,
    pub osx_mode: bool,
    pub chunk_size: usize,
}

impl SqliteChunkedProvider {
    const SCHEMA: &'static str = "CREATE TABLE IF NOT EXISTS files (
    ino INTEGER PRIMARY KEY,
    name TEXT NOT NULL,
    parent INTEGER,
    is_dir INTEGER NOT NULL,
    attr BLOB,
    data BLOB
);
CREATE TABLE IF NOT EXISTS chunks (
    ino INTEGER NOT NULL,
    offset INTEGER NOT NULL,
    data BLOB NOT NULL,
    length INTEGER NOT NULL,
    PRIMARY KEY (ino, offset)
);
CREATE INDEX IF NOT EXISTS idx_files_parent_name ON files(parent, name);
CREATE INDEX IF NOT EXISTS idx_files_parent ON files(parent);
CREATE INDEX IF NOT EXISTS idx_files_name ON files(name);
CREATE INDEX IF NOT EXISTS idx_chunks_ino ON chunks(ino);
CREATE INDEX IF NOT EXISTS idx_chunks_ino_offset ON chunks(ino, offset);";

    /// Attributes for the root directory, owned by the mounting user.
    fn root_dir_attr() -> fuser::FileAttr {
        let now = SystemTime::now();
        fuser::FileAttr {
            ino: ROOT_INODE,
            size: 0,
            blocks: 0,
            atime: now,
            mtime: now,
            ctime: now,
crtime: now,
            kind: fuser::FileType::Directory,
            perm: 0o755,
            nlink: 2,
            uid: unsafe { libc::geteuid() },
            gid: unsafe { libc::getegid() },
            rdev: 0,
            flags: 0,
            blksize: 512,
        }
    }

    /// Open (or create) the database with defaults (`osx_mode` off, 4 KiB
    /// chunks). Delegates to `new_with_mode` so the schema/root/inode
    /// bootstrap logic lives in exactly one place (it was duplicated).
    #[allow(dead_code)]
    pub fn new(db_path: &str, chunk_size: Option<usize>) -> Result<Self> {
        Self::new_with_mode(db_path, false, chunk_size.unwrap_or(4096))
    }

    /// Open (or create) the database, ensuring the schema and the root
    /// directory row exist, and seed the inode allocator past both the
    /// current maximum and the reserved range.
    pub fn new_with_mode(db_path: &str, osx_mode: bool, chunk_size: usize) -> Result<Self> {
        let conn = Connection::open(db_path)?;
        conn.execute_batch(Self::SCHEMA)?;
        // Ensure the root directory exists.
        {
            let mut stmt = conn.prepare("SELECT COUNT(*) FROM files WHERE ino = ?1")?;
            let count: i64 = stmt.query_row(params![ROOT_INODE], |row| row.get(0))?;
            if count == 0 {
                let attr = Self::root_dir_attr();
                let attr_bytes = bincode::serialize(&SerializableFileAttr::from(&attr)).unwrap();
                conn.execute(
                    "INSERT INTO files (ino, name, parent, is_dir, attr, data) VALUES (?1, ?2, ?3, ?4, ?5, NULL)",
                    params![ROOT_INODE, "/", None::<u64>, 1, attr_bytes],
                )?;
            }
        }
        // Seed the allocator one past the current maximum inode.
        let mut next_inode: u64 = conn.query_row(
            "SELECT MAX(ino) FROM files",
            [],
            |row| row.get::<_, Option<u64>>(0),
        )?.unwrap_or(ROOT_INODE);
        if next_inode < USER_INODE_START {
            next_inode = USER_INODE_START;
        } else {
            next_inode += 1;
        }
        Ok(Self { conn, next_inode, osx_mode, chunk_size })
    }

    /// Read a file's entire contents by concatenating its chunks
    /// (placeholder helper; holes read as zeros).
    #[allow(dead_code)]
    fn get_file_data(&self, ino: u64) -> Option<Vec<u8>> {
        let mut stmt = self.conn.prepare("SELECT offset, data, length FROM chunks WHERE ino = ?1 ORDER BY offset ASC").ok()?;
        let mut rows = stmt.query(params![ino]).ok()?;
        let mut data = Vec::new();
        while let Some(row) = rows.next().ok()? {
            let offset: i64 = row.get(0).ok()?;
            let chunk_data: Vec<u8> = row.get(1).ok()?;
            let length: i64 = row.get(2).ok()?;
            let start = offset as usize;
            let end = start + length as usize;
            // Zero-fill any hole up to the end of this chunk, then copy it in.
            if data.len() < end {
                data.resize(end, 0);
            }
            data[start..end].copy_from_slice(&chunk_data[..length as usize]);
        }
        Some(data)
    }

    /// Replace a file's contents with a single chunk (placeholder helper).
    #[allow(dead_code)]
    fn set_file_data(&self, ino: u64, data: &[u8]) {
        let _ = self.conn.execute("DELETE FROM chunks WHERE ino = ?1", params![ino]);
        let _ = self.conn.execute(
            "INSERT INTO chunks (ino, offset, data, length) VALUES (?1, ?2, ?3, ?4)",
            params![ino, 0i64, data, data.len() as i64],
        );
    }

    /// Load and decode a file's attributes; `None` if the inode is absent.
    fn get_attr(&self, ino: u64) -> Option<fuser::FileAttr> {
        self.conn.query_row(
            "SELECT attr FROM files WHERE ino = ?1",
            params![ino],
            |row| {
                let attr_blob: Vec<u8> = row.get(0)?;
                let ser_attr: SerializableFileAttr = bincode::deserialize(&attr_blob).unwrap();
                Ok(fuser::FileAttr::from(&ser_attr))
            },
        ).optional().unwrap_or(None)
    }

    fn set_attr(&self, ino: u64, attr: &fuser::FileAttr) {
        let attr_bytes = bincode::serialize(&SerializableFileAttr::from(attr)).unwrap();
        let _ = self.conn.execute(
            "UPDATE files SET attr = ?1 WHERE ino = ?2",
            params![attr_bytes, ino],
        );
    }

    fn get_file_size(&self, ino: u64) -> u64 {
        self.get_attr(ino).map(|attr| attr.size).unwrap_or(0)
    }

    fn set_file_size(&self, ino: u64, size: u64) {
        if let Some(mut attr) = self.get_attr(ino) {
            attr.size = size;
            self.set_attr(ino, &attr);
        }
    }

    /// Read `size` bytes starting at `offset`, assembled from every chunk
    /// that overlaps the range; missing regions read as zeros.
    fn get_file_data_range(&self, ino: u64, offset: usize, size: usize) -> Vec<u8> {
        let mut result = vec![0u8; size];
        let chunk_size = self.chunk_size;
        let start_chunk = offset / chunk_size;
        let end_chunk = (offset + size + chunk_size - 1) / chunk_size;
        let mut stmt = self.conn.prepare(
            "SELECT offset, data, length FROM chunks WHERE ino = ?1 AND offset >= ?2 AND offset < ?3 ORDER BY offset ASC"
        ).unwrap();
        let chunk_start = (start_chunk * chunk_size) as i64;
        let chunk_end = (end_chunk * chunk_size) as i64;
        let mut rows = stmt.query(params![ino, chunk_start, chunk_end]).unwrap();
        while let Some(row) = rows.next().unwrap() {
            let chunk_offset: i64 = row.get(0).unwrap();
            let chunk_data: Vec<u8> = row.get(1).unwrap();
            let chunk_len: i64 = row.get(2).unwrap();
            let chunk_start_in_file = chunk_offset as usize;
            let chunk_end_in_file = chunk_start_in_file + chunk_len as usize;
            // Intersect [offset, offset+size) with this chunk's extent.
            let read_start = offset.max(chunk_start_in_file);
            let read_end = (offset + size).min(chunk_end_in_file);
            if read_start < read_end {
                let dest_start = read_start - offset;
                let src_start = read_start - chunk_start_in_file;
                let len = read_end - read_start;
                result[dest_start..dest_start + len].copy_from_slice(&chunk_data[src_start..src_start + len]);
            }
        }
        result
    }

    /// Write `data` at byte `offset`, splitting across chunk boundaries and
    /// doing read-modify-write on partially covered chunks; updates the
    /// file size afterwards.
    fn write_file_data(&self, ino: u64, offset: usize, data: &[u8]) {
        let chunk_size = self.chunk_size;
        let tx = self.conn.unchecked_transaction().unwrap();
        let mut written = 0;
        while written < data.len() {
            let abs_offset = offset + written;
            let chunk_idx = abs_offset / chunk_size;
            let chunk_offset = chunk_idx * chunk_size;
            let chunk_off_in_chunk = abs_offset % chunk_size;
            let write_len = (chunk_size - chunk_off_in_chunk).min(data.len() - written);
            // Read-modify-write: start from the existing chunk or zeros.
            let mut chunk_data: Vec<u8> = tx.query_row(
                "SELECT data FROM chunks WHERE ino = ?1 AND offset = ?2",
                params![ino, chunk_offset as i64],
                |row| row.get(0),
            ).optional().unwrap_or(None).unwrap_or(vec![0u8; chunk_size]);
            if chunk_data.len() < chunk_size {
                chunk_data.resize(chunk_size, 0);
            }
            chunk_data[chunk_off_in_chunk..chunk_off_in_chunk + write_len]
                .copy_from_slice(&data[written..written + write_len]);
            // The final chunk of the file may be shorter than a full chunk.
            let mut chunk_length = chunk_size;
            let file_end = abs_offset + write_len;
            let new_file_size = self.get_file_size(ino).max(file_end as u64);
            if (chunk_offset + chunk_size) as u64 > new_file_size {
                chunk_length = (new_file_size as usize - chunk_offset).min(chunk_size);
            }
            let _ = tx.execute(
                "INSERT INTO chunks (ino, offset, data, length) VALUES (?1, ?2, ?3, ?4)
                ON CONFLICT(ino, offset) DO UPDATE SET data=excluded.data, length=excluded.length",
                params![ino, chunk_offset as i64, &chunk_data[..chunk_length], chunk_length as i64],
            );
            written += write_len;
        }
        tx.commit().unwrap();
        let new_size = (offset + data.len()).max(self.get_file_size(ino) as usize) as u64;
        self.set_file_size(ino, new_size);
    }

    /// Truncate the file to `size` bytes: drop chunks wholly past the new
    /// end, then trim the (possibly partial) final chunk.
    fn truncate_file(&self, ino: u64, size: u64) {
        let chunk_size = self.chunk_size as u64;
        let tx = self.conn.unchecked_transaction().unwrap();
        // BUG FIX: the previous cutoff `(size / chunk_size) * chunk_size`
        // also deleted the chunk containing the new EOF whenever `size` was
        // not chunk-aligned, so the trim below found nothing and the kept
        // tail data was lost. Only chunks starting at or past the
        // rounded-up boundary are wholly out of range.
        let first_excess_chunk = ((size + chunk_size - 1) / chunk_size) * chunk_size;
        let _ = tx.execute(
            "DELETE FROM chunks WHERE ino = ?1 AND offset >= ?2",
            params![ino, first_excess_chunk as i64],
        );
        // Trim the last chunk when the new size is not chunk-aligned.
        if size % chunk_size != 0 {
            let last_chunk_offset = (size / chunk_size) * chunk_size;
            let last_len = (size % chunk_size) as i64;
            let chunk_data: Option<Vec<u8>> = tx.query_row(
                "SELECT data FROM chunks WHERE ino = ?1 AND offset = ?2",
                params![ino, last_chunk_offset as i64],
                |row| row.get(0),
            ).optional().unwrap_or(None);
            if let Some(mut chunk_data) = chunk_data {
                chunk_data.resize(last_len as usize, 0);
                let _ = tx.execute(
                    "UPDATE chunks SET data = ?1, length = ?2 WHERE ino = ?3 AND offset = ?4",
                    params![&chunk_data, last_len, ino, last_chunk_offset as i64],
                );
            }
        }
        tx.commit().unwrap();
        self.set_file_size(ino, size);
    }

    fn delete_file_chunks(&self, ino: u64) {
        let _ = self.conn.execute("DELETE FROM chunks WHERE ino = ?1", params![ino]);
    }

    fn alloc_inode(&mut self) -> u64 {
        let ino = self.next_inode;
        self.next_inode += 1;
        ino
    }

    /// Look up a child inode by directory + name.
    fn get_child_ino(&self, parent: u64, name: &str) -> Option<u64> {
        self.conn.query_row(
            "SELECT ino FROM files WHERE parent = ?1 AND name = ?2",
            params![parent, name],
            |row| row.get(0),
        ).optional().unwrap_or(None)
    }

    fn is_dir_empty(&self, ino: u64) -> bool {
        let count: i64 = self.conn.query_row(
            "SELECT COUNT(*) FROM files WHERE parent = ?1",
            params![ino],
            |row| row.get(0),
        ).unwrap_or(0);
        count == 0
    }

    /// Fresh attributes for a newly created inode, owned by the mounting user.
    fn new_file_attr(ino: u64, kind: fuser::FileType, perm: u16, nlink: u32, size: u64) -> fuser::FileAttr {
        let now = SystemTime::now();
        fuser::FileAttr {
            ino,
            size,
            blocks: 0,
            atime: now,
            mtime: now,
            ctime: now,
            crtime: now,
            kind,
            perm,
            nlink,
            uid: unsafe { libc::geteuid() },
            gid: unsafe { libc::getegid() },
            rdev: 0,
            flags: 0,
            blksize: 512,
        }
    }

    fn insert_file(&self, ino: u64, name: &str, parent: u64, is_dir: bool, attr_bytes: Vec<u8>) {
        let _ = self.conn.execute(
            "INSERT INTO files (ino, name, parent, is_dir, attr) VALUES (?1, ?2, ?3, ?4, ?5)",
            params![ino, name, parent, if is_dir { 1 } else { 0 }, attr_bytes],
        );
    }
}

impl crate::providers::Provider for SqliteChunkedProvider {
    fn rmdir(&mut self, parent: u64, name: &OsStr, reply: fuser::ReplyEmpty) {
        let name_str = name.to_str().unwrap_or("");
        let ino = match self.get_child_ino(parent, name_str) {
            Some(ino) => ino,
            None => { reply.error(libc::ENOENT); return; }
        };
        // BUG FIX: rmdir on a non-directory must fail with ENOTDIR instead
        // of silently removing the file.
        if let Some(attr) = self.get_attr(ino) {
            if attr.kind != fuser::FileType::Directory {
                reply.error(libc::ENOTDIR);
                return;
            }
        }
        if !self.is_dir_empty(ino) {
            reply.error(libc::ENOTEMPTY);
            return;
        }
        let _ = self.conn.execute("DELETE FROM files WHERE ino = ?1", params![ino]);
        let _ = self.conn.execute("DELETE FROM files WHERE parent = ?1 AND name = ?2", params![parent, name_str]);
        self.delete_file_chunks(ino);
        reply.ok();
    }
    fn open(&mut self, ino: u64, reply: fuser::ReplyOpen) {
        if self.get_attr(ino).is_some() {
            reply.opened(0, 0);
        } else {
            reply.error(libc::ENOENT);
        }
    }
    fn flush(&mut self, ino: u64, reply: fuser::ReplyEmpty) {
        if self.get_attr(ino).is_some() {
            reply.ok();
        } else {
            reply.error(libc::ENOENT);
        }
    }
    fn release(&mut self, ino: u64,
reply: fuser::ReplyEmpty) {
        if self.get_attr(ino).is_some() {
            reply.ok();
        } else {
            reply.error(libc::ENOENT);
        }
    }
    /// Apply any combination of attribute changes; `size` triggers a
    /// truncate. Timestamps are clamped to a sane range before storing.
    fn setattr(&mut self, ino: u64, mode: Option<u32>, uid: Option<u32>, gid: Option<u32>, size: Option<u64>, atime: Option<fuser::TimeOrNow>, mtime: Option<fuser::TimeOrNow>, ctime: Option<SystemTime>, crtime: Option<SystemTime>, flags: Option<u32>, reply: fuser::ReplyAttr) {
        fn timeornow_to_systemtime(t: fuser::TimeOrNow) -> SystemTime {
            match t {
                fuser::TimeOrNow::SpecificTime(st) => st,
                fuser::TimeOrNow::Now => SystemTime::now(),
            }
        }
        // Clamp: >100 years in the future -> now; before epoch -> epoch.
        fn safe_systemtime(t: SystemTime) -> SystemTime {
            let now = SystemTime::now();
            if let Ok(d) = t.duration_since(std::time::UNIX_EPOCH) {
                if d.as_secs() > now.duration_since(std::time::UNIX_EPOCH).unwrap_or_default().as_secs() + (100 * 365 * 24 * 3600) {
                    now
                } else {
                    t
                }
            } else {
                std::time::UNIX_EPOCH
            }
        }
        if let Some(mut attr) = self.get_attr(ino) {
            if let Some(m) = mode { attr.perm = m as u16; }
            if let Some(u) = uid { attr.uid = u; }
            if let Some(g) = gid { attr.gid = g; }
            if let Some(a) = atime { attr.atime = timeornow_to_systemtime(a); }
            if let Some(m) = mtime { attr.mtime = timeornow_to_systemtime(m); }
            if let Some(c) = ctime { attr.ctime = safe_systemtime(c); }
            if let Some(cr) = crtime { attr.crtime = safe_systemtime(cr); }
            if let Some(fg) = flags { attr.flags = fg; }
            if let Some(new_size) = size {
                self.truncate_file(ino, new_size);
                attr.size = new_size;
            }
            self.set_attr(ino, &attr);
            reply.attr(&std::time::Duration::from_secs(1), &attr);
        } else {
            reply.error(libc::ENOENT);
        }
    }
    fn lookup(&mut self, parent: u64, name: &OsStr, reply: fuser::ReplyEntry) {
        let name = name.to_str().unwrap_or("");
        if let Some(ino) = self.get_child_ino(parent, name) {
            if let Some(attr) = self.get_attr(ino) {
                reply.entry(&std::time::Duration::from_secs(1), &attr, 0);
                return;
            }
        }
        reply.error(libc::ENOENT);
    }
    fn getattr(&mut self, ino: u64, reply: fuser::ReplyAttr) {
        if let Some(attr) = self.get_attr(ino) {
            reply.attr(&std::time::Duration::from_secs(1), &attr);
        } else {
            reply.error(libc::ENOENT);
        }
    }
    fn readdir(&mut self, ino: u64, offset: i64, mut reply: fuser::ReplyDirectory) {
        // BUG FIX: "." and ".." previously always pointed at ROOT_INODE.
        // "." is this directory; ".." is its parent (root is its own parent).
        let parent: u64 = self.conn.query_row(
            "SELECT parent FROM files WHERE ino = ?1",
            params![ino],
            |row| row.get::<_, Option<u64>>(0),
        ).optional().unwrap_or(None).flatten().unwrap_or(ROOT_INODE);
        let mut entries = vec![
            (ino, fuser::FileType::Directory, ".".to_string()),
            (parent, fuser::FileType::Directory, "..".to_string()),
        ];
        let mut stmt = self.conn.prepare("SELECT ino, name, is_dir, attr FROM files WHERE parent = ?1").unwrap();
        let rows = stmt.query_map(params![ino], |row| {
            let ino: u64 = row.get(0)?;
            let name: String = row.get(1)?;
            let _is_dir: i64 = row.get(2)?;
            let attr_blob: Vec<u8> = row.get(3)?;
            let ser_attr: SerializableFileAttr = bincode::deserialize(&attr_blob).unwrap();
            let kind = fuser::FileType::from(ser_attr.kind);
            Ok((ino, kind, name))
        }).unwrap();
        for row in rows {
            let (ino, kind, name) = row.unwrap();
            // In osx_mode, hide AppleDouble ("._*") entries.
            if self.osx_mode && name.starts_with("._") {
                continue;
            }
            entries.push((ino, kind, name));
        }
        for (i, (ino, kind, name)) in entries.into_iter().enumerate().skip(offset as usize) {
            // reply.add returns true once the kernel buffer is full.
            if reply.add(ino, (i + 1) as i64, kind, name) {
                break;
            }
        }
        reply.ok();
    }
    fn mkdir(&mut self, parent: u64, name: &OsStr, mode: u32, umask: u32, reply: fuser::ReplyEntry) {
        let name_str = name.to_str().unwrap_or("");
        if self.osx_mode && name_str.starts_with("._") {
            reply.error(libc::EACCES);
            return;
        }
        if self.get_child_ino(parent, name_str).is_some() {
            reply.error(libc::EEXIST);
            return;
        }
        let ino = self.alloc_inode();
        let perm = (mode & !umask & 0o7777) as u16;
        let attr = Self::new_file_attr(ino, fuser::FileType::Directory, perm, 2, 0);
        let attr_bytes = bincode::serialize(&SerializableFileAttr::from(&attr)).unwrap();
        self.insert_file(ino, name_str, parent, true, attr_bytes);
        reply.entry(&std::time::Duration::from_secs(1), &attr, 0);
    }
    fn create(&mut self, parent: u64, name: &OsStr, mode: u32, _flags: u32, umask: i32, reply: fuser::ReplyCreate) {
        let name_str = name.to_str().unwrap_or("");
        if self.osx_mode && name_str.starts_with("._") {
            reply.error(libc::EACCES);
            return;
        }
        if self.get_child_ino(parent, name_str).is_some() {
            reply.error(libc::EEXIST);
            return;
        }
        let ino = self.alloc_inode();
        let perm = (mode & !(umask as u32) & 0o7777) as u16;
        let attr = Self::new_file_attr(ino, fuser::FileType::RegularFile, perm, 1, 0);
        let attr_bytes = bincode::serialize(&SerializableFileAttr::from(&attr)).unwrap();
        self.insert_file(ino, name_str, parent, false, attr_bytes);
        reply.created(&std::time::Duration::from_secs(1), &attr, 0, 0, 0);
    }
    fn read(&mut self, ino: u64, offset: i64, size: u32, reply: fuser::ReplyData) {
        // Symlinks are read through readlink, not read.
        if let Some(attr) = self.get_attr(ino) {
            if attr.kind == fuser::FileType::Symlink {
                reply.error(libc::EINVAL);
                return;
            }
        }
        let file_size = self.get_file_size(ino);
        if offset as u64 >= file_size {
            reply.data(&[]);
            return;
        }
        let read_size = std::cmp::min(size as u64, file_size.saturating_sub(offset as u64)) as usize;
        let data = self.get_file_data_range(ino, offset as usize, read_size);
        reply.data(&data);
    }
    fn write(&mut self, ino: u64, offset: i64, data: &[u8], reply: fuser::ReplyWrite) {
        self.write_file_data(ino, offset as usize, data);
        reply.written(data.len() as u32);
    }
    fn unlink(&mut self, parent: u64, name: &OsStr, reply: fuser::ReplyEmpty) {
        let name_str = name.to_str().unwrap_or("");
        let ino = match self.get_child_ino(parent, name_str) {
            Some(ino) => ino,
            None => { reply.error(libc::ENOENT); return; }
        };
        // BUG FIX: unlink must not remove directories; that's rmdir's job.
        if let Some(attr) = self.get_attr(ino) {
            if attr.kind == fuser::FileType::Directory {
                reply.error(libc::EISDIR);
                return;
            }
        }
        let _ = self.conn.execute("DELETE FROM files WHERE ino = ?1", params![ino]);
        self.delete_file_chunks(ino);
        reply.ok();
    }
    fn rename(&mut self, parent: u64, name: &OsStr, newparent: u64, newname: &OsStr, _flags: u32, reply: fuser::ReplyEmpty) {
        let name_str = name.to_str().unwrap_or("");
        let newname_str = newname.to_str().unwrap_or("");
        let ino = match self.get_child_ino(parent, name_str) {
            Some(ino) => ino,
            None => { reply.error(libc::ENOENT); return; }
        };
        // POSIX: an existing destination is replaced, unless it is a
        // non-empty directory.
        if let Some(dest_ino) = self.get_child_ino(newparent, newname_str) {
            if let Some(attr) = self.get_attr(dest_ino) {
                if attr.kind == fuser::FileType::Directory && !self.is_dir_empty(dest_ino) {
                    reply.error(libc::ENOTEMPTY);
                    return;
                }
            }
            let _ = self.conn.execute("DELETE FROM files WHERE ino = ?1", params![dest_ino]);
            let _ = self.conn.execute("DELETE FROM files WHERE parent = ?1 AND name = ?2", params![newparent, newname_str]);
            self.delete_file_chunks(dest_ino);
        }
        let res = self.conn.execute(
            "UPDATE files SET parent = ?1, name = ?2 WHERE ino = ?3",
            params![newparent, newname_str, ino],
        );
        if res.is_ok() {
            // Defensive cleanup of any stale row still carrying the old name.
            let _ = self.conn.execute(
                "DELETE FROM files WHERE parent = ?1 AND name = ?2 AND ino != ?3",
                params![parent, name_str, ino],
            );
            reply.ok();
        } else {
            reply.error(libc::EIO);
        }
    }
    fn symlink(&mut self, parent: u64, name: &OsStr, link: &std::path::Path, reply: fuser::ReplyEntry) {
        let name_str = name.to_str().unwrap_or("");
        if self.osx_mode && name_str.starts_with("._") {
            reply.error(libc::EACCES);
            return;
        }
        if self.get_child_ino(parent, name_str).is_some() {
            reply.error(libc::EEXIST);
            return;
        }
        let ino = self.alloc_inode();
        // The link target is stored in files.data; attr.size is the
        // target's byte length, matching what readlink returns.
        let target = link.to_string_lossy().to_string().into_bytes();
        let attr = Self::new_file_attr(ino, fuser::FileType::Symlink, 0o777, 1, target.len() as u64);
        let attr_bytes = bincode::serialize(&SerializableFileAttr::from(&attr)).unwrap();
        let _ = self.conn.execute(
            "INSERT INTO files (ino, name, parent, is_dir, attr, data) VALUES (?1, ?2, ?3, ?4, ?5, ?6)",
            params![ino, name_str, parent, 0, attr_bytes, target],
        );
        reply.entry(&std::time::Duration::from_secs(1), &attr, 0);
    }
    fn readlink(&mut self, ino: u64, reply: fuser::ReplyData) {
        if let Some(attr) = self.get_attr(ino) {
            if attr.kind == fuser::FileType::Symlink {
                let data: Option<Vec<u8>> = self.conn.query_row(
                    "SELECT data FROM files WHERE ino = ?1",
                    params![ino],
                    |row| row.get(0),
                ).optional().unwrap_or(None);
                if let Some(data) = data {
                    reply.data(&data);
                    return;
                }
            }
        }
        reply.error(libc::EINVAL);
    }
}
--------------------------------------------------------------------------------