├── .nvmrc
├── packages
├── dofs
│ ├── src
│ │ ├── index.ts
│ │ ├── cli
│ │ │ └── index.ts
│ │ ├── hono
│ │ │ ├── index.ts
│ │ │ ├── types.ts
│ │ │ └── routes.ts
│ │ ├── withDofs.ts
│ │ └── Fs.ts
│ ├── example
│ │ ├── README.md
│ │ ├── vite.config.ts
│ │ ├── index.html
│ │ ├── package.json
│ │ ├── tsconfig.json
│ │ ├── src
│ │ │ └── index.ts
│ │ └── wrangler.jsonc
│ ├── tsdown.config.ts
│ ├── LICENSE
│ ├── CHANGELOG.md
│ ├── package.json
│ ├── tsconfig.json
│ └── README.md
└── dofs-rust-client
│ ├── Cargo.toml
│ ├── README.md
│ ├── src
│ ├── providers
│ │ ├── mod.rs
│ │ ├── memory.rs
│ │ ├── sqlite_simple.rs
│ │ └── sqlite_chunked.rs
│ ├── main.rs
│ └── fusefs.rs
│ ├── tests
│ └── integration_stress.rs
│ └── Cargo.lock
├── .gitignore
├── .changeset
├── config.json
└── README.md
├── .vscode
└── settings.json
├── .cursor
└── rules
│ └── start.mdc
├── package.json
├── .github
└── workflows
│ └── claude.yml
└── README.md
/.nvmrc:
--------------------------------------------------------------------------------
1 | node
--------------------------------------------------------------------------------
/packages/dofs/src/index.ts:
--------------------------------------------------------------------------------
1 | export * from './Fs'
2 | export * from './withDofs'
3 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | target
2 | mnt
3 | *.db
4 | .DS_Store
5 | node_modules
6 | .wrangler
7 | dist
8 | *.tgz
--------------------------------------------------------------------------------
/packages/dofs/example/README.md:
--------------------------------------------------------------------------------
1 | # DOFS Example
2 |
3 | This is an example of how to use DOFS (the Durable Object File System).
4 |
--------------------------------------------------------------------------------
/packages/dofs/example/vite.config.ts:
--------------------------------------------------------------------------------
1 | import { cloudflare } from '@cloudflare/vite-plugin'
2 | import { defineConfig } from 'vite'
3 |
4 | export default defineConfig({
5 | plugins: [cloudflare()],
6 | })
7 |
--------------------------------------------------------------------------------
/packages/dofs/example/index.html:
--------------------------------------------------------------------------------
1 | <!DOCTYPE html>
2 | <html lang="en">
3 |   <head>
4 |     <meta charset="UTF-8" />
5 |     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
6 |     <title>Durable Object File System</title>
7 |   </head>
8 |   <body>
9 |     <div id="app">
10 |       <h1>DOFS API</h1>
11 |     </div>
12 |   </body>
13 | </html>
--------------------------------------------------------------------------------
/.changeset/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "https://unpkg.com/@changesets/config@3.1.0/schema.json",
3 | "changelog": "@changesets/cli/changelog",
4 | "commit": false,
5 | "fixed": [],
6 | "linked": [],
7 | "access": "public",
8 | "baseBranch": "main",
9 | "updateInternalDependencies": "patch",
10 | "ignore": ["dofs-harness"]
11 | }
12 |
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "files.associations": {
3 | "*.jsonc": "jsonc"
4 | },
5 | "editor.defaultFormatter": "esbenp.prettier-vscode",
6 | "editor.formatOnSave": true,
7 | "[jsonc]": {
8 | "editor.defaultFormatter": "esbenp.prettier-vscode",
9 | "editor.formatOnSave": true
10 | },
11 | "typescript.tsdk": "node_modules/typescript/lib"
12 | }
13 |
--------------------------------------------------------------------------------
/packages/dofs-rust-client/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "cf-fuse"
3 | version = "0.1.0"
4 | edition = "2021"
5 |
6 | [dependencies]
7 | fuser = "0.14"
8 | libc = "0.2"
9 | log = "0.4"
10 | simplelog = "0.12"
11 | ctrlc = "3.4"
12 | rusqlite = "0.31"
13 | bincode = "1.3"
14 | serde = { version = "1.0", features = ["derive"] }
15 | filetime = "0.2.25"
16 | clap = { version = "4.5", features = ["derive"] }
17 |
18 | [dev-dependencies]
19 | prettytable-rs = "0.10"
20 | rand = "0.8"
21 |
--------------------------------------------------------------------------------
/.changeset/README.md:
--------------------------------------------------------------------------------
1 | # Changesets
2 |
3 | Hello and welcome! This folder has been automatically generated by `@changesets/cli`, a build tool that works
4 | with multi-package repos, or single-package repos to help you version and publish your code. You can
5 | find the full documentation for it [in our repository](https://github.com/changesets/changesets)
6 |
7 | We have a quick list of common questions to get you started engaging with this project in
8 | [our documentation](https://github.com/changesets/changesets/blob/main/docs/common-questions.md)
9 |
--------------------------------------------------------------------------------
/.cursor/rules/start.mdc:
--------------------------------------------------------------------------------
1 | ---
2 | description:
3 | globs:
4 | alwaysApply: true
5 | ---
6 | This is a monorepo.
7 |
8 | * fix errors without prompting
9 | * don't ask to proceed, just go
10 | * fix all warnings along the way without prompting
11 | * ONLY when making changes to /packages/dofs-rust-client:
12 | * run `cargo build` after each change to be sure it compiles
13 | * run `cargo test` after each change to be sure it passes tests
14 | * when making changes to /packages/dofs:
15 | * do not automatically build
16 | * do not automatically install packages
17 |
--------------------------------------------------------------------------------
/packages/dofs/example/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "dofs-harness",
3 | "version": "0.0.1",
4 | "private": true,
5 | "type": "module",
6 | "scripts": {
7 | "dev": "vite dev",
8 | "build": "vite build",
9 | "deploy": "npm run build && wrangler deploy",
10 | "cf-typegen": "wrangler types"
11 | },
12 | "devDependencies": {
13 | "@cloudflare/vite-plugin": "^1.2.2",
14 | "@xterm/addon-fit": "^0.10.0",
15 | "dofs": "workspace:*",
16 | "hono": "^4.7.8",
17 | "vite": "^6.3.5",
18 | "wrangler": "^4.19.1"
19 | },
20 | "packageManager": "bun@1.1.13"
21 | }
22 |
--------------------------------------------------------------------------------
/packages/dofs/tsdown.config.ts:
--------------------------------------------------------------------------------
1 | ///
2 |
3 | import { copyFileSync } from 'fs'
4 | import { defineConfig } from 'tsdown'
5 |
6 | export default defineConfig({
7 | entry: {
8 | index: 'src/index.ts',
9 | hono: 'src/hono/index.ts',
10 | cli: 'src/cli/index.ts',
11 | },
12 | format: ['esm'],
13 | dts: {
14 | sourcemap: true,
15 | },
16 | sourcemap: true,
17 | external: ['cloudflare:workers'],
18 | outDir: 'dist',
19 | clean: true,
20 | onSuccess: async () => {
21 | console.log('Copying README.md to root')
22 | copyFileSync('README.md', '../../README.md')
23 | },
24 | })
25 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "dofs-root",
3 | "version": "0.0.1",
4 | "private": true,
5 | "scripts": {
6 | "format": "prettier --write ."
7 | },
8 | "prettier": {
9 | "printWidth": 120,
10 | "tabWidth": 2,
11 | "useTabs": false,
12 | "singleQuote": true,
13 | "trailingComma": "es5",
14 | "semi": false,
15 | "plugins": [
16 | "prettier-plugin-organize-imports"
17 | ],
18 | "overrides": [
19 | {
20 | "files": [
21 | "*.jsonc"
22 | ],
23 | "options": {
24 | "parser": "jsonc-parser",
25 | "trailingComma": "none"
26 | }
27 | }
28 | ]
29 | },
30 | "workspaces": [
31 | "packages/*",
32 | "packages/dofs/example"
33 | ],
34 | "devDependencies": {
35 | "@changesets/cli": "^2.29.5",
36 | "prettier": "^3.5.3",
37 | "prettier-plugin-organize-imports": "^4.1.0",
38 | "tsup": "^8.5.0",
39 | "typescript": "^5.8.3",
40 | "wrangler": "^4.20.5"
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/packages/dofs/src/cli/index.ts:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 |
3 | import { Command } from 'commander'
4 | import pkg from '../../package.json'
5 |
6 | const program = new Command()
7 |
8 | program.name('dofs').description('A filesystem for Cloudflare Durable Objects').version(pkg.version)
9 |
10 | program
11 | .command('init')
12 | .description('Initialize a new DOFS filesystem')
13 | .action(() => {
14 | console.log('Initializing DOFS filesystem...')
15 | // TODO: Implement init command
16 | })
17 |
18 | program
19 | .command('mount')
20 | .description('Mount a DOFS filesystem')
21 | .option('-p, --path ', 'Mount path')
22 | .action((options: { path?: string }) => {
23 | console.log('Mounting DOFS filesystem...', options)
24 | // TODO: Implement mount command
25 | })
26 |
27 | program
28 | .command('status')
29 | .description('Show DOFS filesystem status')
30 | .action(() => {
31 | console.log('DOFS filesystem status:')
32 | // TODO: Implement status command
33 | })
34 |
35 | program.parse()
36 |
--------------------------------------------------------------------------------
/packages/dofs/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2025 Ben Allfree
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/packages/dofs/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # dofs
2 |
3 | ## 0.1.0
4 |
5 | ### Minor Changes
6 |
7 | - c5c129d: Update withDofs and add @Dofs attribute
8 | - 37f0c55: enh: Durable Object configuration
9 |
10 | ### Patch Changes
11 |
12 | - d8185b8: enh: type refinement for withDofs
13 |
14 | ## 0.0.2
15 |
16 | ### Patch Changes
17 |
18 | - d007ebb: Added withDofs support
19 |
20 | ## 0.0.1
21 |
22 | ### Patch Changes
23 |
24 | - recursive mkdir support
25 | - 967c86c: Device size discussion
26 | - 5b7e291: IDurableObjectFs
27 | - fcc96d5: Adjust default chunk size to 64kb and update readme
28 | - 649afc2: Initial release
29 | - 199c42c: Update readme with sync/async notes
30 | - af1e53a: fix write method signature in IDurableObjectFs
31 |
32 | ## 0.0.1-rc.2
33 |
34 | ### Patch Changes
35 |
36 | - 967c86c: Device size discussion
37 | - 5b7e291: IDurableObjectFs
38 | - 199c42c: Update readme with sync/async notes
39 | - af1e53a: fix write method signature in IDurableObjectFs
40 |
41 | ## 0.0.1-rc.1
42 |
43 | ### Patch Changes
44 |
45 | - Adjust default chunk size to 64kb and update readme
46 |
47 | ## 0.0.1-rc.0
48 |
49 | ### Patch Changes
50 |
51 | - Initial release
52 |
--------------------------------------------------------------------------------
/.github/workflows/claude.yml:
--------------------------------------------------------------------------------
1 | name: Claude Code
2 |
3 | on:
4 | issue_comment:
5 | types: [created]
6 | pull_request_review_comment:
7 | types: [created]
8 | issues:
9 | types: [opened, assigned]
10 | pull_request_review:
11 | types: [submitted]
12 |
13 | jobs:
14 | claude:
15 | if: |
16 | (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
17 | (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
18 | (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
19 | (github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
20 | runs-on: ubuntu-latest
21 | permissions:
22 | contents: read
23 | pull-requests: read
24 | issues: read
25 | id-token: write
26 | steps:
27 | - name: Checkout repository
28 | uses: actions/checkout@v4
29 | with:
30 | fetch-depth: 1
31 |
32 | - name: Run Claude Code
33 | id: claude
34 | uses: anthropics/claude-code-action@beta
35 | with:
36 | anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
37 |
38 |
--------------------------------------------------------------------------------
/packages/dofs-rust-client/README.md:
--------------------------------------------------------------------------------
1 | # cf-fuse
2 |
3 | ## Running the Main Program
4 |
5 | Build the project:
6 |
7 | ```sh
8 | cargo build --release
9 | ```
10 |
11 | Run the FUSE filesystem (default mountpoint is `./mnt`):
12 |
13 | ```sh
14 | cargo run --release -- [--provider=memory|sqlite_simple|sqlite_chunked] [--mountpoint=PATH] [--chunk_size=SIZE] [--mode=osx]
15 | ```
16 |
17 | - `--provider` (optional): Choose backend. Default is `memory`.
18 | - `--mountpoint` (optional): Directory to mount. Default is `./mnt`.
19 | - `--chunk_size` (optional): Only for `sqlite_chunked`. Default is 4096.
20 | - `--mode=osx` (optional): Enable macOS-specific mode.
21 |
22 | Example:
23 |
24 | ```sh
25 | cargo run --release -- --provider=sqlite_simple --mountpoint=./mnt
26 | ```
27 |
28 | Unmount with:
29 |
30 | ```sh
31 | umount ./mnt
32 | ```
33 |
34 | ## Running the Stress Tests
35 |
36 | The stress test runs for all providers and prints a summary table.
37 |
38 | ```sh
39 | cargo test --test integration_stress -- --nocapture
40 | ```
41 |
42 | - Requires `umount` command and `prettytable-rs` crate (should be in dependencies).
43 | - The test will mount and unmount `./mnt` and create/remove test files.
44 |
45 | ---
46 |
47 | For more options, see `src/main.rs` and `tests/integration_stress.rs`.
48 |
--------------------------------------------------------------------------------
/packages/dofs/src/hono/index.ts:
--------------------------------------------------------------------------------
1 | import { Hono } from 'hono'
2 | import { WithDofs } from '../withDofs.js'
3 | import { createFsRoutes } from './routes.js'
4 | import { DofsContext, DurableObjectConfig } from './types.js'
5 |
6 | export * from './types.js'
7 |
8 | export const dofs = (config: DurableObjectConfig) => {
9 | const api = new Hono<{ Bindings: TEnv } & DofsContext>()
10 |
11 | const getFs = async (doNamespace: string, doName: string, env: TEnv) => {
12 | if (!(doNamespace in env)) {
13 | throw new Error(`Durable Object namespace ${doNamespace} not found`)
14 | }
15 | const ns = env[doNamespace as keyof TEnv] as DurableObjectNamespace>
16 | const doId = ns.idFromName(doName)
17 | const stub = ns.get(doId)
18 | return stub.getFs()
19 | }
20 |
21 | // Create filesystem routes
22 | const fsRoutes = createFsRoutes()
23 |
24 | // Middleware to extract filesystem stub and mount the fs routes
25 | api.use('/:doNamespace/:doId/*', async (c, next) => {
26 | const { doNamespace, doId } = c.req.param()
27 | try {
28 | const fs = await getFs(doNamespace, doId, c.env)
29 | c.set('fs', fs)
30 | await next()
31 | } catch (error) {
32 | return c.text(`Error accessing filesystem: ${error instanceof Error ? error.message : String(error)}`, 500)
33 | }
34 | })
35 |
36 | // Mount the filesystem routes at /:doNamespace/:doId
37 | api.route('/:doNamespace/:doId', fsRoutes)
38 |
39 | return api
40 | }
41 |
--------------------------------------------------------------------------------
/packages/dofs/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "dofs",
3 | "description": "A filesystem for Cloudflare Durable Objects.",
4 | "version": "0.1.0",
5 | "type": "module",
6 | "author": {
7 | "name": "Ben Allfree",
8 | "url": "https://x.com/benallfree"
9 | },
10 | "license": "MIT",
11 | "repository": {
12 | "type": "git",
13 | "url": "https://github.com/benallfree/dofs"
14 | },
15 | "keywords": [
16 | "cloudflare",
17 | "durable objects",
18 | "dofs",
19 | "filesystem",
20 | "file system",
21 | "file-system",
22 | "file-system-api",
23 | "file-system-api-client",
24 | "file-system-api-server",
25 | "file-system-api-client-server"
26 | ],
27 | "homepage": "https://github.com/benallfree/dofs/tree/main/packages/dofs",
28 | "scripts": {
29 | "build": "tsdown",
30 | "dev": "tsdown --watch"
31 | },
32 | "main": "./dist/Fs.js",
33 | "module": "./dist/Fs.js",
34 | "types": "./dist/Fs.d.ts",
35 | "exports": {
36 | ".": {
37 | "import": "./dist/index.js",
38 | "types": "./dist/index.d.ts"
39 | },
40 | "./hono": {
41 | "import": "./dist/hono.js",
42 | "types": "./dist/hono.d.ts"
43 | }
44 | },
45 | "bin": {
46 | "dofs": "./dist/cli/index.js"
47 | },
48 | "files": [
49 | "dist"
50 | ],
51 | "dependencies": {
52 | "commander": "^14.0.0",
53 | "neofuse": "^0.0.1-rc.3"
54 | },
55 | "peerDependencies": {
56 | "hono": "^4.7.11"
57 | },
58 | "devDependencies": {
59 | "@types/node": "^22.15.30",
60 | "tsdown": "^0.12.7"
61 | }
62 | }
63 |
--------------------------------------------------------------------------------
/packages/dofs/example/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | /* Visit https://aka.ms/tsconfig.json to read more about this file */
4 |
5 | /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */
6 | "target": "es2021",
7 | /* Specify a set of bundled library declaration files that describe the target runtime environment. */
8 | "lib": ["es2021"],
9 | /* Specify what JSX code is generated. */
10 | "jsx": "react-jsx",
11 |
12 | /* Specify what module code is generated. */
13 | "module": "es2022",
14 | /* Specify how TypeScript looks up a file from a given module specifier. */
15 | "moduleResolution": "bundler",
16 | /* Specify type package names to be included without being referenced in a source file. */
17 | "types": [],
18 | /* Enable importing .json files */
19 | "resolveJsonModule": true,
20 |
21 | /* Allow JavaScript files to be a part of your program. Use the `checkJS` option to get errors from these files. */
22 | "allowJs": true,
23 | /* Enable error reporting in type-checked JavaScript files. */
24 | "checkJs": false,
25 |
26 | /* Disable emitting files from a compilation. */
27 | "noEmit": true,
28 |
29 | /* Ensure that each file can be safely transpiled without relying on other imports. */
30 | "isolatedModules": true,
31 | /* Allow 'import x from y' when a module doesn't have a default export. */
32 | "allowSyntheticDefaultImports": true,
33 | /* Ensure that casing is correct in imports. */
34 | "forceConsistentCasingInFileNames": true,
35 |
36 | /* Enable all strict type-checking options. */
37 | "strict": true,
38 |
39 | /* Skip type checking all .d.ts files. */
40 | "skipLibCheck": true
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/packages/dofs-rust-client/src/providers/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod memory;
2 | pub mod sqlite_simple;
3 | pub mod sqlite_chunked;
4 |
5 | use fuser::{ReplyAttr, ReplyEntry, ReplyDirectory, ReplyData, ReplyCreate, ReplyWrite};
6 | use std::ffi::OsStr;
7 |
8 | pub trait Provider {
9 | fn rmdir(&mut self, parent: u64, name: &OsStr, reply: fuser::ReplyEmpty);
10 | fn open(&mut self, ino: u64, reply: fuser::ReplyOpen);
11 | fn flush(&mut self, ino: u64, reply: fuser::ReplyEmpty);
12 | fn release(&mut self, ino: u64, reply: fuser::ReplyEmpty);
13 | fn setattr(&mut self, ino: u64, mode: Option, uid: Option, gid: Option, size: Option, atime: Option, mtime: Option, ctime: Option, crtime: Option, flags: Option, reply: ReplyAttr);
14 | fn lookup(&mut self, parent: u64, name: &OsStr, reply: ReplyEntry);
15 | fn getattr(&mut self, ino: u64, reply: ReplyAttr);
16 | fn readdir(&mut self, ino: u64, offset: i64, reply: ReplyDirectory);
17 | fn mkdir(&mut self, parent: u64, name: &OsStr, mode: u32, umask: u32, reply: ReplyEntry);
18 | fn create(&mut self, parent: u64, name: &OsStr, mode: u32, flags: u32, umask: i32, reply: ReplyCreate);
19 | fn read(&mut self, ino: u64, offset: i64, size: u32, reply: ReplyData);
20 | fn write(&mut self, ino: u64, offset: i64, data: &[u8], reply: ReplyWrite);
21 | fn unlink(&mut self, parent: u64, name: &std::ffi::OsStr, reply: fuser::ReplyEmpty);
22 | fn rename(&mut self, parent: u64, name: &OsStr, newparent: u64, newname: &OsStr, flags: u32, reply: fuser::ReplyEmpty);
23 | fn symlink(&mut self, parent: u64, name: &OsStr, link: &std::path::Path, reply: fuser::ReplyEntry);
24 | fn readlink(&mut self, ino: u64, reply: fuser::ReplyData);
25 | }
--------------------------------------------------------------------------------
/packages/dofs/src/hono/types.ts:
--------------------------------------------------------------------------------
1 | import { DurableObject } from 'cloudflare:workers'
2 | import { Fs } from '../Fs.js'
3 |
4 | // Extend the context type to include our fs property
5 | export type DofsContext = {
6 | Variables: {
7 | fs: Rpc.Stub // The filesystem stub
8 | }
9 | }
10 |
11 | /**
12 | * Represents an instance of a Durable Object
13 | */
14 | export interface DurableObjectInstance {
15 | /** The unique slug identifier for the instance */
16 | slug: string
17 | /** The display name of the instance */
18 | name: string
19 | }
20 |
21 | export type FsStat = {
22 | mtime: Date
23 | atime: Date
24 | ctime: Date
25 | size: number
26 | mode: number
27 | uid: number
28 | gid: number
29 | nlink: number
30 | }
31 |
32 | /**
33 | * Configuration for a single Durable Object
34 | */
35 | export interface DurableObjectConfigItem {
36 | /** The name of the Durable Object */
37 | name: string
38 | /** Reference to the Durable Object class for compatibility checking */
39 | classRef: typeof DurableObject
40 | /** Function to get instances, optionally paginated */
41 | getInstances: (page?: number) => Promise
42 | /** Function to get the stat for the namespace directory */
43 | resolveNamespaceStat?: (cfg: DurableObjectConfig) => Promise
44 | /** Function to get the stat for the instance directory */
45 | resolveInstanceStat?: (cfg: DurableObjectConfig, instanceId: string) => Promise
46 | }
47 |
48 | /**
49 | * Configuration object for Durable Objects
50 | */
51 | export type DurableObjectConfig = {
52 | resolveRootStat?: (cfg: DurableObjectConfig) => Promise
53 | dos: Record>
54 | }
55 |
--------------------------------------------------------------------------------
/packages/dofs/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | /* Visit https://aka.ms/tsconfig.json to read more about this file */
4 |
5 | /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */
6 | "target": "es2021",
7 | /* Specify a set of bundled library declaration files that describe the target runtime environment. */
8 | "lib": ["es2021"],
9 | /* Specify what JSX code is generated. */
10 | "jsx": "react-jsx",
11 |
12 | /* Specify what module code is generated. */
13 | "module": "es2022",
14 | /* Specify how TypeScript looks up a file from a given module specifier. */
15 | "moduleResolution": "node",
16 | /* Specify type package names to be included without being referenced in a source file. */
17 | "types": ["./worker-configuration.d.ts"],
18 | /* Enable importing .json files */
19 | "resolveJsonModule": true,
20 |
21 | /* Allow JavaScript files to be a part of your program. Use the `checkJS` option to get errors from these files. */
22 | "allowJs": true,
23 | /* Enable error reporting in type-checked JavaScript files. */
24 | "checkJs": false,
25 |
26 | /* Disable emitting files from a compilation. */
27 | "noEmit": true,
28 |
29 | /* Ensure that each file can be safely transpiled without relying on other imports. */
30 | "isolatedModules": true,
31 | /* Allow 'import x from y' when a module doesn't have a default export. */
32 | "allowSyntheticDefaultImports": true,
33 | /* Ensure that casing is correct in imports. */
34 | "forceConsistentCasingInFileNames": true,
35 |
36 | /* Enable all strict type-checking options. */
37 | "strict": true,
38 |
39 | /* Skip type checking all .d.ts files. */
40 | "skipLibCheck": true
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/packages/dofs/example/src/index.ts:
--------------------------------------------------------------------------------
import { DurableObject } from 'cloudflare:workers'
import { Dofs, withDofs } from 'dofs'
import { dofs } from 'dofs/hono'
import { Hono } from 'hono'

// Plain Durable Object used as the base class for the mixin example below.
export class MyDurableObjectBase extends DurableObject {
  constructor(ctx: DurableObjectState, env: Env) {
    super(ctx, env)
  }
}

// Mixin form: withDofs() wraps the base class and adds getFs().
export class MyDurableObjectWithDofsMixin extends withDofs(MyDurableObjectBase, { chunkSize: 4 * 1024 }) {
  constructor(ctx: DurableObjectState, env: Env) {
    super(ctx, env)
  }

  // Demonstrates calling into the attached filesystem.
  test() {
    this.getFs().readFile('test.txt')
  }
}

// Decorator form: @Dofs attaches the same filesystem behavior.
@Dofs({ chunkSize: 4 * 1024 })
export class MyDurableObjectWithDofsAttribute extends DurableObject {
  constructor(ctx: DurableObjectState, env: Env) {
    super(ctx, env)
  }
}

const app = new Hono<{ Bindings: Env }>()

// Mount the API middleware
// Keys under `dos` must match the DO binding names in wrangler.jsonc.
// NOTE(review): the trailing `as any` silences a type mismatch between this
// app's Env and the dofs() sub-app — worth tightening rather than casting.
app.route(
  '/',
  dofs({
    dos: {
      MY_DURABLE_OBJECT_WITH_DOFS_MIXIN: {
        classRef: MyDurableObjectWithDofsMixin,
        getInstances: async () => {
          return [
            {
              slug: 'my-durable-object-with-dofs-mixin',
              name: 'My Durable Object with Dofs Mixin',
            },
          ]
        },
        name: 'My Durable Object with Dofs Mixin',
      },
      MY_DURABLE_OBJECT_WITH_DOFS_ATTRIBUTE: {
        classRef: MyDurableObjectWithDofsAttribute,
        getInstances: async () => {
          return [
            {
              slug: 'my-durable-object-with-dofs-attribute',
              name: 'My Durable Object with Dofs Attribute',
            },
          ]
        },
        name: 'My Durable Object with Dofs Attribute',
      },
    },
  }) as any
)

export default app
65 |
--------------------------------------------------------------------------------
/packages/dofs/src/withDofs.ts:
--------------------------------------------------------------------------------
1 | import { DurableObject } from 'cloudflare:workers'
2 | import { Fs, FsOptions } from './Fs.js'
3 |
// Public surface added by the DOFS mixin/decorator: a Durable Object that can
// hand out its filesystem instance via getFs().
// NOTE(review): generic parameters may have been stripped from this alias
// during extraction (e.g. WithDofs<TEnv>) — confirm against git history.
export type WithDofs = DurableObject & {
  getFs: () => Fs
}
7 |
8 | // Utility to create the extended class
9 | export const withDofs = (
10 | cls: new (ctx: DurableObjectState, env: TEnv) => DurableObject,
11 | options: FsOptions = {}
12 | ): new (ctx: DurableObjectState, env: TEnv) => WithDofs => {
13 | return class DurableObjectWithDofs extends cls {
14 | fs: Fs
15 | constructor(ctx: DurableObjectState, env: TEnv) {
16 | super(ctx, env)
17 | this.fs = new Fs(ctx, env, options)
18 | }
19 | getFs(): Fs {
20 | return this.fs
21 | }
22 | }
23 | }
24 |
25 | export function Dofs(options: FsOptions = {}) {
26 | return function DurableObject>(
27 | target: new (ctx: DurableObjectState, env: TEnv) => DurableObject
28 | ): new (ctx: DurableObjectState, env: TEnv) => WithDofs {
29 | return class extends target {
30 | fs: Fs
31 | constructor(ctx: DurableObjectState, env: TEnv) {
32 | super(ctx, env)
33 | this.fs = new Fs(ctx, env, options)
34 | }
35 | getFs(): Fs {
36 | return this.fs
37 | }
38 | }
39 | }
40 | }
41 |
// Testing
// Compile-time smoke checks: these classes are never instantiated; they exist
// so `tsc` verifies that both the mixin (`withDofs`) and the decorator
// (`@Dofs`) forms type-check against a plain Durable Object base class.

class MyDurableObjectBase extends DurableObject {
  constructor(ctx: DurableObjectState, env: Env) {
    super(ctx, env)
  }
}

// Mixin form: extend the class returned by withDofs(); getFs() is inherited.
class MyDurableObject2 extends withDofs(MyDurableObjectBase, { chunkSize: 4 * 1024 }) {
  constructor(ctx: DurableObjectState, env: Env) {
    super(ctx, env)
  }
  test() {
    this.getFs().readFile('test.txt')
  }
}

// Decorator form: @Dofs attaches fs/getFs to the decorated class.
@Dofs({ chunkSize: 4 * 1024 })
class MyAttributeObject extends DurableObject {
  constructor(ctx: DurableObjectState, env: Env) {
    super(ctx, env)
  }
}
65 |
--------------------------------------------------------------------------------
/packages/dofs/example/wrangler.jsonc:
--------------------------------------------------------------------------------
1 | /**
2 | * For more details on how to configure Wrangler, refer to:
3 | * https://developers.cloudflare.com/workers/wrangler/configuration/
4 | */
5 | {
6 | "$schema": "../../../node_modules/wrangler/config-schema.json",
7 | "name": "dofs",
8 | "main": "src/index.ts",
9 | "compatibility_date": "2025-05-04",
10 | "migrations": [
11 | {
12 | "new_sqlite_classes": ["MyDurableObjectWithDofsMixin", "MyDurableObjectWithDofsAttribute"],
13 | "tag": "v1",
14 | },
15 | ],
16 | "durable_objects": {
17 | "bindings": [
18 | {
19 | "class_name": "MyDurableObjectWithDofsMixin",
20 | "name": "MY_DURABLE_OBJECT_WITH_DOFS_MIXIN",
21 | },
22 | {
23 | "class_name": "MyDurableObjectWithDofsAttribute",
24 | "name": "MY_DURABLE_OBJECT_WITH_DOFS_ATTRIBUTE",
25 | },
26 | ],
27 | },
28 | "observability": {
29 | "enabled": true,
30 | },
31 | /**
32 | * Smart Placement
33 | * Docs: https://developers.cloudflare.com/workers/configuration/smart-placement/#smart-placement
34 | */
35 | // "placement": { "mode": "smart" },
36 |
37 | /**
38 | * Bindings
39 | * Bindings allow your Worker to interact with resources on the Cloudflare Developer Platform, including
40 | * databases, object storage, AI inference, real-time communication and more.
41 | * https://developers.cloudflare.com/workers/runtime-apis/bindings/
42 | */
43 |
44 | /**
45 | * Environment Variables
46 | * https://developers.cloudflare.com/workers/wrangler/configuration/#environment-variables
47 | */
48 | // "vars": { "MY_VARIABLE": "production_value" },
49 | /**
50 | * Note: Use secrets to store sensitive data.
51 | * https://developers.cloudflare.com/workers/configuration/secrets/
52 | */
53 |
54 | /**
55 | * Static Assets
56 | * https://developers.cloudflare.com/workers/static-assets/binding/
57 | */
58 | // "assets": {
59 | // "directory": "./public/",
60 | // "binding": "ASSETS",
61 | // "not_found_handling": "single-page-application",
62 | // "run_worker_first": true,
63 | // },
64 |
65 | /**
66 | * Service Bindings (communicate between multiple Workers)
67 | * https://developers.cloudflare.com/workers/wrangler/configuration/#service-bindings
68 | */
69 | // "services": [{ "binding": "MY_SERVICE", "service": "my-service" }]
70 | }
71 |
--------------------------------------------------------------------------------
/packages/dofs/src/hono/routes.ts:
--------------------------------------------------------------------------------
1 | import { Hono } from 'hono'
2 | import { DofsContext } from './types.js'
3 |
4 | export const createFsRoutes = () => {
5 | const fsRoutes = new Hono<{ Bindings: TEnv } & DofsContext>()
6 |
7 | fsRoutes.post('/upload', async (c) => {
8 | const fs = c.get('fs')
9 | const formData = await c.req.formData()
10 | const file = formData.get('file')
11 | if (!file || typeof file === 'string') {
12 | return c.text('No file uploaded', 400)
13 | }
14 | const dir = c.req.query('path') || '/'
15 | const finalPath = (dir.endsWith('/') ? dir : dir + '/') + file.name
16 | await fs.writeFile(finalPath, file.stream())
17 | return c.redirect('/')
18 | })
19 |
20 | fsRoutes.get('/ls', async (c) => {
21 | const fs = c.get('fs')
22 | const path = c.req.query('path') || '/'
23 | const entries = await fs.listDir(path)
24 | const stats = await Promise.all(
25 | entries
26 | .filter((e: string) => e !== '.' && e !== '..')
27 | .map(async (e: string) => {
28 | try {
29 | const s = await fs.stat((path.endsWith('/') ? path : path + '/') + e)
30 | return { name: e, ...s }
31 | } catch (err) {
32 | return { name: e, error: true }
33 | }
34 | })
35 | )
36 | return c.json(stats)
37 | })
38 |
39 | fsRoutes.get('/file', async (c) => {
40 | const fs = c.get('fs')
41 | const path = c.req.query('path')
42 | if (!path) return c.text('Missing path', 400)
43 | try {
44 | // Try to guess content type from extension
45 | const ext = (path.split('.').pop() || '').toLowerCase()
46 | const typeMap = {
47 | jpg: 'image/jpeg',
48 | jpeg: 'image/jpeg',
49 | png: 'image/png',
50 | gif: 'image/gif',
51 | webp: 'image/webp',
52 | bmp: 'image/bmp',
53 | svg: 'image/svg+xml',
54 | }
55 | const contentType = typeMap[ext as keyof typeof typeMap] || 'application/octet-stream'
56 | const stat = await fs.stat(path)
57 | const size = stat.size
58 | const stream = await fs.readFile(path)
59 | return new Response(stream, {
60 | status: 200,
61 | headers: {
62 | 'content-type': contentType,
63 | 'content-disposition': `inline; filename="${encodeURIComponent(path.split('/').pop() || 'file')}"`,
64 | 'content-length': String(size),
65 | },
66 | })
67 | } catch (e) {
68 | return c.text('Not found', 404)
69 | }
70 | })
71 |
72 | fsRoutes.post('/rm', async (c) => {
73 | const fs = c.get('fs')
74 | const path = c.req.query('path')
75 | if (!path) return c.text('Missing path', 400)
76 | try {
77 | await fs.unlink(path)
78 | return c.text('OK')
79 | } catch (e) {
80 | return c.text('Not found', 404)
81 | }
82 | })
83 |
84 | fsRoutes.post('/mkdir', async (c) => {
85 | const fs = c.get('fs')
86 | const path = c.req.query('path')
87 | if (!path) return c.text('Missing path', 400)
88 | try {
89 | await fs.mkdir(path)
90 | return c.text('OK')
91 | } catch (e) {
92 | return c.text('Error: ' + (e instanceof Error ? e.message : String(e)), 400)
93 | }
94 | })
95 |
96 | fsRoutes.post('/rmdir', async (c) => {
97 | const fs = c.get('fs')
98 | const path = c.req.query('path')
99 | if (!path) return c.text('Missing path', 400)
100 | try {
101 | await fs.rmdir(path)
102 | return c.text('OK')
103 | } catch (e) {
104 | return c.text('Error: ' + (e instanceof Error ? e.message : String(e)), 400)
105 | }
106 | })
107 |
108 | fsRoutes.post('/mv', async (c) => {
109 | const fs = c.get('fs')
110 | const src = c.req.query('src')
111 | const dest = c.req.query('dest')
112 | if (!src || !dest) return c.text('Missing src or dest', 400)
113 | try {
114 | await fs.rename(src, dest)
115 | return c.text('OK')
116 | } catch (e) {
117 | return c.text('Error: ' + (e instanceof Error ? e.message : String(e)), 400)
118 | }
119 | })
120 |
121 | fsRoutes.post('/symlink', async (c) => {
122 | const fs = c.get('fs')
123 | const target = c.req.query('target')
124 | const path = c.req.query('path')
125 | if (!target || !path) return c.text('Missing target or path', 400)
126 | try {
127 | await fs.symlink(target, path)
128 | return c.text('OK')
129 | } catch (e) {
130 | return c.text('Error: ' + (e instanceof Error ? e.message : String(e)), 400)
131 | }
132 | })
133 |
134 | fsRoutes.get('/stat', async (c) => {
135 | const fs = c.get('fs')
136 | const path = c.req.query('path')
137 | if (!path) return c.text('Missing path', 400)
138 | try {
139 | const stat = await fs.stat(path)
140 | return c.json(stat)
141 | } catch (e) {
142 | return c.text('Error: ' + (e instanceof Error ? e.message : String(e)), 400)
143 | }
144 | })
145 |
146 | fsRoutes.get('/df', async (c) => {
147 | const fs = c.get('fs')
148 | const stats = await fs.getDeviceStats()
149 | return c.json(stats)
150 | })
151 |
152 | return fsRoutes
153 | }
154 |
--------------------------------------------------------------------------------
/packages/dofs-rust-client/src/main.rs:
--------------------------------------------------------------------------------
1 | use fuser::{MountOption};
2 | use ctrlc;
3 | use std::process::Command;
4 | use std::fs;
5 | use log::info;
6 | use simplelog::*;
7 | mod fusefs;
8 | mod providers;
9 | use fusefs::FuseFS;
10 | use providers::memory::MemoryProvider;
11 | use providers::sqlite_simple::SqliteProvider as SqliteSimpleProvider;
12 | use providers::sqlite_chunked::SqliteChunkedProvider;
13 | use clap::{Parser, Subcommand};
14 |
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Cli {
    // Top-level CLI definition; all behavior lives in the subcommands.
    // (Plain `//` comments are used so clap's generated help text, which is
    // built from `///` doc comments, is unchanged.)
    #[command(subcommand)]
    command: Commands,
}
21 |
#[derive(Subcommand, Debug)]
enum Commands {
    // Review notes below use `//` (not `///`) so clap's help output is unchanged.
    /// Mount the filesystem
    Mount {
        // Storage backend: "memory", "sqlite_simple" or "sqlite_chunked";
        // any other value falls back to the memory provider in main().
        #[arg(long, default_value = "memory")]
        provider: String,
        // Enables macOS-specific provider behavior — exact semantics not
        // visible here; TODO confirm in the provider implementations.
        #[arg(long, default_value_t = false)]
        mode_osx: bool,
        // Chunk size in bytes; only passed to the sqlite_chunked provider.
        #[arg(long, default_value_t = 4096)]
        chunk_size: usize,
        // Directory to mount at; created (and force-unmounted) if needed.
        #[arg(long, default_value = "./mnt")]
        mountpoint: String,
        // SQLite database file; empty string means "use the provider default".
        #[arg(long, default_value = "")]
        db_path: String,
    },
    /// List available providers
    ListProviders,
    /// Show filesystem stats
    Stats {
        // Required in practice: Stats prints an error and exits when empty.
        #[arg(long, default_value = "")]
        db_path: String,
    },
}
45 |
// Entry point: parses the CLI, then mounts, lists providers, or shows stats.
fn main() {
    // Terminal logging at INFO level; unwrap is acceptable here since a
    // failure to init logging at startup is unrecoverable for this tool.
    TermLogger::init(LevelFilter::Info, Config::default(), TerminalMode::Mixed, ColorChoice::Auto).unwrap();
    let cli = Cli::parse();

    match cli.command {
        Commands::Mount { provider, mode_osx, chunk_size, mountpoint, db_path } => {
            let provider_name = provider.as_str();
            let osx_mode = mode_osx;
            let mountpoint = mountpoint.as_str();
            // Empty string is the sentinel for "no explicit DB path"; the
            // provider match below substitutes a per-provider default file.
            let db_path = if db_path.is_empty() {
                None
            } else {
                Some(db_path.as_str())
            };
            if std::path::Path::new(mountpoint).exists() {
                // Try to unmount in case it was left mounted from a previous panic
                let _ = Command::new("umount").arg(mountpoint).status();
            }
            if !std::path::Path::new(mountpoint).exists() {
                fs::create_dir_all(mountpoint).expect("Failed to create mountpoint");
            }

            // Setup Ctrl+C handler to unmount
            let mountpoint_string = mountpoint.to_string();
            ctrlc::set_handler(move || {
                eprintln!("\nReceived Ctrl+C, unmounting {}...", mountpoint_string);
                let status = Command::new("umount").arg(&mountpoint_string).status();
                match status {
                    Ok(s) if s.success() => {
                        eprintln!("Successfully unmounted {}", mountpoint_string);
                    }
                    Ok(s) => {
                        eprintln!("umount exited with status: {}", s);
                    }
                    Err(e) => {
                        eprintln!("Failed to run umount: {}", e);
                    }
                }
                // Exit from the handler; fuser::mount2 below blocks until unmount.
                std::process::exit(0);
            }).expect("Error setting Ctrl+C handler");

            // Select the storage backend. Unknown provider names silently fall
            // back to the in-memory provider (the `_` arm).
            let fs: FuseFS = match provider_name {
                "sqlite_simple" => {
                    println!("Using SQLite Simple provider");
                    let db_file = db_path.unwrap_or("cf-fuse-simple.db");
                    let sqlite = SqliteSimpleProvider::new_with_mode(db_file, osx_mode).expect("Failed to open SQLite DB");
                    FuseFS::new(Box::new(sqlite))
                },
                "sqlite_chunked" => {
                    println!("Using SQLite Chunked provider");
                    let db_file = db_path.unwrap_or("cf-fuse-chunked.db");
                    let sqlite = SqliteChunkedProvider::new_with_mode(db_file, osx_mode, chunk_size).expect("Failed to open SQLite DB");
                    FuseFS::new(Box::new(sqlite))
                },
                _ => {
                    println!("Using memory provider");
                    FuseFS::new(Box::new(MemoryProvider::new_with_mode(osx_mode)))
                }
            };
            info!("Mounting FS at {} with provider {}", mountpoint, provider_name);
            // Blocks until the filesystem is unmounted; AutoUnmount cleans up
            // the mountpoint if the process dies.
            fuser::mount2(fs, mountpoint, &[MountOption::FSName(format!("{}fs", provider_name)), MountOption::AutoUnmount]).unwrap();
        },
        Commands::ListProviders => {
            println!("Available providers:");
            println!(" memory - In-memory storage (default)");
            println!(" sqlite_simple - Simple SQLite storage");
            println!(" sqlite_chunked - Chunked SQLite storage");
        },
        Commands::Stats { db_path } => {
            if db_path.is_empty() {
                println!("Please specify a database path with --db-path");
                return;
            }
            println!("Stats for database: {}", db_path);
            // TODO: Implement stats command
        },
    }
}
124 |
--------------------------------------------------------------------------------
/packages/dofs-rust-client/src/fusefs.rs:
--------------------------------------------------------------------------------
1 | use fuser::{Filesystem, Request, ReplyAttr, ReplyEntry, ReplyDirectory, ReplyData, ReplyCreate, ReplyWrite};
2 | use crate::providers::Provider;
3 | use std::ffi::OsStr;
4 | use std::time::{SystemTime, UNIX_EPOCH};
5 |
/// FUSE filesystem shim: delegates every operation to a pluggable storage
/// provider and additionally serves a synthetic read-only `.fuse_ready` file.
pub struct FuseFS {
    // NOTE(review): `Box` has lost its generic argument — appears to be an
    // extraction artifact (likely `Box<dyn Provider>`); TODO confirm.
    pub provider: Box,
    // Mount wall-clock time in ms since the Unix epoch; its decimal string is
    // served as the contents of `.fuse_ready`.
    mount_time_ms: u128,
}
10 |
impl FuseFS {
    /// Wraps a storage provider and records the mount timestamp.
    // NOTE(review): `Box` is missing its generic argument here (extraction
    // artifact, likely `Box<dyn Provider>`) — TODO confirm.
    pub fn new(provider: Box) -> Self {
        let mount_time_ms = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis();
        Self { provider, mount_time_ms }
    }

    /// Builds attributes for the synthetic `.fuse_ready` file. Its size equals
    /// the length of `mount_time_ms` rendered as a decimal string, which is
    /// exactly what `read()` serves for this inode.
    fn fuse_ready_attr(&self) -> fuser::FileAttr {
        let mount_time = UNIX_EPOCH + std::time::Duration::from_millis(self.mount_time_ms as u64);
        fuser::FileAttr {
            ino: FUSE_READY_INO,
            size: self.mount_time_ms.to_string().len() as u64,
            blocks: 1,
            atime: mount_time,
            mtime: mount_time,
            ctime: mount_time,
            crtime: mount_time,
            kind: fuser::FileType::RegularFile,
            perm: 0o444, // read-only for everyone
            nlink: 1,
            // SAFETY: geteuid/getegid have no preconditions; unsafe only due to FFI.
            uid: unsafe { libc::geteuid() },
            gid: unsafe { libc::getegid() },
            rdev: 0,
            flags: 0,
            blksize: 512,
        }
    }
}
38 |
// Synthetic readiness marker: `.fuse_ready` under the root directory (parent
// inode 1) is served with the fixed inode number below.
const FUSE_READY_NAME: &str = ".fuse_ready";
const FUSE_READY_INO: u64 = 2;
41 |
// FUSE callbacks. Each either handles the synthetic `.fuse_ready` inode
// locally or forwards to the provider, dropping request-level arguments
// (fh, lock_owner, ...) that the providers do not use.
//
// NOTE(review): several `Option` parameters below have no generic argument —
// type parameters appear stripped during extraction (fuser's trait uses e.g.
// `Option<u64>`, `Option<u32>`, `Option<TimeOrNow>`, `Option<SystemTime>`);
// TODO restore against the fuser version pinned in Cargo.toml.
impl Filesystem for FuseFS {
    fn rmdir(&mut self, _req: &Request<'_>, parent: u64, name: &OsStr, reply: fuser::ReplyEmpty) {
        self.provider.rmdir(parent, name, reply)
    }
    // `.fuse_ready` can always be opened (fh 0, no flags); others defer to provider.
    fn open(&mut self, _req: &Request<'_>, ino: u64, _flags: i32, reply: fuser::ReplyOpen) {
        if ino == FUSE_READY_INO {
            reply.opened(0, 0);
            return;
        }
        self.provider.open(ino, reply)
    }
    fn flush(&mut self, _req: &Request<'_>, ino: u64, _fh: u64, _lock_owner: u64, reply: fuser::ReplyEmpty) {
        self.provider.flush(ino, reply)
    }
    fn release(&mut self, _req: &Request<'_>, ino: u64, _fh: u64, _flags: i32, _lock_owner: Option, _flush: bool, reply: fuser::ReplyEmpty) {
        self.provider.release(ino, reply)
    }
    // fh/chgtime/bkuptime are intentionally not forwarded to the provider.
    fn setattr(&mut self, _req: &Request<'_>, ino: u64, mode: Option, uid: Option, gid: Option, size: Option, atime: Option, mtime: Option, ctime: Option, _fh: Option, crtime: Option, _chgtime: Option, _bkuptime: Option, flags: Option, reply: ReplyAttr) {
        self.provider.setattr(ino, mode, uid, gid, size, atime, mtime, ctime, crtime, flags, reply)
    }
    // Intercepts `.fuse_ready` lookups under the root (inode 1) with a 1s TTL.
    fn lookup(&mut self, _req: &Request<'_>, parent: u64, name: &OsStr, reply: ReplyEntry) {
        if parent == 1 && name.to_str() == Some(FUSE_READY_NAME) {
            let attr = self.fuse_ready_attr();
            reply.entry(&std::time::Duration::from_secs(1), &attr, 0);
            return;
        }
        self.provider.lookup(parent, name, reply)
    }
    fn getattr(&mut self, _req: &Request<'_>, ino: u64, reply: ReplyAttr) {
        if ino == FUSE_READY_INO {
            let attr = self.fuse_ready_attr();
            reply.attr(&std::time::Duration::from_secs(1), &attr);
            return;
        }
        self.provider.getattr(ino, reply)
    }
    fn readdir(&mut self, _req: &Request<'_>, ino: u64, _fh: u64, offset: i64, reply: ReplyDirectory) {
        // NOTE(review): `.fuse_ready` is not injected into directory listings,
        // only into lookup/getattr/open/read — it is invisible to `ls`.
        self.provider.readdir(ino, offset, reply)
    }
    fn mkdir(&mut self, _req: &Request<'_>, parent: u64, name: &OsStr, mode: u32, umask: u32, reply: ReplyEntry) {
        self.provider.mkdir(parent, name, mode, umask, reply)
    }
    // NOTE(review): parameter order `flags: u32, umask: i32` looks swapped
    // relative to fuser's `create(..., mode: u32, umask: u32, flags: i32, ...)`;
    // the provider call mirrors this order, so verify both sides together.
    fn create(&mut self, _req: &Request<'_>, parent: u64, name: &OsStr, mode: u32, flags: u32, umask: i32, reply: ReplyCreate) {
        self.provider.create(parent, name, mode, flags, umask, reply)
    }
    // Serves the mount timestamp string for `.fuse_ready`, clamping the
    // requested [offset, offset+size) window to the data length.
    fn read(&mut self, _req: &Request<'_>, ino: u64, _fh: u64, offset: i64, size: u32, _flags: i32, _lock_owner: Option, reply: ReplyData) {
        if ino == FUSE_READY_INO {
            let data = self.mount_time_ms.to_string().into_bytes();
            let start = std::cmp::min(offset as usize, data.len());
            let end = std::cmp::min(start + size as usize, data.len());
            reply.data(&data[start..end]);
            return;
        }
        self.provider.read(ino, offset, size, reply)
    }
    fn write(&mut self, _req: &Request<'_>, ino: u64, _fh: u64, offset: i64, data: &[u8], _write_flags: u32, _flags: i32, _lock_owner: Option, reply: ReplyWrite) {
        self.provider.write(ino, offset, data, reply)
    }
    fn unlink(&mut self, _req: &Request<'_>, parent: u64, name: &OsStr, reply: fuser::ReplyEmpty) {
        self.provider.unlink(parent, name, reply)
    }
    fn rename(&mut self, _req: &Request<'_>, parent: u64, name: &OsStr, newparent: u64, newname: &OsStr, flags: u32, reply: fuser::ReplyEmpty) {
        self.provider.rename(parent, name, newparent, newname, flags, reply)
    }
    fn symlink(&mut self, _req: &Request<'_>, parent: u64, name: &OsStr, link: &std::path::Path, reply: ReplyEntry) {
        self.provider.symlink(parent, name, link, reply)
    }
    fn readlink(&mut self, _req: &Request<'_>, ino: u64, reply: ReplyData) {
        self.provider.readlink(ino, reply)
    }
}
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Durable Objects File System (dofs)
2 |
3 | A filesystem-like API for Cloudflare Durable Objects, supporting streaming reads and writes with chunked storage.
4 |
5 | ## Features
6 |
7 | - File and directory operations (read, write, mkdir, rmdir, stat, etc.)
8 | - Efficient chunked storage for large files
9 | - Streaming read and write support via ReadableStream and WritableStream
10 | - Designed for use in Durable Objects (DOs)
11 |
12 | ## Basic Usage
13 |
14 | The recommended way to add dofs to your Durable Object is using the `@Dofs` decorator:
15 |
16 | ```ts
17 | import { DurableObject } from 'cloudflare:workers'
18 | import { Dofs } from 'dofs'
19 |
20 | @Dofs({ chunkSize: 256 * 1024 })
21 | export class MyDurableObject extends DurableObject {
22 | // Your custom methods here
23 | // Access filesystem via this.getFs()
24 | }
25 | ```
26 |
27 | The `@Dofs` decorator:
28 |
29 | - Automatically creates the `fs` property in your Durable Object
30 | - Adds a `getFs()` method to access the filesystem instance
31 | - Accepts the same configuration options as the `Fs` constructor
32 | - Works directly with classes extending `DurableObject`
33 |
34 | ### Alternative: Using withDofs Helper
35 |
36 | For cases where you need more control or are working with existing class hierarchies, you can use the `withDofs` helper:
37 |
38 | ```ts
39 | import { DurableObject } from 'cloudflare:workers'
40 | import { withDofs } from 'dofs'
41 |
42 | // Create a concrete base class first
43 | class MyDurableObjectBase extends DurableObject {
44 | constructor(ctx: DurableObjectState, env: Env) {
45 | super(ctx, env)
46 | }
47 | }
48 |
49 | // Then extend it with dofs
50 | export class MyDurableObject extends withDofs(MyDurableObjectBase) {
51 | // Your custom methods here
52 | }
53 |
54 | // Or with configuration options:
55 | export class MyDurableObject extends withDofs(MyDurableObjectBase, { chunkSize: 256 * 1024 }) {
56 | // Your custom methods here
57 | }
58 | ```
59 |
60 | **Important:** Due to TypeScript declaration generation limitations, `withDofs` requires a concrete base class. You cannot pass the abstract `DurableObject` class directly to `withDofs`.
61 |
62 | Both approaches provide the same functionality:
63 |
64 | - Automatically creates the `fs` property in your Durable Object
65 | - Adds a `getFs()` method to access the filesystem instance
66 | - Accepts the same configuration options as the `Fs` constructor
67 |
68 | > Note: class instances can be [passed via RPC](https://developers.cloudflare.com/workers/runtime-apis/rpc/#class-instances) as long as they inherit from `RpcTarget` as `Fs` does.
69 |
70 | ### Advanced: Manual Setup
71 |
72 | For more control, you can manually create a dofs instance in your Durable Object:
73 |
74 | ```ts
75 | import { DurableObject } from 'cloudflare:workers'
76 | import { Fs } from 'dofs'
77 |
78 | export class MyDurableObject extends DurableObject {
79 | private fs: Fs
80 |
81 | constructor(ctx: DurableObjectState, env: Env) {
82 | super(ctx, env)
83 | this.fs = new Fs(ctx, env)
84 | }
85 |
86 | // Expose fs
87 | public getDofs() {
88 | return this.fs
89 | }
90 | }
91 | ```
92 |
93 | ## Configuration Options
94 |
95 | ### Chunk Size
96 |
97 | By default, the chunk size is 64 KB (64 × 1024 bytes). You can configure it by passing the `chunkSize` option (in bytes) to the `Fs` constructor:
98 |
99 | ```ts
100 | import { Fs } from 'dofs'
101 |
102 | const fs = new Fs(ctx, env, { chunkSize: 256 * 1024 }) // 256kb chunks
103 | ```
104 |
105 | **How chunk size affects query frequency and cost:**
106 |
107 | - Smaller chunk sizes mean more database queries per file read/write, which can increase Durable Object query costs and latency.
108 | - Larger chunk sizes reduce the number of queries (lower cost, better throughput), but may use more memory per operation and can be less efficient for small files or random access.
109 | - Choose a chunk size that balances your workload's cost, performance, and memory needs.
110 |
111 | > **Note:** Chunk size cannot be changed after the first file has been written to the filesystem. It is fixed for the lifetime of the filesystem instance.
112 |
113 | ### Device Size
114 |
115 | By default, the device size (total storage available) is 1GB (`1024 * 1024 * 1024` bytes). You can change this limit using the `setDeviceSize` method:
116 |
117 | ```ts
118 | fs.setDeviceSize(10 * 1024 * 1024 * 1024) // Set device size to 10GB
119 | ```
120 |
121 | - The device size must be set before writing data that would exceed the current limit.
122 | - If you try to write more data than the device size allows, an `ENOSPC` error will be thrown.
123 | - You can check the current device size and usage with `getDeviceStats()`.
124 |
125 | ```ts
126 | const stats = fs.getDeviceStats()
127 | console.log(stats.deviceSize, stats.spaceUsed, stats.spaceAvailable)
128 | ```
129 |
130 | > **Default:** 1GB if not set.
131 |
132 | ## Streaming Support
133 |
134 | - **Read:** `readFile(path)` returns a `ReadableStream` for efficient, chunked reading.
135 | - **Write:** `writeFile(path, stream)` accepts a `ReadableStream` for efficient, chunked writing.
136 | - You can also use `writeFile(path, data)` with a string or ArrayBuffer for non-streaming writes.
137 |
138 | ## API Reference
139 |
140 | **Note:** These are async from the CF Worker stub (RPC call), but are sync when called inside the Durable Object (direct call).
141 |
142 | - `readFile(path: string): ReadableStream`
143 | - `writeFile(path: string, data: string | ArrayBuffer | ReadableStream): void`
144 | - `read(path: string, options): ArrayBuffer` (non-streaming, offset/length)
145 | - `write(path: string, data, options): void` (non-streaming, offset)
146 | - `mkdir(path: string, options?): void`
147 | - `rmdir(path: string, options?): void`
148 | - `listDir(path: string, options?): string[]`
149 | - `stat(path: string): Stat`
150 | - `unlink(path: string): void`
151 | - `rename(oldPath: string, newPath: string): void`
152 | - `symlink(target: string, path: string): void`
153 | - `readlink(path: string): string`
154 |
155 | ## Projects that work with dofs
156 |
157 | - dterm
158 |
159 | ## Future Plans
160 |
161 | - In-memory block caching for improved read/write performance
162 | - Store small files (that fit in one block) directly in the inode table instead of the chunk table to reduce queries
163 | - `defrag()` method to allow changing chunk size and optimizing storage
164 |
--------------------------------------------------------------------------------
/packages/dofs/README.md:
--------------------------------------------------------------------------------
1 | # Durable Objects File System (dofs)
2 |
3 | A filesystem-like API for Cloudflare Durable Objects, supporting streaming reads and writes with chunked storage.
4 |
5 | ## Features
6 |
7 | - File and directory operations (read, write, mkdir, rmdir, stat, etc.)
8 | - Efficient chunked storage for large files
9 | - Streaming read and write support via ReadableStream and WritableStream
10 | - Designed for use in Durable Objects (DOs)
11 |
12 | ## Basic Usage
13 |
14 | The recommended way to add dofs to your Durable Object is using the `@Dofs` decorator:
15 |
16 | ```ts
17 | import { DurableObject } from 'cloudflare:workers'
18 | import { Dofs } from 'dofs'
19 |
20 | @Dofs({ chunkSize: 256 * 1024 })
21 | export class MyDurableObject extends DurableObject {
22 | // Your custom methods here
23 | // Access filesystem via this.getFs()
24 | }
25 | ```
26 |
27 | The `@Dofs` decorator:
28 |
29 | - Automatically creates the `fs` property in your Durable Object
30 | - Adds a `getFs()` method to access the filesystem instance
31 | - Accepts the same configuration options as the `Fs` constructor
32 | - Works directly with classes extending `DurableObject`
33 |
34 | ### Alternative: Using withDofs Helper
35 |
36 | For cases where you need more control or are working with existing class hierarchies, you can use the `withDofs` helper:
37 |
38 | ```ts
39 | import { DurableObject } from 'cloudflare:workers'
40 | import { withDofs } from 'dofs'
41 |
42 | // Create a concrete base class first
43 | class MyDurableObjectBase extends DurableObject {
44 | constructor(ctx: DurableObjectState, env: Env) {
45 | super(ctx, env)
46 | }
47 | }
48 |
49 | // Then extend it with dofs
50 | export class MyDurableObject extends withDofs(MyDurableObjectBase) {
51 | // Your custom methods here
52 | }
53 |
54 | // Or with configuration options:
55 | export class MyDurableObject extends withDofs(MyDurableObjectBase, { chunkSize: 256 * 1024 }) {
56 | // Your custom methods here
57 | }
58 | ```
59 |
60 | **Important:** Due to TypeScript declaration generation limitations, `withDofs` requires a concrete base class. You cannot pass the abstract `DurableObject` class directly to `withDofs`.
61 |
62 | Both approaches provide the same functionality:
63 |
64 | - Automatically creates the `fs` property in your Durable Object
65 | - Adds a `getFs()` method to access the filesystem instance
66 | - Accepts the same configuration options as the `Fs` constructor
67 |
68 | > Note: class instances can be [passed via RPC](https://developers.cloudflare.com/workers/runtime-apis/rpc/#class-instances) as long as they inherit from `RpcTarget` as `Fs` does.
69 |
70 | ### Advanced: Manual Setup
71 |
72 | For more control, you can manually create a dofs instance in your Durable Object:
73 |
74 | ```ts
75 | import { DurableObject } from 'cloudflare:workers'
76 | import { Fs } from 'dofs'
77 |
78 | export class MyDurableObject extends DurableObject {
79 | private fs: Fs
80 |
81 | constructor(ctx: DurableObjectState, env: Env) {
82 | super(ctx, env)
83 | this.fs = new Fs(ctx, env)
84 | }
85 |
86 | // Expose fs
87 | public getDofs() {
88 | return this.fs
89 | }
90 | }
91 | ```
92 |
93 | ## Configuration Options
94 |
95 | ### Chunk Size
96 |
97 | By default, the chunk size is 64 KB (64 × 1024 bytes). You can configure it by passing the `chunkSize` option (in bytes) to the `Fs` constructor:
98 |
99 | ```ts
100 | import { Fs } from 'dofs'
101 |
102 | const fs = new Fs(ctx, env, { chunkSize: 256 * 1024 }) // 256kb chunks
103 | ```
104 |
105 | **How chunk size affects query frequency and cost:**
106 |
107 | - Smaller chunk sizes mean more database queries per file read/write, which can increase Durable Object query costs and latency.
108 | - Larger chunk sizes reduce the number of queries (lower cost, better throughput), but may use more memory per operation and can be less efficient for small files or random access.
109 | - Choose a chunk size that balances your workload's cost, performance, and memory needs.
110 |
111 | > **Note:** Chunk size cannot be changed after the first file has been written to the filesystem. It is fixed for the lifetime of the filesystem instance.
112 |
113 | ### Device Size
114 |
115 | By default, the device size (total storage available) is 1GB (`1024 * 1024 * 1024` bytes). You can change this limit using the `setDeviceSize` method:
116 |
117 | ```ts
118 | fs.setDeviceSize(10 * 1024 * 1024 * 1024) // Set device size to 10GB
119 | ```
120 |
121 | - The device size must be set before writing data that would exceed the current limit.
122 | - If you try to write more data than the device size allows, an `ENOSPC` error will be thrown.
123 | - You can check the current device size and usage with `getDeviceStats()`.
124 |
125 | ```ts
126 | const stats = fs.getDeviceStats()
127 | console.log(stats.deviceSize, stats.spaceUsed, stats.spaceAvailable)
128 | ```
129 |
130 | > **Default:** 1GB if not set.
131 |
132 | ## Streaming Support
133 |
134 | - **Read:** `readFile(path)` returns a `ReadableStream` for efficient, chunked reading.
135 | - **Write:** `writeFile(path, stream)` accepts a `ReadableStream` for efficient, chunked writing.
136 | - You can also use `writeFile(path, data)` with a string or ArrayBuffer for non-streaming writes.
137 |
138 | ## API Reference
139 |
140 | **Note:** These are async from the CF Worker stub (RPC call), but are sync when called inside the Durable Object (direct call).
141 |
142 | - `readFile(path: string): ReadableStream`
143 | - `writeFile(path: string, data: string | ArrayBuffer | ReadableStream): void`
144 | - `read(path: string, options): ArrayBuffer` (non-streaming, offset/length)
145 | - `write(path: string, data, options): void` (non-streaming, offset)
146 | - `mkdir(path: string, options?): void`
147 | - `rmdir(path: string, options?): void`
148 | - `listDir(path: string, options?): string[]`
149 | - `stat(path: string): Stat`
150 | - `unlink(path: string): void`
151 | - `rename(oldPath: string, newPath: string): void`
152 | - `symlink(target: string, path: string): void`
153 | - `readlink(path: string): string`
154 |
155 | ## Projects that work with dofs
156 |
157 | - dterm
158 |
159 | ## Future Plans
160 |
161 | - In-memory block caching for improved read/write performance
162 | - Store small files (that fit in one block) directly in the inode table instead of the chunk table to reduce queries
163 | - `defrag()` method to allow changing chunk size and optimizing storage
164 |
--------------------------------------------------------------------------------
/packages/dofs-rust-client/src/providers/memory.rs:
--------------------------------------------------------------------------------
1 | use std::collections::{HashMap, BTreeMap};
2 | use std::path::PathBuf;
3 | use std::ffi::OsStr;
4 | use std::time::SystemTime;
5 | use fuser;
6 | use crate::providers::Provider;
7 |
8 | const ROOT_INODE: u64 = 1;
9 | const USER_INODE_START: u64 = 10;
10 |
/// Regular file stored entirely in memory.
#[derive(Debug, Clone)]
pub struct InMemoryFile {
    // NOTE(review): `Vec` lost its generic argument in extraction — likely
    // `Vec<u8>` given setattr resizes it with a 0 fill byte; TODO confirm.
    pub data: Vec,
    pub attr: fuser::FileAttr,
}
16 |
/// Directory node: maps child names to inode numbers (sorted via BTreeMap).
#[derive(Debug, Clone)]
pub struct InMemoryDir {
    // NOTE(review): BTreeMap generic arguments stripped in extraction — usage
    // in rmdir/lookup suggests `BTreeMap<String, u64>`; TODO confirm.
    pub children: BTreeMap,
    pub attr: fuser::FileAttr,
}
22 |
/// Symbolic link node; `target` holds the raw link destination string.
#[derive(Debug, Clone)]
pub struct InMemorySymlink {
    pub target: String,
    pub attr: fuser::FileAttr,
}
28 |
/// A filesystem node: exactly one of regular file, directory, or symlink.
#[derive(Debug, Clone)]
pub enum Node {
    File(InMemoryFile),
    Dir(InMemoryDir),
    Symlink(InMemorySymlink),
}
35 |
/// Fully in-memory provider backing the FUSE filesystem.
pub struct MemoryProvider {
    // Inode table. NOTE(review): HashMap generic arguments appear stripped in
    // extraction — usage suggests `HashMap<u64, Node>`; TODO confirm.
    pub inodes: HashMap,
    // Path -> inode index; populated only for "/" here, otherwise unused.
    #[allow(dead_code)]
    pub paths: HashMap,
    // Next inode number to hand out; starts at USER_INODE_START.
    pub next_inode: u64,
    // Extended attributes keyed by (inode, attribute name).
    #[allow(dead_code)]
    pub xattrs: HashMap<(u64, String), Vec>,
    // Enables macOS-specific behavior — semantics not visible in this file;
    // TODO confirm against callers.
    pub osx_mode: bool,
}
45 |
impl MemoryProvider {
    /// Convenience constructor with macOS mode disabled.
    #[allow(dead_code)]
    pub fn new() -> Self {
        Self::new_with_mode(false)
    }
    /// Creates the provider seeded with a root directory (inode ROOT_INODE,
    /// perm 0755) owned by the current effective uid/gid.
    pub fn new_with_mode(osx_mode: bool) -> Self {
        let mut inodes = HashMap::new();
        let mut paths = HashMap::new();
        let now = SystemTime::now();
        let root_attr = fuser::FileAttr {
            ino: ROOT_INODE,
            size: 0,
            blocks: 0,
            atime: now,
            mtime: now,
            ctime: now,
            crtime: now,
            kind: fuser::FileType::Directory,
            perm: 0o755,
            nlink: 2, // empty directories conventionally have link count 2
            // SAFETY: geteuid/getegid have no preconditions; unsafe only due to FFI.
            uid: unsafe { libc::geteuid() },
            gid: unsafe { libc::getegid() },
            rdev: 0,
            flags: 0,
            blksize: 512,
        };
        let root = Node::Dir(InMemoryDir {
            children: BTreeMap::new(),
            attr: root_attr,
        });
        inodes.insert(ROOT_INODE, root);
        paths.insert(PathBuf::from("/"), ROOT_INODE);
        // User inodes start above the reserved low range (root = 1, etc.).
        Self { inodes, paths, next_inode: USER_INODE_START, xattrs: HashMap::new(), osx_mode }
    }
    /// Hands out the next inode number; freed numbers are never reused.
    pub fn alloc_inode(&mut self) -> u64 {
        let ino = self.next_inode;
        self.next_inode += 1;
        ino
    }
}
86 |
87 | impl Provider for MemoryProvider {
88 | fn rmdir(&mut self, parent: u64, name: &OsStr, reply: fuser::ReplyEmpty) {
89 | let name_str = name.to_str().unwrap_or("");
90 | let target_ino = if let Some(Node::Dir(parent_dir)) = self.inodes.get(&parent) {
91 | parent_dir.children.get(name_str).copied()
92 | } else {
93 | reply.error(libc::ENOENT);
94 | return;
95 | };
96 | let ino = match target_ino {
97 | Some(ino) => ino,
98 | None => {
99 | reply.error(libc::ENOENT);
100 | return;
101 | }
102 | };
103 | let is_empty_dir = if let Some(Node::Dir(dir)) = self.inodes.get(&ino) {
104 | dir.children.is_empty()
105 | } else {
106 | reply.error(libc::ENOTDIR);
107 | return;
108 | };
109 | if !is_empty_dir {
110 | reply.error(libc::ENOTEMPTY);
111 | return;
112 | }
113 | if let Some(Node::Dir(parent_dir)) = self.inodes.get_mut(&parent) {
114 | parent_dir.children.remove(name_str);
115 | }
116 | self.inodes.remove(&ino);
117 | reply.ok();
118 | }
119 | fn open(&mut self, ino: u64, reply: fuser::ReplyOpen) {
120 | if self.inodes.contains_key(&ino) {
121 | reply.opened(0, 0);
122 | } else {
123 | reply.error(libc::ENOENT);
124 | }
125 | }
126 | fn flush(&mut self, ino: u64, reply: fuser::ReplyEmpty) {
127 | if self.inodes.contains_key(&ino) {
128 | reply.ok();
129 | } else {
130 | reply.error(libc::ENOENT);
131 | }
132 | }
133 | fn release(&mut self, ino: u64, reply: fuser::ReplyEmpty) {
134 | if self.inodes.contains_key(&ino) {
135 | reply.ok();
136 | } else {
137 | reply.error(libc::ENOENT);
138 | }
139 | }
// Applies attribute changes (chmod/chown/truncate/utimens) to any node kind.
//
// NOTE(review): the `Option` parameters below have lost their generic
// arguments (extraction artifact; fuser uses Option<u32>/Option<u64>/
// Option<TimeOrNow>/Option<SystemTime>) — TODO restore.
fn setattr(&mut self, ino: u64, mode: Option, uid: Option, gid: Option, size: Option, atime: Option, mtime: Option, ctime: Option, crtime: Option, flags: Option, reply: fuser::ReplyAttr) {
    // Resolves fuser's TimeOrNow into a concrete timestamp.
    fn timeornow_to_systemtime(t: fuser::TimeOrNow) -> SystemTime {
        match t {
            fuser::TimeOrNow::SpecificTime(st) => st,
            fuser::TimeOrNow::Now => SystemTime::now(),
        }
    }
    // Clamps timestamps: more than ~100 years in the future collapses to
    // "now"; pre-epoch times collapse to the epoch.
    fn safe_systemtime(t: SystemTime) -> SystemTime {
        // Ensure timestamp is within valid range
        let now = SystemTime::now();
        if let Ok(duration_since_epoch) = t.duration_since(std::time::UNIX_EPOCH) {
            if duration_since_epoch.as_secs() > now.duration_since(std::time::UNIX_EPOCH).unwrap_or_default().as_secs() + (100 * 365 * 24 * 3600) {
                now
            } else {
                t
            }
        } else {
            std::time::UNIX_EPOCH
        }
    }
    if let Some(node) = self.inodes.get_mut(&ino) {
        match node {
            Node::File(f) => {
                // Truncate/extend the backing buffer; extension zero-fills.
                if let Some(new_size) = size {
                    f.data.resize(new_size as usize, 0);
                    f.attr.size = new_size;
                }
                if let Some(m) = mode { f.attr.perm = m as u16; }
                if let Some(u) = uid { f.attr.uid = u; }
                if let Some(g) = gid { f.attr.gid = g; }
                // atime/mtime accept TimeOrNow; ctime/crtime arrive as
                // SystemTime and are range-clamped.
                if let Some(a) = atime { f.attr.atime = timeornow_to_systemtime(a); }
                if let Some(m) = mtime { f.attr.mtime = timeornow_to_systemtime(m); }
                if let Some(c) = ctime { f.attr.ctime = safe_systemtime(c); }
                if let Some(cr) = crtime { f.attr.crtime = safe_systemtime(cr); }
                if let Some(fg) = flags { f.attr.flags = fg; }
                reply.attr(&std::time::Duration::from_secs(1), &f.attr);
            }
            // Directories and symlinks: same attribute handling, minus size.
            Node::Dir(d) => {
                if let Some(m) = mode { d.attr.perm = m as u16; }
                if let Some(u) = uid { d.attr.uid = u; }
                if let Some(g) = gid { d.attr.gid = g; }
                if let Some(a) = atime { d.attr.atime = timeornow_to_systemtime(a); }
                if let Some(m) = mtime { d.attr.mtime = timeornow_to_systemtime(m); }
                if let Some(c) = ctime { d.attr.ctime = safe_systemtime(c); }
                if let Some(cr) = crtime { d.attr.crtime = safe_systemtime(cr); }
                if let Some(fg) = flags { d.attr.flags = fg; }
                reply.attr(&std::time::Duration::from_secs(1), &d.attr);
            }
            Node::Symlink(s) => {
                if let Some(m) = mode { s.attr.perm = m as u16; }
                if let Some(u) = uid { s.attr.uid = u; }
                if let Some(g) = gid { s.attr.gid = g; }
                if let Some(a) = atime { s.attr.atime = timeornow_to_systemtime(a); }
                if let Some(m) = mtime { s.attr.mtime = timeornow_to_systemtime(m); }
                if let Some(c) = ctime { s.attr.ctime = safe_systemtime(c); }
                if let Some(cr) = crtime { s.attr.crtime = safe_systemtime(cr); }
                if let Some(fg) = flags { s.attr.flags = fg; }
                reply.attr(&std::time::Duration::from_secs(1), &s.attr);
            }
        }
    } else {
        reply.error(libc::ENOENT);
    }
}
204 | fn lookup(&mut self, parent: u64, name: &OsStr, reply: fuser::ReplyEntry) {
205 | let name = name.to_str().unwrap_or("");
206 | let parent_node = self.inodes.get(&parent);
207 | if let Some(Node::Dir(dir)) = parent_node {
208 | if let Some(&child_ino) = dir.children.get(name) {
209 | if let Some(node) = self.inodes.get(&child_ino) {
210 | let attr = match node {
211 | Node::File(f) => f.attr,
212 | Node::Dir(d) => d.attr,
213 | Node::Symlink(s) => s.attr,
214 | };
215 | reply.entry(&std::time::Duration::from_secs(1), &attr, 0);
216 | return;
217 | }
218 | }
219 | }
220 | reply.error(libc::ENOENT);
221 | }
222 | fn getattr(&mut self, ino: u64, reply: fuser::ReplyAttr) {
223 | if let Some(node) = self.inodes.get(&ino) {
224 | let attr = match node {
225 | Node::File(f) => f.attr,
226 | Node::Dir(d) => d.attr,
227 | Node::Symlink(s) => s.attr,
228 | };
229 | reply.attr(&std::time::Duration::from_secs(1), &attr);
230 | } else {
231 | reply.error(libc::ENOENT);
232 | }
233 | }
234 | fn readdir(&mut self, ino: u64, offset: i64, mut reply: fuser::ReplyDirectory) {
235 | if let Some(Node::Dir(dir)) = self.inodes.get(&ino) {
236 | let mut entries = vec![(ROOT_INODE, fuser::FileType::Directory, ".".to_string()), (ROOT_INODE, fuser::FileType::Directory, "..".to_string())];
237 | for (name, &child_ino) in &dir.children {
238 | if self.osx_mode && name.starts_with("._") {
239 | continue;
240 | }
241 | let node = self.inodes.get(&child_ino).unwrap();
242 | let kind = match node {
243 | Node::File(_) => fuser::FileType::RegularFile,
244 | Node::Dir(_) => fuser::FileType::Directory,
245 | Node::Symlink(_) => fuser::FileType::Symlink,
246 | };
247 | entries.push((child_ino, kind, name.clone()));
248 | }
249 | for (i, (ino, kind, name)) in entries.into_iter().enumerate().skip(offset as usize) {
250 | if reply.add(ino, (i + 1) as i64, kind, name) {
251 | break;
252 | }
253 | }
254 | reply.ok();
255 | } else {
256 | reply.error(libc::ENOENT);
257 | }
258 | }
259 | fn mkdir(&mut self, parent: u64, name: &OsStr, mode: u32, umask: u32, reply: fuser::ReplyEntry) {
260 | let name_str = name.to_str().unwrap_or("");
261 | if self.osx_mode && name_str.starts_with("._") {
262 | reply.error(libc::EACCES);
263 | return;
264 | }
265 | let already_exists = if let Some(Node::Dir(dir)) = self.inodes.get(&parent) {
266 | dir.children.contains_key(name_str)
267 | } else {
268 | reply.error(libc::ENOENT);
269 | return;
270 | };
271 | if already_exists {
272 | reply.error(libc::EEXIST);
273 | return;
274 | }
275 | let ino = self.alloc_inode();
276 | let now = SystemTime::now();
277 | let attr = fuser::FileAttr {
278 | ino,
279 | size: 0,
280 | blocks: 0,
281 | atime: now,
282 | mtime: now,
283 | ctime: now,
284 | crtime: now,
285 | kind: fuser::FileType::Directory,
286 | perm: (mode & !umask & 0o7777) as u16,
287 | nlink: 2,
288 | uid: unsafe { libc::geteuid() },
289 | gid: unsafe { libc::getegid() },
290 | rdev: 0,
291 | flags: 0,
292 | blksize: 512,
293 | };
294 | let new_dir = Node::Dir(InMemoryDir {
295 | children: BTreeMap::new(),
296 | attr,
297 | });
298 | if let Some(Node::Dir(dir)) = self.inodes.get_mut(&parent) {
299 | dir.children.insert(name_str.to_string(), ino);
300 | }
301 | self.inodes.insert(ino, new_dir);
302 | reply.entry(&std::time::Duration::from_secs(1), &attr, 0);
303 | }
304 | fn create(&mut self, parent: u64, name: &OsStr, mode: u32, _flags: u32, umask: i32, reply: fuser::ReplyCreate) {
305 | let name_str = name.to_str().unwrap_or("");
306 | if self.osx_mode && name_str.starts_with("._") {
307 | reply.error(libc::EACCES);
308 | return;
309 | }
310 | let already_exists = if let Some(Node::Dir(dir)) = self.inodes.get(&parent) {
311 | dir.children.contains_key(name_str)
312 | } else {
313 | reply.error(libc::ENOENT);
314 | return;
315 | };
316 | if already_exists {
317 | reply.error(libc::EEXIST);
318 | return;
319 | }
320 | let ino = self.alloc_inode();
321 | let now = SystemTime::now();
322 | let attr = fuser::FileAttr {
323 | ino,
324 | size: 0,
325 | blocks: 0,
326 | atime: now,
327 | mtime: now,
328 | ctime: now,
329 | crtime: now,
330 | kind: fuser::FileType::RegularFile,
331 | perm: (mode & !(umask as u32) & 0o7777) as u16,
332 | nlink: 1,
333 | uid: unsafe { libc::geteuid() },
334 | gid: unsafe { libc::getegid() },
335 | rdev: 0,
336 | flags: 0,
337 | blksize: 512,
338 | };
339 | let new_file = Node::File(InMemoryFile {
340 | data: vec![],
341 | attr,
342 | });
343 | if let Some(Node::Dir(dir)) = self.inodes.get_mut(&parent) {
344 | dir.children.insert(name_str.to_string(), ino);
345 | }
346 | self.inodes.insert(ino, new_file);
347 | reply.created(&std::time::Duration::from_secs(1), &attr, 0, 0, 0);
348 | }
349 | fn read(&mut self, ino: u64, offset: i64, size: u32, reply: fuser::ReplyData) {
350 | if let Some(Node::File(file)) = self.inodes.get(&ino) {
351 | let data = &file.data;
352 | let end = std::cmp::min((offset as usize) + (size as usize), data.len());
353 | let start = std::cmp::min(offset as usize, data.len());
354 | reply.data(&data[start..end]);
355 | } else {
356 | reply.error(libc::ENOENT);
357 | }
358 | }
359 | fn write(&mut self, ino: u64, offset: i64, data: &[u8], reply: fuser::ReplyWrite) {
360 | if let Some(Node::File(file)) = self.inodes.get_mut(&ino) {
361 | let offset = offset as usize;
362 | if file.data.len() < offset + data.len() {
363 | file.data.resize(offset + data.len(), 0);
364 | }
365 | file.data[offset..offset + data.len()].copy_from_slice(data);
366 | file.attr.size = file.data.len() as u64;
367 | reply.written(data.len() as u32);
368 | } else {
369 | reply.error(libc::ENOENT);
370 | }
371 | }
372 | fn unlink(&mut self, parent: u64, name: &OsStr, reply: fuser::ReplyEmpty) {
373 | let name_str = name.to_str().unwrap_or("");
374 | let target_ino = if let Some(Node::Dir(parent_dir)) = self.inodes.get(&parent) {
375 | parent_dir.children.get(name_str).copied()
376 | } else {
377 | reply.error(libc::ENOENT);
378 | return;
379 | };
380 | let ino = match target_ino {
381 | Some(ino) => ino,
382 | None => {
383 | reply.error(libc::ENOENT);
384 | return;
385 | }
386 | };
387 | match self.inodes.get(&ino) {
388 | Some(Node::File(_)) | Some(Node::Symlink(_)) => {
389 | if let Some(Node::Dir(parent_dir)) = self.inodes.get_mut(&parent) {
390 | parent_dir.children.remove(name_str);
391 | }
392 | self.inodes.remove(&ino);
393 | reply.ok();
394 | }
395 | Some(Node::Dir(_)) => {
396 | reply.error(libc::EISDIR);
397 | }
398 | None => {
399 | reply.error(libc::ENOENT);
400 | }
401 | }
402 | }
403 | fn rename(&mut self, parent: u64, name: &OsStr, newparent: u64, newname: &OsStr, _flags: u32, reply: fuser::ReplyEmpty) {
404 | let name_str = name.to_str().unwrap_or("");
405 | let newname_str = newname.to_str().unwrap_or("");
406 | // Get source parent dir
407 | let src_is_dir = matches!(self.inodes.get(&parent), Some(Node::Dir(_)));
408 | let dst_is_dir = matches!(self.inodes.get(&newparent), Some(Node::Dir(_)));
409 | if !src_is_dir || !dst_is_dir {
410 | reply.error(libc::ENOTDIR);
411 | return;
412 | }
413 | // Check source exists and get inode
414 | let ino = {
415 | let src_parent = match self.inodes.get(&parent) {
416 | Some(Node::Dir(dir)) => dir,
417 | _ => { reply.error(libc::ENOTDIR); return; }
418 | };
419 | match src_parent.children.get(name_str) {
420 | Some(&ino) => ino,
421 | None => { reply.error(libc::ENOENT); return; }
422 | }
423 | };
424 | // Check dest exists
425 | let dest_exists = {
426 | let dst_parent = match self.inodes.get(&newparent) {
427 | Some(Node::Dir(dir)) => dir,
428 | _ => { reply.error(libc::ENOTDIR); return; }
429 | };
430 | dst_parent.children.contains_key(newname_str)
431 | };
432 | if dest_exists {
433 | reply.error(libc::EEXIST);
434 | return;
435 | }
436 | // Now do the mutation
437 | if let Some(Node::Dir(src_parent)) = self.inodes.get_mut(&parent) {
438 | src_parent.children.remove(name_str);
439 | }
440 | if let Some(Node::Dir(dst_parent)) = self.inodes.get_mut(&newparent) {
441 | dst_parent.children.insert(newname_str.to_string(), ino);
442 | }
443 | reply.ok();
444 | }
445 | fn symlink(&mut self, parent: u64, name: &OsStr, link: &std::path::Path, reply: fuser::ReplyEntry) {
446 | let name_str = name.to_str().unwrap_or("");
447 | if self.osx_mode && name_str.starts_with("._") {
448 | reply.error(libc::EACCES);
449 | return;
450 | }
451 | let already_exists = if let Some(Node::Dir(dir)) = self.inodes.get(&parent) {
452 | dir.children.contains_key(name_str)
453 | } else {
454 | reply.error(libc::ENOENT);
455 | return;
456 | };
457 | if already_exists {
458 | reply.error(libc::EEXIST);
459 | return;
460 | }
461 | let ino = self.alloc_inode();
462 | let now = SystemTime::now();
463 | let target = link.to_string_lossy().to_string();
464 | let attr = fuser::FileAttr {
465 | ino,
466 | size: target.len() as u64,
467 | blocks: 0,
468 | atime: now,
469 | mtime: now,
470 | ctime: now,
471 | crtime: now,
472 | kind: fuser::FileType::Symlink,
473 | perm: 0o777,
474 | nlink: 1,
475 | uid: unsafe { libc::geteuid() },
476 | gid: unsafe { libc::getegid() },
477 | rdev: 0,
478 | flags: 0,
479 | blksize: 512,
480 | };
481 | let symlink = Node::Symlink(InMemorySymlink { target, attr });
482 | if let Some(Node::Dir(dir)) = self.inodes.get_mut(&parent) {
483 | dir.children.insert(name_str.to_string(), ino);
484 | }
485 | self.inodes.insert(ino, symlink);
486 | reply.entry(&std::time::Duration::from_secs(1), &attr, 0);
487 | }
488 | fn readlink(&mut self, ino: u64, reply: fuser::ReplyData) {
489 | if let Some(Node::Symlink(s)) = self.inodes.get(&ino) {
490 | reply.data(s.target.as_bytes());
491 | } else {
492 | reply.error(libc::EINVAL);
493 | }
494 | }
495 | }
--------------------------------------------------------------------------------
/packages/dofs-rust-client/tests/integration_stress.rs:
--------------------------------------------------------------------------------
1 | use std::process::{Command, Stdio};
2 | use std::time::{Duration, Instant};
3 | use std::fs::{self, File, create_dir, read_dir, remove_dir, OpenOptions, rename, remove_file, metadata};
4 | use std::io::{Read, Write};
5 | use std::io::Seek;
6 | use prettytable::{Table, Row, Cell};
7 | use libc;
8 | use std::os::unix::fs::symlink;
9 | use rand::{Rng, SeedableRng};
10 | use std::sync::{Arc, Barrier};
11 | use std::thread;
12 |
13 | const MOUNTPOINT: &str = "./mnt";
14 | const TEST_FILE: &str = "./mnt/testfile";
15 | const TEST_DIR: &str = "./mnt/testdir";
16 |
/// Outcome of one stress test run against one provider.
///
/// Fix: the `error` field's `Option` was missing its generic argument
/// (invalid Rust); restored as `Option<String>`, matching the `Some(e)`
/// with `e: String` / `None` values stored by `integration_stress`.
#[derive(Clone)]
struct ProviderTestResult {
    elapsed: Duration,     // wall-clock duration of the test body
    success: bool,         // true when the test returned Ok (or was skipped)
    error: Option<String>, // failure message when success == false
}
23 |
/// A named stress-test case, optionally skipped for specific providers.
struct StressTest {
    // Human-readable test name, used in logs and the summary table.
    name: &'static str,
    // Test body; returns Err with a human-readable message on failure.
    func: fn() -> Result<(), String>,
    // Provider CLI names (e.g. "sqlite_simple") for which this test is skipped.
    skip_providers: Option<&'static [&'static str]>,
}
29 |
/// Spawn the FUSE binary via `cargo run` for the given provider, with
/// stdout/stderr silenced. Passes `--db-path` only when one is supplied.
fn run_fuse_with_provider(provider: &str, db_path: Option<&str>) -> std::process::Child {
    let mut cmd = Command::new("cargo");
    cmd.args(["run", "--quiet", "--", "--mode-osx", "--provider", provider]);
    if let Some(path) = db_path {
        cmd.args(["--db-path", path]);
    }
    cmd.stdout(Stdio::null());
    cmd.stderr(Stdio::null());
    cmd.spawn().expect("Failed to start fuse process")
}
41 |
42 | fn wait_for_mount() {
43 | for _ in 0..40 {
44 | if let Ok(mut file) = File::open(format!("{}/.fuse_ready", MOUNTPOINT)) {
45 | let mut contents = String::new();
46 | if file.read_to_string(&mut contents).is_ok() {
47 | println!("Found .fuse_ready with contents: {}", contents);
48 | return;
49 | }
50 | }
51 | std::thread::sleep(Duration::from_millis(100));
52 | }
53 | panic!("Mountpoint not available or .fuse_ready not present");
54 | }
55 |
56 | fn wait_for_unmount() {
57 | for _ in 0..40 {
58 | if std::fs::metadata(format!("{}/.fuse_ready", MOUNTPOINT)).is_err() {
59 | return;
60 | }
61 | std::thread::sleep(Duration::from_millis(100));
62 | }
63 | panic!("Mountpoint still present or .fuse_ready still exists");
64 | }
65 |
66 | fn clean_setup(db_path: Option<&str>) {
67 | let _ = fs::remove_file("cf-fuse-simple.db");
68 | let _ = fs::remove_file("cf-fuse-chunked.db");
69 | if let Some(path) = db_path {
70 | let _ = fs::remove_file(path);
71 | }
72 | let _ = fs::remove_dir_all(MOUNTPOINT);
73 | let _ = fs::create_dir_all(MOUNTPOINT);
74 | }
75 |
76 | fn file_create_write_read_delete() -> Result<(), String> {
77 | // Create file
78 | let mut file = File::create(TEST_FILE).map_err(|e| format!("create: {e}"))?;
79 | // Write data
80 | let data = vec![42u8; 1024 * 1024];
81 | file.write_all(&data).map_err(|e| format!("write: {e}"))?;
82 | drop(file);
83 | // Read data
84 | let mut file = File::open(TEST_FILE).map_err(|e| format!("open: {e}"))?;
85 | let mut buf = Vec::new();
86 | file.read_to_end(&mut buf).map_err(|e| format!("read: {e}"))?;
87 | if buf != data {
88 | return Err("data mismatch".to_string());
89 | }
90 | // Remove file
91 | fs::remove_file(TEST_FILE).map_err(|e| format!("remove: {e}"))?;
92 | Ok(())
93 | }
94 |
95 | fn dir_create_list_delete() -> Result<(), String> {
96 | // Create directory
97 | create_dir(TEST_DIR).map_err(|e| format!("create_dir: {e}"))?;
98 | // List directory
99 | let entries: Vec<_> = read_dir("./mnt").map_err(|e| format!("read_dir: {e}"))?.collect();
100 | if !entries.iter().filter_map(|e| e.as_ref().ok()).any(|e| e.file_name() == "testdir") {
101 | return Err("directory not found in listing".to_string());
102 | }
103 | // Remove directory
104 | remove_dir(TEST_DIR).map_err(|e| format!("remove_dir: {e}"))?;
105 | Ok(())
106 | }
107 |
108 | fn file_append_read_delete() -> Result<(), String> {
109 | // Create file and write initial data
110 | let mut file = File::create(TEST_FILE).map_err(|e| format!("create: {e}"))?;
111 | let data1 = vec![1u8; 512 * 1024];
112 | file.write_all(&data1).map_err(|e| format!("write1: {e}"))?;
113 | drop(file);
114 | // Append data
115 | let mut file = OpenOptions::new().append(true).open(TEST_FILE).map_err(|e| format!("open append: {e}"))?;
116 | let data2 = vec![2u8; 512 * 1024];
117 | file.write_all(&data2).map_err(|e| format!("write2: {e}"))?;
118 | drop(file);
119 | // Read back and check
120 | let mut file = File::open(TEST_FILE).map_err(|e| format!("open: {e}"))?;
121 | let mut buf = Vec::new();
122 | file.read_to_end(&mut buf).map_err(|e| format!("read: {e}"))?;
123 | if buf.len() != 1024 * 1024 || &buf[..512*1024] != &data1[..] || &buf[512*1024..] != &data2[..] {
124 | return Err("data mismatch after append".to_string());
125 | }
126 | // Remove file
127 | fs::remove_file(TEST_FILE).map_err(|e| format!("remove: {e}"))?;
128 | Ok(())
129 | }
130 |
131 | fn file_truncate_shrink_read_delete() -> Result<(), String> {
132 | use std::fs::OpenOptions;
133 | // Create file and write data
134 | let mut file = File::create(TEST_FILE).map_err(|e| format!("create: {e}"))?;
135 | let data = vec![7u8; 1024 * 1024];
136 | file.write_all(&data).map_err(|e| format!("write: {e}"))?;
137 | drop(file);
138 | // Truncate to half
139 | let file = OpenOptions::new().write(true).open(TEST_FILE).map_err(|e| format!("open: {e}"))?;
140 | file.set_len(512 * 1024).map_err(|e| format!("truncate: {e}"))?;
141 | drop(file);
142 | // Read back and check
143 | let mut file = File::open(TEST_FILE).map_err(|e| format!("open: {e}"))?;
144 | let mut buf = Vec::new();
145 | file.read_to_end(&mut buf).map_err(|e| format!("read: {e}"))?;
146 | if buf.len() != 512 * 1024 || !buf.iter().all(|&b| b == 7) {
147 | return Err("data mismatch after truncate".to_string());
148 | }
149 | // Remove file
150 | fs::remove_file(TEST_FILE).map_err(|e| format!("remove: {e}"))?;
151 | Ok(())
152 | }
153 |
154 | fn file_truncate_grow_read_delete() -> Result<(), String> {
155 | use std::fs::OpenOptions;
156 | // Create file and write small data
157 | let mut file = File::create(TEST_FILE).map_err(|e| format!("create: {e}"))?;
158 | let data = vec![9u8; 512 * 1024];
159 | file.write_all(&data).map_err(|e| format!("write: {e}"))?;
160 | drop(file);
161 | // Grow file to 1MB
162 | let file = OpenOptions::new().write(true).open(TEST_FILE).map_err(|e| format!("open: {e}"))?;
163 | file.set_len(1024 * 1024).map_err(|e| format!("truncate: {e}"))?;
164 | drop(file);
165 | // Read back and check
166 | let mut file = File::open(TEST_FILE).map_err(|e| format!("open: {e}"))?;
167 | let mut buf = Vec::new();
168 | file.read_to_end(&mut buf).map_err(|e| format!("read: {e}"))?;
169 | if buf.len() != 1024 * 1024 || &buf[..512*1024] != &data[..] || !buf[512*1024..].iter().all(|&b| b == 0) {
170 | return Err("data mismatch after grow".to_string());
171 | }
172 | // Remove file
173 | fs::remove_file(TEST_FILE).map_err(|e| format!("remove: {e}"))?;
174 | Ok(())
175 | }
176 |
177 | fn file_rename_check_delete() -> Result<(), String> {
178 | const RENAMED_FILE: &str = "./mnt/testfile_renamed";
179 | // Create file
180 | let mut file = File::create(TEST_FILE).map_err(|e| format!("create: {e}"))?;
181 | file.write_all(b"hello").map_err(|e| format!("write: {e}"))?;
182 | drop(file);
183 | // Rename file
184 | rename(TEST_FILE, RENAMED_FILE).map_err(|e| format!("rename: {e}"))?;
185 | // Check new name exists
186 | metadata(RENAMED_FILE).map_err(|e| format!("metadata: {e}"))?;
187 | // Remove file
188 | remove_file(RENAMED_FILE).map_err(|e| format!("remove: {e}"))?;
189 | Ok(())
190 | }
191 |
192 | fn symlink_create_read_delete() -> Result<(), String> {
193 | const SYMLINK_PATH: &str = "./mnt/testfile_symlink";
194 | // Create file to point to
195 | let mut file = File::create(TEST_FILE).map_err(|e| format!("create: {e}"))?;
196 | file.write_all(b"symlink target").map_err(|e| format!("write: {e}"))?;
197 | drop(file);
198 | // Create symlink
199 | symlink(TEST_FILE, SYMLINK_PATH).map_err(|e| format!("symlink: {e}"))?;
200 | // Read symlink
201 | let target = fs::read_link(SYMLINK_PATH).map_err(|e| format!("read_link: {e}"))?;
202 | if target != std::path::Path::new(TEST_FILE) {
203 | return Err("symlink target mismatch".to_string());
204 | }
205 | // Remove symlink
206 | fs::remove_file(SYMLINK_PATH).map_err(|e| format!("remove symlink: {e}"))?;
207 | // Remove target file
208 | fs::remove_file(TEST_FILE).map_err(|e| format!("remove: {e}"))?;
209 | Ok(())
210 | }
211 |
212 | fn file_create_write_read_delete_size(size: usize) -> Result<(), String> {
213 | // Create file
214 | let mut file = File::create(TEST_FILE).map_err(|e| format!("create: {e}"))?;
215 | // Write data of given size
216 | let data = vec![55u8; size];
217 | file.write_all(&data).map_err(|e| format!("write: {e}"))?;
218 | drop(file);
219 | // Read data
220 | let mut file = File::open(TEST_FILE).map_err(|e| format!("open: {e}"))?;
221 | let mut buf = Vec::new();
222 | file.read_to_end(&mut buf).map_err(|e| format!("read: {e}"))?;
223 | if buf != data {
224 | return Err("data mismatch".to_string());
225 | }
226 | drop(file);
227 | // Random access write: overwrite 10 random positions with unique values
228 | let mut rng = rand::rngs::StdRng::seed_from_u64(42);
229 | let mut file = std::fs::OpenOptions::new().read(true).write(true).open(TEST_FILE).map_err(|e| format!("open for random write: {e}"))?;
230 | let mut random_indices = vec![];
231 | for i in 0..10 {
232 | let idx = rng.gen_range(0..size);
233 | random_indices.push(idx);
234 | file.seek(std::io::SeekFrom::Start(idx as u64)).map_err(|e| format!("seek: {e}"))?;
235 | file.write_all(&[i as u8]).map_err(|e| format!("random write: {e}"))?;
236 | }
237 | drop(file);
238 | // Random access read: verify the 10 random positions
239 | let mut file = std::fs::OpenOptions::new().read(true).open(TEST_FILE).map_err(|e| format!("open for random read: {e}"))?;
240 | for (i, &idx) in random_indices.iter().enumerate() {
241 | file.seek(std::io::SeekFrom::Start(idx as u64)).map_err(|e| format!("seek: {e}"))?;
242 | let mut b = [0u8; 1];
243 | file.read_exact(&mut b).map_err(|e| format!("random read: {e}"))?;
244 | if b[0] != i as u8 {
245 | return Err(format!("random access data mismatch at {idx}: expected {} got {}", i as u8, b[0]));
246 | }
247 | }
248 | drop(file);
249 | // Remove file
250 | fs::remove_file(TEST_FILE).map_err(|e| format!("remove: {e}"))?;
251 | Ok(())
252 | }
253 |
254 | fn file_create_write_read_delete_large() -> Result<(), String> {
255 | // 100MB
256 | file_create_write_read_delete_size(100 * 1024 * 1024)
257 | }
258 |
259 | fn concurrent_file_access() -> Result<(), String> {
260 | let num_threads = 8;
261 | let iterations = 1000;
262 | let barrier = Arc::new(Barrier::new(num_threads));
263 | // Create file
264 | let mut file = File::create(TEST_FILE).map_err(|e| format!("create: {e}"))?;
265 | file.write_all(&[0u8; 4096]).map_err(|e| format!("init write: {e}"))?;
266 | drop(file);
267 | let mut handles = vec![];
268 | for tid in 0..num_threads {
269 | let barrier = barrier.clone();
270 | handles.push(thread::spawn(move || {
271 | barrier.wait();
272 | for i in 0..iterations {
273 | let mut file = OpenOptions::new().read(true).write(true).open(TEST_FILE).map_err(|e| format!("open: {e}"))?;
274 | let pos = ((tid * 512 + i) % 4096) as u64;
275 | file.seek(std::io::SeekFrom::Start(pos)).map_err(|e| format!("seek: {e}"))?;
276 | let val = (tid as u8) ^ (i as u8);
277 | file.write_all(&[val]).map_err(|e| format!("write: {e}"))?;
278 | }
279 | Ok::<(), String>(())
280 | }));
281 | }
282 | for h in handles {
283 | h.join().map_err(|_| "thread panic".to_string())??;
284 | }
285 | fs::remove_file(TEST_FILE).map_err(|e| format!("remove: {e}"))?;
286 | Ok(())
287 | }
288 |
/// Rename a directory containing a file and verify the file is reachable
/// (with intact contents) at the new path; then clean up.
fn dir_rename_check_delete() -> Result<(), String> {
    const DIR1: &str = "./mnt/testdir1";
    const DIR2: &str = "./mnt/testdir2";
    const FILE_IN_DIR: &str = "./mnt/testdir1/file";
    create_dir(DIR1).map_err(|e| format!("create_dir: {e}"))?;
    {
        let mut file = File::create(FILE_IN_DIR).map_err(|e| format!("create file: {e}"))?;
        file.write_all(b"dir rename test").map_err(|e| format!("write: {e}"))?;
    }
    rename(DIR1, DIR2).map_err(|e| format!("rename dir: {e}"))?;
    let mut contents = String::new();
    File::open("./mnt/testdir2/file")
        .map_err(|e| format!("open after rename: {e}"))?
        .read_to_string(&mut contents)
        .map_err(|e| format!("read: {e}"))?;
    if contents != "dir rename test" {
        return Err("file content mismatch after dir rename".to_string());
    }
    fs::remove_file("./mnt/testdir2/file").map_err(|e| format!("remove file: {e}"))?;
    remove_dir(DIR2).map_err(|e| format!("remove dir: {e}"))?;
    Ok(())
}
312 |
/// Build a three-level directory tree with one file per level, verify
/// every file's contents, recursively delete the tree, and confirm
/// nothing survives.
fn nested_dir_create_write_read_recursive_delete() -> Result<(), String> {
    let dir1 = "./mnt/dir1";
    let dir2 = "./mnt/dir1/dir2";
    let dir3 = "./mnt/dir1/dir2/dir3";
    let file1 = format!("{}/file1", dir1);
    let file2 = format!("{}/file2", dir2);
    let file3 = format!("{}/file3", dir3);
    // One create_dir_all makes the whole chain.
    fs::create_dir_all(&dir3).map_err(|e| format!("create_dir_all: {e}"))?;
    // Write one file per level.
    let files = [(&file1, "file1"), (&file2, "file2"), (&file3, "file3")];
    for (path, label) in files {
        let mut f = File::create(path).map_err(|e| format!("create {label}: {e}"))?;
        f.write_all(format!("{label} data").as_bytes()).map_err(|e| format!("write {label}: {e}"))?;
    }
    // Read each back and compare.
    for (path, label) in files {
        let mut buf = String::new();
        File::open(path)
            .map_err(|e| format!("open {label}: {e}"))?
            .read_to_string(&mut buf)
            .map_err(|e| format!("read {label}: {e}"))?;
        if buf != format!("{label} data") {
            return Err(format!("{label} content mismatch"));
        }
    }
    // Recursive delete from the top.
    fs::remove_dir_all(dir1).map_err(|e| format!("remove_dir_all: {e}"))?;
    // Nothing should remain.
    if [dir1, dir2, dir3].iter().any(|d| fs::metadata(d).is_ok()) {
        return Err("directories not fully deleted".to_string());
    }
    if [&file1, &file2, &file3].iter().any(|f| fs::metadata(f).is_ok()) {
        return Err("files not fully deleted".to_string());
    }
    Ok(())
}
351 |
352 | #[test]
353 | fn integration_stress() {
354 | let providers = [
355 | ("memory", "MemoryProvider", None),
356 | ("sqlite_simple", "SqliteSimpleProvider", Some("test-sqlite-simple.db")),
357 | ("sqlite_chunked", "SqliteChunkedProvider", Some("test-sqlite-chunked.db")),
358 | ];
359 | let stress_tests = [
360 | StressTest { name: "file_create_write_read_delete", func: file_create_write_read_delete, skip_providers: None },
361 | StressTest { name: "file_create_write_read_delete_large", func: file_create_write_read_delete_large, skip_providers: Some(&["sqlite_simple"]) },
362 | StressTest { name: "dir_create_list_delete", func: dir_create_list_delete, skip_providers: None },
363 | StressTest { name: "file_append_read_delete", func: file_append_read_delete, skip_providers: None },
364 | StressTest { name: "file_truncate_shrink_read_delete", func: file_truncate_shrink_read_delete, skip_providers: None },
365 | StressTest { name: "file_truncate_grow_read_delete", func: file_truncate_grow_read_delete, skip_providers: None },
366 | StressTest { name: "file_rename_check_delete", func: file_rename_check_delete, skip_providers: None },
367 | StressTest { name: "symlink_create_read_delete", func: symlink_create_read_delete, skip_providers: None },
368 | StressTest { name: "concurrent_file_access", func: concurrent_file_access, skip_providers: None },
369 | StressTest { name: "dir_rename_check_delete", func: dir_rename_check_delete, skip_providers: None },
370 | StressTest { name: "nested_dir_create_write_read_recursive_delete", func: nested_dir_create_write_read_recursive_delete, skip_providers: None },
371 | // Add more tests here
372 | ];
373 | let mut results = vec![vec![]; stress_tests.len()];
374 | for (prov_idx, (prov, prov_name, db_path)) in providers.iter().enumerate() {
375 | clean_setup(*db_path);
376 | let mut child = run_fuse_with_provider(prov, *db_path);
377 | wait_for_mount();
378 | for (test_idx, test) in stress_tests.iter().enumerate() {
379 | // Skip test for this provider if listed
380 | if let Some(skips) = test.skip_providers {
381 | if skips.iter().any(|&s| s == providers[prov_idx].0) {
382 | results[test_idx].push(ProviderTestResult {
383 | elapsed: Duration::from_micros(0),
384 | success: true,
385 | error: None,
386 | });
387 | continue;
388 | }
389 | }
390 | println!("running test: {} with provider: {}", test.name, prov_name);
391 | let start = Instant::now();
392 | let (success, error) = match (test.func)() {
393 | Ok(_) => (true, None),
394 | Err(e) => (false, Some(e)),
395 | };
396 | let elapsed = start.elapsed();
397 | results[test_idx].push(ProviderTestResult {
398 | elapsed,
399 | success,
400 | error,
401 | });
402 | }
403 | unsafe {
404 | libc::kill(child.id() as i32, libc::SIGINT);
405 | }
406 | let _ = child.wait();
407 | wait_for_unmount();
408 | }
409 | // Print summary table
410 | let mut table = Table::new();
411 | let mut header = vec!["operation".to_string()];
412 | for (_, prov_name, _) in providers.iter() {
413 | header.push(format!("{} (μs)", prov_name));
414 | }
415 | table.add_row(Row::new(header.iter().map(|s| Cell::new(s)).collect()));
416 | for (test_idx, test) in stress_tests.iter().enumerate() {
417 | let mut cells = vec![test.name.to_string()];
418 | // Collect all elapsed times for this test row (only successful and not skipped ones)
419 | let times: Vec