├── ic-oss.webp ├── ic-oss-sequence.webp ├── src ├── ic_oss_ts │ ├── eslint.config.js │ ├── .prettierrc.json │ ├── src │ │ ├── index.ts │ │ ├── types.ts │ │ ├── queue.ts │ │ ├── stream.test.ts │ │ ├── uploader.ts │ │ ├── cluster.canister.ts │ │ ├── bucket.canister.ts │ │ └── stream.ts │ ├── tsconfig.json │ ├── README.md │ ├── LICENSE │ ├── candid │ │ ├── ic_oss_bucket │ │ │ ├── index.js │ │ │ └── index.d.ts │ │ ├── ic_oss_cluster │ │ │ ├── index.js │ │ │ └── index.d.ts │ │ └── ic_object_store_canister │ │ │ ├── index.js │ │ │ ├── index.d.ts │ │ │ ├── ic_object_store_canister.did │ │ │ └── ic_object_store_canister.did.d.ts │ ├── package.json │ └── .eslintrc.js ├── ic_oss │ ├── src │ │ ├── lib.rs │ │ ├── agent.rs │ │ └── cluster.rs │ ├── Cargo.toml │ └── README.md ├── ic_object_store_canister │ ├── src │ │ ├── lib.rs │ │ ├── api_init.rs │ │ └── api_admin.rs │ ├── Cargo.toml │ ├── README.md │ └── ic_object_store_canister.did ├── ic_object_store │ ├── src │ │ ├── lib.rs │ │ └── agent.rs │ ├── Cargo.toml │ └── README.md ├── ic_oss_can │ ├── Cargo.toml │ ├── src │ │ ├── lib.rs │ │ └── types.rs │ └── README.md ├── ic_oss_cli │ ├── Cargo.toml │ ├── README.md │ └── src │ │ └── file.rs ├── ic_oss_types │ ├── Cargo.toml │ ├── README.md │ └── src │ │ ├── cluster.rs │ │ ├── lib.rs │ │ ├── folder.rs │ │ └── bucket.rs ├── ic_oss_cluster │ ├── Cargo.toml │ ├── src │ │ ├── api_auth.rs │ │ ├── schnorr.rs │ │ ├── ecdsa.rs │ │ ├── api_query.rs │ │ ├── init.rs │ │ └── lib.rs │ └── README.md ├── ic_oss_bucket │ ├── src │ │ ├── lib.rs │ │ ├── api_admin.rs │ │ └── api_init.rs │ ├── Cargo.toml │ └── README.md └── declarations │ ├── ic_oss_bucket │ ├── index.js │ └── index.d.ts │ ├── ic_oss_cluster │ ├── index.js │ └── index.d.ts │ └── ic_object_store_canister │ ├── index.js │ ├── index.d.ts │ ├── ic_object_store_canister.did │ └── ic_object_store_canister.did.d.ts ├── .prettierrc.json ├── examples ├── ai_canister │ ├── dfx.json │ ├── Makefile │ ├── Cargo.toml │ ├── README.md │ └── ai_canister.did ├── upload_js │ ├── package.json │ ├── README.md │ └── upload.js └── video_player │ └── index.html ├── .github └── workflows │ ├── publish-crates.yml │ ├── test.yml │ └── release.yml ├── .gitignore ├── canister_ids.json ├── package.json ├── tsconfig.json ├── Makefile ├── dfx.json ├── LICENSE-MIT ├── Cargo.toml ├── docs └── access_control.md └── README.md /ic-oss.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ldclabs/ic-oss/HEAD/ic-oss.webp -------------------------------------------------------------------------------- /ic-oss-sequence.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ldclabs/ic-oss/HEAD/ic-oss-sequence.webp -------------------------------------------------------------------------------- /src/ic_oss_ts/eslint.config.js: -------------------------------------------------------------------------------- 1 | import eslintConfigPrettier from "eslint-config-prettier" 2 | 3 | export default [ 4 | eslintConfigPrettier, 5 | ] -------------------------------------------------------------------------------- /.prettierrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "htmlWhitespaceSensitivity": "strict", 3 | "quoteProps": "preserve", 4 | "semi": false, 5 | "trailingComma": "none", 6 | "singleQuote": true 7 | } -------------------------------------------------------------------------------- /src/ic_oss/src/lib.rs: 
-------------------------------------------------------------------------------- 1 | pub mod agent; 2 | pub mod bucket; 3 | pub mod cluster; 4 | 5 | #[cfg(test)] 6 | mod tests { 7 | 8 | #[test] 9 | fn it_works() {} 10 | } 11 | -------------------------------------------------------------------------------- /src/ic_oss_ts/.prettierrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "htmlWhitespaceSensitivity": "strict", 3 | "quoteProps": "preserve", 4 | "semi": false, 5 | "trailingComma": "none", 6 | "singleQuote": true 7 | } -------------------------------------------------------------------------------- /examples/ai_canister/dfx.json: -------------------------------------------------------------------------------- 1 | { 2 | "canisters": { 3 | "ai_canister": { 4 | "candid": "ai_canister.did", 5 | "package": "ai_canister", 6 | "type": "rust" 7 | } 8 | }, 9 | "defaults": { 10 | "build": { 11 | "args": "", 12 | "packtool": "" 13 | } 14 | }, 15 | "output_env_file": ".env", 16 | "version": 1 17 | } -------------------------------------------------------------------------------- /.github/workflows/publish-crates.yml: -------------------------------------------------------------------------------- 1 | name: Crates 2 | on: 3 | push: 4 | tags: 5 | - 'v*' 6 | jobs: 7 | publish: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v4 11 | - uses: katyo/publish-crates@v2 12 | with: 13 | registry-token: ${{ secrets.CARGO_REGISTRY_TOKEN }} 14 | ignore-unpublished-changes: true 15 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Various IDEs and Editors 2 | .vscode/ 3 | .idea/ 4 | **/*~ 5 | 6 | # Mac OSX temporary files 7 | .DS_Store 8 | **/.DS_Store 9 | 10 | # dfx temporary files 11 | .dfx/ 12 | 13 | # generated files 14 | # **/declarations/ 15 | 16 | # rust 17 | target/ 18 | 19 | # frontend code 20 | pnpm-lock.yaml 21 | node_modules/ 22 | dist/ 23 | .svelte-kit/ 24 | 25 | # environment variables 26 | .env 27 | *.pem 28 | debug -------------------------------------------------------------------------------- /canister_ids.json: -------------------------------------------------------------------------------- 1 | { 2 | "__Candid_UI": { 3 | "local": "bkyz2-fmaaa-aaaaa-qaaaq-cai" 4 | }, 5 | "ic_object_store_canister": { 6 | "ic": "6at64-oyaaa-aaaap-anvza-cai", 7 | "local": "6at64-oyaaa-aaaap-anvza-cai" 8 | }, 9 | "ic_oss_bucket": { 10 | "ic": "mmrxu-fqaaa-aaaap-ahhna-cai", 11 | "local": "mmrxu-fqaaa-aaaap-ahhna-cai" 12 | }, 13 | "ic_oss_cluster": { 14 | "ic": "x5573-nqaaa-aaaap-ahopq-cai", 15 | "local": "x5573-nqaaa-aaaap-ahopq-cai" 16 | } 17 | } -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "engines": { 3 | "node": ">=20.0.0" 4 | }, 5 | "name": "ic-oss", 6 | "scripts": { 7 | "build": "npm run build --workspaces --if-present", 8 | "prebuild": "npm run prebuild --workspaces --if-present", 9 | "pretest": "npm run prebuild --workspaces --if-present", 10 | "start": "npm start --workspaces --if-present", 11 | "test": "npm test --workspaces --if-present" 12 | }, 13 | "type": "module", 14 | "workspaces": [ 15 | "src/ic_oss_ts" 16 | ] 17 | } -------------------------------------------------------------------------------- /examples/upload_js/package.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "name": "upload_js", 3 | "version": "1.0.0", 4 | "type": "module", 5 | "module": "ESNext", 6 | "main": "upload.js", 7 | "scripts": { 8 | "test": "echo \"Error: no test specified\" && exit 1" 9 | }, 10 | "author": "", 11 | "license": "ISC", 12 | "description": "", 13 | "dependencies": { 14 | "@dfinity/agent": "^3.4.3", 15 | "@dfinity/identity": "^3.4.3", 16 | "@dfinity/principal": "^3.4.3", 17 | "@dfinity/utils": "^4.0.1", 18 | "@ldclabs/ic_oss_ts": "^1.2.3" 19 | } 20 | } -------------------------------------------------------------------------------- /examples/ai_canister/Makefile: -------------------------------------------------------------------------------- 1 | BUILD_ENV := rust 2 | 3 | .PHONY: build-wasm build-did 4 | 5 | lint: 6 | @cargo fmt 7 | @cargo clippy --all-targets --all-features 8 | 9 | fix: 10 | @cargo clippy --fix --workspace --tests 11 | 12 | test: 13 | @cargo test --workspace -- --nocapture 14 | 15 | # cargo install ic-wasm 16 | build-wasm: 17 | @cargo build --release --target wasm32-unknown-unknown --package ai_canister 18 | 19 | # cargo install candid-extractor 20 | build-did: 21 | candid-extractor ../../target/wasm32-unknown-unknown/release/ai_canister.wasm > ai_canister.did 22 | -------------------------------------------------------------------------------- /src/ic_object_store_canister/src/lib.rs: -------------------------------------------------------------------------------- 1 | use candid::Principal; 2 | use ic_oss_types::object_store::*; 3 | use serde_bytes::ByteBuf; 4 | use std::collections::BTreeSet; 5 | 6 | mod api; 7 | mod api_admin; 8 | mod api_init; 9 | mod store; 10 | 11 | use api_init::InstallArgs; 12 | 13 | fn is_controller() -> Result<(), String> { 14 | let caller = ic_cdk::api::msg_caller(); 15 | if ic_cdk::api::is_controller(&caller) || store::state::is_controller(&caller) { 16 | Ok(()) 17 | } else { 18 | Err("user is not a controller".to_string()) 19 | } 20 | } 21 | 22 | ic_cdk::export_candid!(); 23 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | on: 3 | push: 4 | branches: ['main'] 5 | pull_request: 6 | branches: ['main'] 7 | jobs: 8 | test: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v4 12 | - uses: dfinity/setup-dfx@main 13 | - name: cargo test 14 | run: | 15 | cargo clippy --verbose --all-targets --all-features 16 | cargo test --verbose --workspace -- --nocapture 17 | - name: npm test 18 | run: | 19 | cd src/ic_oss_ts 20 | npm install -g pnpm 21 | pnpm install 22 | pnpm test 23 | -------------------------------------------------------------------------------- /src/ic_object_store/src/lib.rs: -------------------------------------------------------------------------------- 1 | use rand::RngCore; 2 | 3 | pub mod agent; 4 | pub mod client; 5 | 6 | pub use agent::*; 7 | pub use client::*; 8 | 9 | /// Generates an array of random bytes of specified size. 
10 | /// 11 | /// # Examples 12 | /// ``` 13 | /// use ic_object_store::rand_bytes; 14 | /// 15 | /// let random_bytes: [u8; 32] = rand_bytes(); 16 | /// assert_eq!(random_bytes.len(), 32); 17 | /// ``` 18 | pub fn rand_bytes<const N: usize>() -> [u8; N] { 19 | let mut rng = rand::rng(); 20 | let mut bytes = [0u8; N]; 21 | rng.fill_bytes(&mut bytes); 22 | bytes 23 | } 24 | 25 | #[cfg(test)] 26 | mod tests { 27 | 28 | #[test] 29 | fn it_works() {} 30 | } 31 | -------------------------------------------------------------------------------- /src/ic_oss_can/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ic-oss-can" 3 | description = "A Rust library for implementing large file storage in ICP canisters" 4 | publish = true 5 | repository = "https://github.com/ldclabs/ic-oss/tree/main/src/ic_oss_can" 6 | version.workspace = true 7 | edition.workspace = true 8 | keywords.workspace = true 9 | categories.workspace = true 10 | license.workspace = true 11 | 12 | [dependencies] 13 | candid = { workspace = true } 14 | serde = { workspace = true } 15 | serde_bytes = { workspace = true } 16 | ciborium = { workspace = true } 17 | ic-cdk = { workspace = true } 18 | ic-stable-structures = { workspace = true } 19 | ic-oss-types = { path = "../ic_oss_types", version = "1" } 20 | -------------------------------------------------------------------------------- /src/ic_oss_ts/src/index.ts: -------------------------------------------------------------------------------- 1 | export type { 2 | BucketInfo, 3 | CreateFileInput, 4 | CreateFileOutput, 5 | CreateFolderInput, 6 | FileInfo, 7 | FolderInfo, 8 | FolderName, 9 | MoveInput, 10 | UpdateBucketInput, 11 | UpdateFileChunkInput, 12 | UpdateFileChunkOutput, 13 | UpdateFileInput, 14 | UpdateFileOutput, 15 | UpdateFolderInput 16 | } from '../candid/ic_oss_bucket/ic_oss_bucket.did.js' 17 | export type { 18 | ClusterInfo, 19 | Token 20 | } from '../candid/ic_oss_cluster/ic_oss_cluster.did.js' 21 | export * from './bucket.canister.js' 22 | export * from './cluster.canister.js' 23 | export * from './queue.js' 24 | export * from './stream.js' 25 | export * from './types.js' 26 | export * from './uploader.js' 27 | -------------------------------------------------------------------------------- /src/ic_oss_ts/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ESNext", 4 | "outDir": "./dist", 5 | "useDefineForClassFields": true, 6 | "lib": ["DOM", "DOM.Iterable", "ESNext", "ESNext.AsyncIterable"], 7 | "allowJs": false, 8 | "skipLibCheck": true, 9 | "allowSyntheticDefaultImports": true, 10 | "strict": true, 11 | "forceConsistentCasingInFileNames": true, 12 | "module": "ESNext", 13 | "moduleResolution": "bundler", 14 | "resolveJsonModule": true, 15 | "isolatedModules": true, 16 | "sourceMap": true, 17 | "declaration": true, 18 | "types": ["@types/node"] 19 | }, 20 | "include": ["src/**/*.ts"], 21 | "exclude": ["src/**/*.test.ts", "node_modules", "dist", "debug"] 22 | } 23 | -------------------------------------------------------------------------------- /src/ic_oss_ts/README.md: -------------------------------------------------------------------------------- 1 | # `@ldclabs/ic_oss_ts` 2 | ![License](https://img.shields.io/crates/l/ic-oss.svg) 3 | [![Test](https://github.com/ldclabs/ic-oss/actions/workflows/test.yml/badge.svg)](https://github.com/ldclabs/ic-oss/actions/workflows/test.yml) 4 | [![NPM
version](http://img.shields.io/npm/v/@ldclabs/ic_oss_ts.svg)](https://www.npmjs.com/package/@ldclabs/ic_oss_ts) 5 | 6 | [ic-oss](https://github.com/ldclabs/ic-oss) is a decentralized Object Storage Service on the Internet Computer. 7 | 8 | `@ldclabs/ic_oss_ts` is the Typescript version of the client SDK for the ic-oss cluster. 9 | 10 | ## License 11 | 12 | Copyright © 2024-2025 [LDC Labs](https://github.com/ldclabs). 13 | 14 | Licensed under the MIT License. See [LICENSE](../../LICENSE-MIT) for details. -------------------------------------------------------------------------------- /src/ic_oss/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ic-oss" 3 | description = "The Rust version of the client SDK for the ic-oss cluster." 4 | publish = true 5 | repository = "https://github.com/ldclabs/ic-oss/tree/main/src/ic_oss" 6 | version.workspace = true 7 | edition.workspace = true 8 | keywords.workspace = true 9 | categories.workspace = true 10 | license.workspace = true 11 | 12 | [dependencies] 13 | bytes = { workspace = true } 14 | candid = { workspace = true } 15 | serde = { workspace = true } 16 | serde_bytes = { workspace = true } 17 | tokio = { workspace = true } 18 | tokio-util = { workspace = true } 19 | tokio-stream = { workspace = true } 20 | futures = { workspace = true } 21 | sha3 = { workspace = true } 22 | ic-agent = { workspace = true } 23 | ic-oss-types = { path = "../ic_oss_types", version = "1" } 24 | -------------------------------------------------------------------------------- /examples/ai_canister/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ai_canister" 3 | version = "0.1.0" 4 | edition = "2021" 5 | publish = false 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [lib] 10 | crate-type = ["cdylib"] 11 | 12 | [dependencies] 13 | candid = { workspace = true } 14 | ic-cdk = { workspace = true } 15 | ic-cdk-timers = { workspace = true } 16 | ic-stable-structures = { workspace = true } 17 | ciborium = { workspace = true } 18 | serde = { workspace = true } 19 | serde_bytes = { workspace = true } 20 | rand = { version = "0.8", features = ["getrandom"] } 21 | ic-oss-types = { path = "../../src/ic_oss_types", version = "1" } 22 | ic-oss-can = { path = "../../src/ic_oss_can", version = "1" } 23 | 24 | [dependencies.getrandom] 25 | features = ["custom"] 26 | version = "0.2" 27 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "allowImportingTsExtensions": true, 4 | "allowJs": true, 5 | "declaration": true, 6 | "emitDecoratorMetadata": true, 7 | "esModuleInterop": true, 8 | "exactOptionalPropertyTypes": true, 9 | "experimentalDecorators": true, 10 | "forceConsistentCasingInFileNames": true, 11 | "importHelpers": true, 12 | "inlineSourceMap": true, 13 | "isolatedModules": true, 14 | "lib": ["ES2022", "ES2022.Intl", "DOM", "DOM.Iterable"], 15 | "module": "ES2022", 16 | "moduleResolution": "Bundler", 17 | "noEmit": true, 18 | "noFallthroughCasesInSwitch": true, 19 | "noImplicitReturns": true, 20 | "noPropertyAccessFromIndexSignature": true, 21 | "noUncheckedIndexedAccess": true, 22 | "noUnusedLocals": true, 23 | "resolveJsonModule": true, 24 | "skipLibCheck": true, 25 | "strict": true, 26 | "target": "ESNext" 27 | } 28 | 
} 29 | -------------------------------------------------------------------------------- /src/ic_oss_cli/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ic-oss-cli" 3 | description = "A command-line tool implemented in Rust for the ic-oss cluster." 4 | publish = true 5 | repository = "https://github.com/ldclabs/ic-oss/tree/main/src/ic_oss_cli" 6 | version.workspace = true 7 | edition.workspace = true 8 | keywords.workspace = true 9 | categories.workspace = true 10 | license.workspace = true 11 | 12 | [dependencies] 13 | candid = { workspace = true, features = ["value", "printer"] } 14 | serde_bytes = { workspace = true } 15 | tokio = { workspace = true } 16 | sha3 = { workspace = true } 17 | hex = { workspace = true } 18 | ic-agent = { workspace = true } 19 | ic-oss = { path = "../ic_oss", version = "1" } 20 | ic-oss-types = { path = "../ic_oss_types", version = "1" } 21 | anyhow = "1" 22 | clap = { version = "=4.5", features = ["derive"] } 23 | infer = "0.15" 24 | mime-db = "1" 25 | ring = "0.17" 26 | pem = "3" 27 | chrono = { version = "0.4", features = ["serde"] } 28 | -------------------------------------------------------------------------------- /src/ic_oss_types/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ic-oss-types" 3 | description = "A Rust types library used for integrating with ic-oss cluster." 4 | publish = true 5 | repository = "https://github.com/ldclabs/ic-oss/tree/main/src/ic_oss_types" 6 | version.workspace = true 7 | edition.workspace = true 8 | keywords.workspace = true 9 | categories.workspace = true 10 | license.workspace = true 11 | 12 | [dependencies] 13 | base64 = { workspace = true } 14 | candid = { workspace = true } 15 | hex = { workspace = true } 16 | serde = { workspace = true } 17 | serde_bytes = { workspace = true } 18 | crc32fast = { workspace = true } 19 | num-traits = { workspace = true } 20 | url = { workspace = true } 21 | ciborium = { workspace = true } 22 | icrc-ledger-types = { workspace = true } 23 | k256 = { workspace = true } 24 | ed25519-dalek = { workspace = true } 25 | sha2 = { workspace = true } 26 | sha3 = { workspace = true } 27 | coset = { workspace = true } 28 | thiserror = { workspace = true } 29 | -------------------------------------------------------------------------------- /src/ic_oss_types/README.md: -------------------------------------------------------------------------------- 1 | # `ic-oss-types` 2 | ![License](https://img.shields.io/crates/l/ic-oss-types.svg) 3 | [![Crates.io](https://img.shields.io/crates/d/ic-oss-types.svg)](https://crates.io/crates/ic-oss-types) 4 | [![Test](https://github.com/ldclabs/ic-oss/actions/workflows/test.yml/badge.svg)](https://github.com/ldclabs/ic-oss/actions/workflows/test.yml) 5 | [![Docs.rs](https://img.shields.io/docsrs/ic-oss-types?label=docs.rs)](https://docs.rs/ic-oss-types) 6 | [![Latest Version](https://img.shields.io/crates/v/ic-oss-types.svg)](https://crates.io/crates/ic-oss-types) 7 | 8 | [ic-oss](https://github.com/ldclabs/ic-oss) is a decentralized Object Storage Service on the Internet Computer. 9 | 10 | `ic-oss-types` is a Rust types library used for integrating with the [ic-oss](https://github.com/ldclabs/ic-oss) cluster. 11 | 12 | ## License 13 | 14 | Copyright © 2024-2025 [LDC Labs](https://github.com/ldclabs). 15 | 16 | Licensed under the MIT License. See [LICENSE](../../LICENSE-MIT) for details.
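A minimal usage sketch (not part of the upstream README): the crate exposes a `format_error` helper that the other ic-oss crates use to map arbitrary errors into the `String` error type shared by the canister and SDK APIs (see `src/ic_object_store/src/agent.rs`). The exact trait bound of `format_error` and the `parse_chunk_index` helper below are assumptions for illustration only.

```rust
use ic_oss_types::format_error;

// Hypothetical helper: convert a std error into the `String` error type
// used across the ic-oss APIs, assuming `format_error` accepts any error
// with a Debug/Display representation (as it is used in agent.rs).
fn parse_chunk_index(s: &str) -> Result<u32, String> {
    s.parse::<u32>().map_err(format_error)
}
```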
-------------------------------------------------------------------------------- /src/ic_oss/README.md: -------------------------------------------------------------------------------- 1 | # `ic-oss` 2 | ![License](https://img.shields.io/crates/l/ic-oss.svg) 3 | [![Crates.io](https://img.shields.io/crates/d/ic-oss.svg)](https://crates.io/crates/ic-oss) 4 | [![Test](https://github.com/ldclabs/ic-oss/actions/workflows/test.yml/badge.svg)](https://github.com/ldclabs/ic-oss/actions/workflows/test.yml) 5 | [![Docs.rs](https://img.shields.io/docsrs/ic-oss?label=docs.rs)](https://docs.rs/ic-oss) 6 | [![Latest Version](https://img.shields.io/crates/v/ic-oss.svg)](https://crates.io/crates/ic-oss) 7 | 8 | [ic-oss](https://github.com/ldclabs/ic-oss) is a decentralized Object Storage Service on the Internet Computer. 9 | 10 | `ic-oss` is the Rust version of the client SDK for the ic-oss cluster. 11 | 12 | ## Documentation 13 | 14 | For detailed documentation, please visit: https://docs.rs/ic-oss 15 | 16 | ## License 17 | 18 | Copyright © 2024-2025 [LDC Labs](https://github.com/ldclabs). 19 | 20 | Licensed under the MIT License. See [LICENSE](../../LICENSE-MIT) for details. 21 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | BUILD_ENV := rust 2 | 3 | .PHONY: build-wasm build-did 4 | 5 | lint: 6 | @cargo fmt 7 | @cargo clippy --all-targets --all-features 8 | 9 | fix: 10 | @cargo clippy --fix --workspace --tests 11 | 12 | test: 13 | @cargo test --workspace -- --nocapture 14 | 15 | # cargo install ic-wasm 16 | build-wasm: 17 | @cargo build --release --target wasm32-unknown-unknown --package ic_object_store_canister 18 | @cargo build --release --target wasm32-unknown-unknown --package ic_oss_bucket 19 | @cargo build --release --target wasm32-unknown-unknown --package ic_oss_cluster 20 | 21 | # cargo install candid-extractor 22 | build-did: 23 | candid-extractor target/wasm32-unknown-unknown/release/ic_object_store_canister.wasm > src/ic_object_store_canister/ic_object_store_canister.did 24 | candid-extractor target/wasm32-unknown-unknown/release/ic_oss_bucket.wasm > src/ic_oss_bucket/ic_oss_bucket.did 25 | candid-extractor target/wasm32-unknown-unknown/release/ic_oss_cluster.wasm > src/ic_oss_cluster/ic_oss_cluster.did 26 | dfx generate 27 | -------------------------------------------------------------------------------- /src/ic_object_store_canister/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ic_object_store_canister" 3 | description = "An Object Store of Apache Arrow on the Internet Computer."
4 | publish = false 5 | repository = "https://github.com/ldclabs/ic-oss/tree/main/src/ic_object_store_canister" 6 | version.workspace = true 7 | edition.workspace = true 8 | keywords.workspace = true 9 | categories.workspace = true 10 | license.workspace = true 11 | 12 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 13 | 14 | [lib] 15 | crate-type = ["cdylib"] 16 | 17 | [dependencies] 18 | ic-oss-types = { path = "../ic_oss_types", version = "1" } 19 | candid = { workspace = true, features = ["value", "printer"] } 20 | ciborium = { workspace = true } 21 | ic-cdk = { workspace = true } 22 | serde = { workspace = true } 23 | serde_bytes = { workspace = true } 24 | object_store = { workspace = true, default-features = false } 25 | ic-stable-structures = { workspace = true } 26 | ic-dummy-getrandom-for-wasm = { workspace = true } 27 | -------------------------------------------------------------------------------- /src/ic_object_store/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ic_object_store" 3 | description = "The Rust version of the client SDK for the IC Object Store canister." 4 | publish = true 5 | repository = "https://github.com/ldclabs/ic-oss/tree/main/src/ic_object_store" 6 | version = "1.3.0" 7 | edition.workspace = true 8 | keywords.workspace = true 9 | categories.workspace = true 10 | license.workspace = true 11 | 12 | [dependencies] 13 | ic-oss-types = { path = "../ic_oss_types", version = "1" } 14 | async-trait = { workspace = true } 15 | async-stream = { workspace = true } 16 | futures = { workspace = true } 17 | bytes = { workspace = true } 18 | candid = { workspace = true } 19 | serde_bytes = { workspace = true } 20 | rand = { workspace = true } 21 | ic-agent = { workspace = true } 22 | object_store = { workspace = true } 23 | chrono = { workspace = true } 24 | aes-gcm = { workspace = true } 25 | ic_cose_types = { workspace = true } 26 | 27 | [dev-dependencies] 28 | tokio = { workspace = true } 29 | object_store = { workspace = true, features = ["integration", "rand"] } 30 | -------------------------------------------------------------------------------- /dfx.json: -------------------------------------------------------------------------------- 1 | { 2 | "canisters": { 3 | "ic_object_store_canister": { 4 | "candid": "src/ic_object_store_canister/ic_object_store_canister.did", 5 | "declarations": { 6 | "node_compatibility": true 7 | }, 8 | "package": "ic_object_store_canister", 9 | "optimize": "cycles", 10 | "type": "rust" 11 | }, 12 | "ic_oss_bucket": { 13 | "candid": "src/ic_oss_bucket/ic_oss_bucket.did", 14 | "declarations": { 15 | "node_compatibility": true 16 | }, 17 | "package": "ic_oss_bucket", 18 | "optimize": "cycles", 19 | "type": "rust" 20 | }, 21 | "ic_oss_cluster": { 22 | "candid": "src/ic_oss_cluster/ic_oss_cluster.did", 23 | "declarations": { 24 | "node_compatibility": true 25 | }, 26 | "package": "ic_oss_cluster", 27 | "optimize": "cycles", 28 | "type": "rust" 29 | } 30 | }, 31 | "defaults": { 32 | "build": { 33 | "args": "", 34 | "packtool": "" 35 | } 36 | }, 37 | "output_env_file": ".env", 38 | "version": 1 39 | } -------------------------------------------------------------------------------- /src/ic_oss_cluster/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ic_oss_cluster" 3 | description = "An ICP smart contract and the manager of the ic-oss cluster." 
4 | publish = false 5 | repository = "https://github.com/ldclabs/ic-oss/tree/main/src/ic_oss_cluster" 6 | version.workspace = true 7 | edition.workspace = true 8 | keywords.workspace = true 9 | categories.workspace = true 10 | license.workspace = true 11 | 12 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 13 | 14 | [lib] 15 | crate-type = ["cdylib"] 16 | 17 | [dependencies] 18 | ic-oss-types = { path = "../ic_oss_types", version = "1" } 19 | candid = { workspace = true, features = ["value", "printer"] } 20 | futures = { workspace = true } 21 | ic-cdk = { workspace = true } 22 | ic-cdk-timers = { workspace = true } 23 | ic-stable-structures = { workspace = true } 24 | ciborium = { workspace = true } 25 | hex = { workspace = true } 26 | serde = { workspace = true } 27 | serde_bytes = { workspace = true } 28 | ed25519-dalek = { workspace = true } 29 | ic-dummy-getrandom-for-wasm = { workspace = true } 30 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024-2025 LDC Labs 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /src/ic_oss_cluster/src/api_auth.rs: -------------------------------------------------------------------------------- 1 | use candid::Principal; 2 | use ic_oss_types::cose::Token; 3 | use serde_bytes::ByteBuf; 4 | 5 | use crate::{api_admin, store}; 6 | 7 | #[ic_cdk::update] 8 | async fn access_token(audience: Principal) -> Result<ByteBuf, String> { 9 | let token = get_token(ic_cdk::api::msg_caller(), audience)?; 10 | 11 | api_admin::admin_sign_access_token(token).await 12 | } 13 | 14 | #[ic_cdk::update] 15 | async fn ed25519_access_token(audience: Principal) -> Result<ByteBuf, String> { 16 | let token = get_token(ic_cdk::api::msg_caller(), audience)?; 17 | 18 | api_admin::admin_ed25519_access_token(token).await 19 | } 20 | 21 | fn get_token(subject: Principal, audience: Principal) -> Result<Token, String> { 22 | match store::auth::get_all_policies(&subject) { 23 | None => Err("no policies found".to_string()), 24 | Some(pt) => { 25 | let policies = pt.0.get(&audience).ok_or("no policies found")?; 26 | Ok(Token { 27 | subject, 28 | audience, 29 | policies: policies.to_owned(), 30 | }) 31 | } 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /src/ic_oss_ts/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 LDC Labs 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE.
22 | -------------------------------------------------------------------------------- /src/ic_oss_bucket/src/lib.rs: -------------------------------------------------------------------------------- 1 | use candid::Principal; 2 | use ic_cdk::management_canister as mgt; 3 | use serde_bytes::{ByteArray, ByteBuf}; 4 | use std::collections::BTreeSet; 5 | 6 | mod api_admin; 7 | mod api_http; 8 | mod api_init; 9 | mod api_query; 10 | mod api_update; 11 | mod permission; 12 | mod store; 13 | 14 | use api_init::CanisterArgs; 15 | use ic_oss_types::{bucket::*, file::*, folder::*}; 16 | 17 | const MILLISECONDS: u64 = 1_000_000; 18 | const SECONDS: u64 = 1_000_000_000; 19 | 20 | static ANONYMOUS: Principal = Principal::anonymous(); 21 | 22 | fn is_controller() -> Result<(), String> { 23 | let caller = ic_cdk::api::msg_caller(); 24 | if ic_cdk::api::is_controller(&caller) || store::state::is_controller(&caller) { 25 | Ok(()) 26 | } else { 27 | Err("user is not a controller".to_string()) 28 | } 29 | } 30 | 31 | pub fn validate_principals(principals: &BTreeSet<Principal>) -> Result<(), String> { 32 | if principals.is_empty() { 33 | return Err("principals cannot be empty".to_string()); 34 | } 35 | if principals.contains(&ANONYMOUS) { 36 | return Err("anonymous user is not allowed".to_string()); 37 | } 38 | Ok(()) 39 | } 40 | 41 | ic_cdk::export_candid!(); 42 | -------------------------------------------------------------------------------- /src/ic_oss_bucket/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ic_oss_bucket" 3 | description = "A decentralized Object Storage Service bucket on the Internet Computer, part of IC-OSS." 4 | publish = false 5 | repository = "https://github.com/ldclabs/ic-oss/tree/main/src/ic_oss_bucket" 6 | version.workspace = true 7 | edition.workspace = true 8 | keywords.workspace = true 9 | categories.workspace = true 10 | license.workspace = true 11 | 12 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 13 | 14 | [lib] 15 | crate-type = ["cdylib"] 16 | 17 | [dependencies] 18 | ic-oss-types = { path = "../ic_oss_types", version = "1" } 19 | candid = { workspace = true, features = ["value", "printer"] } 20 | ciborium = { workspace = true } 21 | ic-cdk = { workspace = true } 22 | hex = { workspace = true } 23 | serde = { workspace = true } 24 | serde_bytes = { workspace = true } 25 | base64 = { workspace = true } 26 | once_cell = { workspace = true } 27 | ic-stable-structures = { workspace = true } 28 | ic-http-certification = { workspace = true } 29 | lazy_static = "1.4" 30 | hyperx = { git = "https://github.com/ldclabs/hyperx", rev = "4b9bd373b8c4d29a32e59912bf598ba69273c032" } 31 | ic-dummy-getrandom-for-wasm = { workspace = true } 32 | -------------------------------------------------------------------------------- /src/ic_object_store/src/agent.rs: -------------------------------------------------------------------------------- 1 | use ic_agent::{Agent, Identity}; 2 | use ic_oss_types::format_error; 3 | use std::sync::Arc; 4 | 5 | /// Creates and configures an IC agent with the given host URL and identity.
6 | /// 7 | /// # Arguments 8 | /// * `host` - The IC host URL (e.g., "https://ic0.app" or "http://localhost:4943") 9 | /// * `identity` - Arc-wrapped identity for authentication 10 | /// 11 | /// # Returns 12 | /// Result containing the configured Agent or an error string 13 | /// 14 | /// # Notes 15 | /// - Automatically fetches root key for local development (http:// URLs) 16 | /// - Disables query signature verification 17 | pub async fn build_agent(host: &str, identity: Arc<dyn Identity>) -> Result<Agent, String> { 18 | let agent = Agent::builder() 19 | .with_url(host) 20 | .with_arc_identity(identity) 21 | .with_verify_query_signatures(false); 22 | 23 | let agent = if host.starts_with("https://") { 24 | agent 25 | .with_background_dynamic_routing() 26 | .build() 27 | .map_err(format_error)? 28 | } else { 29 | agent.build().map_err(format_error)? 30 | }; 31 | 32 | if host.starts_with("http://") { 33 | // ignore errors for local development 34 | let _ = agent.fetch_root_key().await; 35 | } 36 | 37 | Ok(agent) 38 | } 39 | -------------------------------------------------------------------------------- /src/ic_oss_cluster/src/schnorr.rs: -------------------------------------------------------------------------------- 1 | use ic_cdk::management_canister as mgt; 2 | 3 | pub use mgt::SchnorrAlgorithm; 4 | 5 | pub async fn sign_with_schnorr( 6 | key_name: String, 7 | alg: mgt::SchnorrAlgorithm, 8 | derivation_path: Vec<Vec<u8>>, 9 | message: Vec<u8>, 10 | ) -> Result<Vec<u8>, String> { 11 | let args = mgt::SignWithSchnorrArgs { 12 | message, 13 | derivation_path, 14 | key_id: mgt::SchnorrKeyId { 15 | algorithm: alg, 16 | name: key_name, 17 | }, 18 | aux: None, 19 | }; 20 | 21 | let rt = mgt::sign_with_schnorr(&args) 22 | .await 23 | .map_err(|err| format!("sign_with_schnorr failed: {:?}", err))?; 24 | 25 | Ok(rt.signature) 26 | } 27 | 28 | pub async fn schnorr_public_key( 29 | key_name: String, 30 | alg: mgt::SchnorrAlgorithm, 31 | derivation_path: Vec<Vec<u8>>, 32 | ) -> Result<mgt::SchnorrPublicKeyResult, String> { 33 | let args = mgt::SchnorrPublicKeyArgs { 34 | canister_id: None, 35 | derivation_path, 36 | key_id: mgt::SchnorrKeyId { 37 | algorithm: alg, 38 | name: key_name, 39 | }, 40 | }; 41 | 42 | let rt = mgt::schnorr_public_key(&args) 43 | .await 44 | .map_err(|err| format!("schnorr_public_key failed {:?}", err))?; 45 | Ok(rt) 46 | } 47 | -------------------------------------------------------------------------------- /src/ic_oss_cluster/src/ecdsa.rs: -------------------------------------------------------------------------------- 1 | use ic_cdk::management_canister as mgt; 2 | 3 | pub async fn sign_with_ecdsa( 4 | key_name: String, 5 | derivation_path: Vec<Vec<u8>>, 6 | message_hash: Vec<u8>, 7 | ) -> Result<Vec<u8>, String> { 8 | if message_hash.len() != 32 { 9 | return Err("message must be 32 bytes".to_string()); 10 | } 11 | let args = mgt::SignWithEcdsaArgs { 12 | message_hash, 13 | derivation_path, 14 | key_id: mgt::EcdsaKeyId { 15 | curve: mgt::EcdsaCurve::Secp256k1, 16 | name: key_name, 17 | }, 18 | }; 19 | 20 | let rt = mgt::sign_with_ecdsa(&args) 21 | .await 22 | .map_err(|err| format!("sign_with_ecdsa failed {:?}", err))?; 23 | 24 | Ok(rt.signature) 25 | } 26 | 27 | pub async fn ecdsa_public_key( 28 | key_name: String, 29 | derivation_path: Vec<Vec<u8>>, 30 | ) -> Result<mgt::EcdsaPublicKeyResult, String> { 31 | let args = mgt::EcdsaPublicKeyArgs { 32 | canister_id: None, 33 | derivation_path, 34 | key_id: mgt::EcdsaKeyId { 35 | curve: mgt::EcdsaCurve::Secp256k1, 36 | name: key_name, 37 | }, 38 | }; 39 | 40 | let rt = mgt::ecdsa_public_key(&args) 41 | .await 42 | .map_err(|err| format!("ecdsa_public_key failed {:?}",
err))?; 43 | 44 | Ok(rt) 45 | } 46 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | on: 3 | push: 4 | tags: 5 | - 'v*' 6 | jobs: 7 | test: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v4 11 | - uses: Swatinem/rust-cache@v2 12 | # - uses: dfinity/setup-dfx@main 13 | - name: Build canisters 14 | run: | 15 | rustup target add wasm32-unknown-unknown 16 | cargo install ic-wasm 17 | mkdir out 18 | cargo build --target wasm32-unknown-unknown --release --locked -p ic_object_store_canister -p ic_oss_bucket -p ic_oss_cluster 19 | for CAN in ic_object_store_canister ic_oss_bucket ic_oss_cluster 20 | do 21 | cp "target/wasm32-unknown-unknown/release/$CAN.wasm" out/ 22 | cp "src/$CAN/$CAN.did" "out/$CAN.did" 23 | WASM="out/$CAN.wasm" 24 | ic-wasm $WASM -o $WASM metadata candid:service -f "out/$CAN.did" -v public 25 | ic-wasm $WASM -o $WASM shrink 26 | ic-wasm $WASM -o $WASM optimize O3 --inline-functions-with-loops 27 | gzip $WASM 28 | SHA256="$(sha256sum < "out/$CAN.wasm.gz" | sed 's/ .*$//g')" 29 | echo $SHA256 > "out/$CAN.wasm.gz.$SHA256.txt" 30 | done 31 | ls -lah out 32 | - name: Release 33 | uses: softprops/action-gh-release@v2 34 | with: 35 | files: out/* 36 | -------------------------------------------------------------------------------- /src/ic_oss_ts/src/types.ts: -------------------------------------------------------------------------------- 1 | import type { Principal } from '@dfinity/principal' 2 | import type { CanisterOptions as Options } from '@dfinity/utils' 3 | 4 | export interface CanisterOptions<T> extends Omit<Options<T>, 'canisterId'> { 5 | canisterId: Principal 6 | unwrapResult?: typeof resultOk 7 | } 8 | 9 | export interface Ok<T> { 10 | Ok: T 11 | } 12 | 13 | export interface Err<T> { 14 | Err: T 15 | } 16 | 17 | export type Result<T, E> = Ok<T> | Err<E> 18 | 19 | export function resultOk<T, E>(res: Result<T, E>): T { 20 | if ('Err' in res) { 21 | throw res.Err 22 | } 23 | 24 | return res.Ok 25 | } 26 | 27 | export type FileChunk = [number, Uint8Array] 28 | 29 | export interface FileConfig { 30 | content: ReadableStream | Blob | File | Uint8Array | ArrayBuffer | string 31 | name: string 32 | contentType: string 33 | size?: number 34 | /** 35 | * Folder that file will be uploaded to 36 | * @default 0, root folder 37 | */ 38 | parent?: number 39 | /** 40 | * File hash generation will be skipped if hash is provided 41 | */ 42 | hash?: Uint8Array 43 | } 44 | 45 | export interface UploadFileChunksResult { 46 | id: number 47 | filled: number 48 | uploadedChunks: number[] 49 | hash: Uint8Array | null 50 | } 51 | 52 | export interface Progress { 53 | filled: number 54 | size?: number // total size of file, may be unknown 55 | chunkIndex: number 56 | concurrency: number 57 | } 58 | -------------------------------------------------------------------------------- /src/declarations/ic_oss_bucket/index.js: -------------------------------------------------------------------------------- 1 | import { Actor, HttpAgent } from "@dfinity/agent"; 2 | 3 | // Imports and re-exports candid interface 4 | import { idlFactory } from "./ic_oss_bucket.did.js"; 5 | export { idlFactory } from "./ic_oss_bucket.did.js"; 6 | 7 | /* CANISTER_ID is replaced by webpack based on node environment 8 | * Note: canister environment variable will be standardized as 9 | * process.env.CANISTER_ID_ 10 | * beginning in dfx 0.15.0 11 | */ 12 | export const canisterId = 13 |
process.env.CANISTER_ID_IC_OSS_BUCKET; 14 | 15 | export const createActor = (canisterId, options = {}) => { 16 | const agent = options.agent || new HttpAgent({ ...options.agentOptions }); 17 | 18 | if (options.agent && options.agentOptions) { 19 | console.warn( 20 | "Detected both agent and agentOptions passed to createActor. Ignoring agentOptions and proceeding with the provided agent." 21 | ); 22 | } 23 | 24 | // Fetch root key for certificate validation during development 25 | if (process.env.DFX_NETWORK !== "ic") { 26 | agent.fetchRootKey().catch((err) => { 27 | console.warn( 28 | "Unable to fetch root key. Check to ensure that your local replica is running" 29 | ); 30 | console.error(err); 31 | }); 32 | } 33 | 34 | // Creates an actor with using the candid interface and the HttpAgent 35 | return Actor.createActor(idlFactory, { 36 | agent, 37 | canisterId, 38 | ...options.actorOptions, 39 | }); 40 | }; 41 | -------------------------------------------------------------------------------- /src/declarations/ic_oss_cluster/index.js: -------------------------------------------------------------------------------- 1 | import { Actor, HttpAgent } from "@dfinity/agent"; 2 | 3 | // Imports and re-exports candid interface 4 | import { idlFactory } from "./ic_oss_cluster.did.js"; 5 | export { idlFactory } from "./ic_oss_cluster.did.js"; 6 | 7 | /* CANISTER_ID is replaced by webpack based on node environment 8 | * Note: canister environment variable will be standardized as 9 | * process.env.CANISTER_ID_ 10 | * beginning in dfx 0.15.0 11 | */ 12 | export const canisterId = 13 | process.env.CANISTER_ID_IC_OSS_CLUSTER; 14 | 15 | export const createActor = (canisterId, options = {}) => { 16 | const agent = options.agent || new HttpAgent({ ...options.agentOptions }); 17 | 18 | if (options.agent && options.agentOptions) { 19 | console.warn( 20 | "Detected both agent and agentOptions passed to createActor. Ignoring agentOptions and proceeding with the provided agent." 21 | ); 22 | } 23 | 24 | // Fetch root key for certificate validation during development 25 | if (process.env.DFX_NETWORK !== "ic") { 26 | agent.fetchRootKey().catch((err) => { 27 | console.warn( 28 | "Unable to fetch root key. Check to ensure that your local replica is running" 29 | ); 30 | console.error(err); 31 | }); 32 | } 33 | 34 | // Creates an actor with using the candid interface and the HttpAgent 35 | return Actor.createActor(idlFactory, { 36 | agent, 37 | canisterId, 38 | ...options.actorOptions, 39 | }); 40 | }; 41 | -------------------------------------------------------------------------------- /src/ic_oss_ts/candid/ic_oss_bucket/index.js: -------------------------------------------------------------------------------- 1 | import { Actor, HttpAgent } from "@dfinity/agent"; 2 | 3 | // Imports and re-exports candid interface 4 | import { idlFactory } from "./ic_oss_bucket.did.js"; 5 | export { idlFactory } from "./ic_oss_bucket.did.js"; 6 | 7 | /* CANISTER_ID is replaced by webpack based on node environment 8 | * Note: canister environment variable will be standardized as 9 | * process.env.CANISTER_ID_ 10 | * beginning in dfx 0.15.0 11 | */ 12 | export const canisterId = 13 | process.env.CANISTER_ID_IC_OSS_BUCKET; 14 | 15 | export const createActor = (canisterId, options = {}) => { 16 | const agent = options.agent || new HttpAgent({ ...options.agentOptions }); 17 | 18 | if (options.agent && options.agentOptions) { 19 | console.warn( 20 | "Detected both agent and agentOptions passed to createActor. 
Ignoring agentOptions and proceeding with the provided agent." 21 | ); 22 | } 23 | 24 | // Fetch root key for certificate validation during development 25 | if (process.env.DFX_NETWORK !== "ic") { 26 | agent.fetchRootKey().catch((err) => { 27 | console.warn( 28 | "Unable to fetch root key. Check to ensure that your local replica is running" 29 | ); 30 | console.error(err); 31 | }); 32 | } 33 | 34 | // Creates an actor with using the candid interface and the HttpAgent 35 | return Actor.createActor(idlFactory, { 36 | agent, 37 | canisterId, 38 | ...options.actorOptions, 39 | }); 40 | }; 41 | -------------------------------------------------------------------------------- /src/ic_oss_ts/candid/ic_oss_cluster/index.js: -------------------------------------------------------------------------------- 1 | import { Actor, HttpAgent } from "@dfinity/agent"; 2 | 3 | // Imports and re-exports candid interface 4 | import { idlFactory } from "./ic_oss_cluster.did.js"; 5 | export { idlFactory } from "./ic_oss_cluster.did.js"; 6 | 7 | /* CANISTER_ID is replaced by webpack based on node environment 8 | * Note: canister environment variable will be standardized as 9 | * process.env.CANISTER_ID_ 10 | * beginning in dfx 0.15.0 11 | */ 12 | export const canisterId = 13 | process.env.CANISTER_ID_IC_OSS_CLUSTER; 14 | 15 | export const createActor = (canisterId, options = {}) => { 16 | const agent = options.agent || new HttpAgent({ ...options.agentOptions }); 17 | 18 | if (options.agent && options.agentOptions) { 19 | console.warn( 20 | "Detected both agent and agentOptions passed to createActor. Ignoring agentOptions and proceeding with the provided agent." 21 | ); 22 | } 23 | 24 | // Fetch root key for certificate validation during development 25 | if (process.env.DFX_NETWORK !== "ic") { 26 | agent.fetchRootKey().catch((err) => { 27 | console.warn( 28 | "Unable to fetch root key. Check to ensure that your local replica is running" 29 | ); 30 | console.error(err); 31 | }); 32 | } 33 | 34 | // Creates an actor with using the candid interface and the HttpAgent 35 | return Actor.createActor(idlFactory, { 36 | agent, 37 | canisterId, 38 | ...options.actorOptions, 39 | }); 40 | }; 41 | -------------------------------------------------------------------------------- /src/declarations/ic_object_store_canister/index.js: -------------------------------------------------------------------------------- 1 | import { Actor, HttpAgent } from "@dfinity/agent"; 2 | 3 | // Imports and re-exports candid interface 4 | import { idlFactory } from "./ic_object_store_canister.did.js"; 5 | export { idlFactory } from "./ic_object_store_canister.did.js"; 6 | 7 | /* CANISTER_ID is replaced by webpack based on node environment 8 | * Note: canister environment variable will be standardized as 9 | * process.env.CANISTER_ID_ 10 | * beginning in dfx 0.15.0 11 | */ 12 | export const canisterId = 13 | process.env.CANISTER_ID_IC_OBJECT_STORE_CANISTER; 14 | 15 | export const createActor = (canisterId, options = {}) => { 16 | const agent = options.agent || new HttpAgent({ ...options.agentOptions }); 17 | 18 | if (options.agent && options.agentOptions) { 19 | console.warn( 20 | "Detected both agent and agentOptions passed to createActor. Ignoring agentOptions and proceeding with the provided agent." 21 | ); 22 | } 23 | 24 | // Fetch root key for certificate validation during development 25 | if (process.env.DFX_NETWORK !== "ic") { 26 | agent.fetchRootKey().catch((err) => { 27 | console.warn( 28 | "Unable to fetch root key. 
Check to ensure that your local replica is running" 29 | ); 30 | console.error(err); 31 | }); 32 | } 33 | 34 | // Creates an actor with using the candid interface and the HttpAgent 35 | return Actor.createActor(idlFactory, { 36 | agent, 37 | canisterId, 38 | ...options.actorOptions, 39 | }); 40 | }; 41 | -------------------------------------------------------------------------------- /src/ic_oss_ts/candid/ic_object_store_canister/index.js: -------------------------------------------------------------------------------- 1 | import { Actor, HttpAgent } from "@dfinity/agent"; 2 | 3 | // Imports and re-exports candid interface 4 | import { idlFactory } from "./ic_object_store_canister.did.js"; 5 | export { idlFactory } from "./ic_object_store_canister.did.js"; 6 | 7 | /* CANISTER_ID is replaced by webpack based on node environment 8 | * Note: canister environment variable will be standardized as 9 | * process.env.CANISTER_ID_ 10 | * beginning in dfx 0.15.0 11 | */ 12 | export const canisterId = 13 | process.env.CANISTER_ID_IC_OBJECT_STORE_CANISTER; 14 | 15 | export const createActor = (canisterId, options = {}) => { 16 | const agent = options.agent || new HttpAgent({ ...options.agentOptions }); 17 | 18 | if (options.agent && options.agentOptions) { 19 | console.warn( 20 | "Detected both agent and agentOptions passed to createActor. Ignoring agentOptions and proceeding with the provided agent." 21 | ); 22 | } 23 | 24 | // Fetch root key for certificate validation during development 25 | if (process.env.DFX_NETWORK !== "ic") { 26 | agent.fetchRootKey().catch((err) => { 27 | console.warn( 28 | "Unable to fetch root key. Check to ensure that your local replica is running" 29 | ); 30 | console.error(err); 31 | }); 32 | } 33 | 34 | // Creates an actor with using the candid interface and the HttpAgent 35 | return Actor.createActor(idlFactory, { 36 | agent, 37 | canisterId, 38 | ...options.actorOptions, 39 | }); 40 | }; 41 | -------------------------------------------------------------------------------- /src/ic_oss_ts/src/queue.ts: -------------------------------------------------------------------------------- 1 | export type Task = ( 2 | aborter: AbortController, 3 | concurrency: number 4 | ) => Promise 5 | 6 | export class ConcurrencyQueue { 7 | #concurrency: number 8 | #total: number = 0 9 | #aborter: AbortController = new AbortController() 10 | #reject: (reason: unknown) => void = () => {} 11 | #queue: [Task, () => void][] = [] 12 | #pending: Set = new Set() 13 | #results: Set> = new Set() 14 | 15 | constructor(concurrency: number) { 16 | this.#concurrency = concurrency 17 | } 18 | 19 | #next() { 20 | if (this.#pending.size < this.#concurrency && this.#queue.length > 0) { 21 | const [fn, resolve] = this.#queue.shift()! 
22 | this.#pending.add(fn) 23 | const result = fn(this.#aborter, this.#pending.size) 24 | this.#results.add(result) 25 | 26 | result 27 | .then(() => (this.#total += 1)) 28 | .catch((err) => this.#abort(err)) 29 | .finally(() => { 30 | this.#pending.delete(fn) 31 | this.#results.delete(result) 32 | this.#next() 33 | }) 34 | 35 | resolve() 36 | this.#next() 37 | } 38 | } 39 | 40 | #abort(reason: unknown) { 41 | this.#aborter.abort(reason) 42 | this.#reject(reason) 43 | } 44 | 45 | push(fn: Task): Promise { 46 | return new Promise((resolve, reject) => { 47 | this.#reject = reject 48 | this.#queue.push([fn, resolve]) 49 | this.#next() 50 | }) 51 | } 52 | 53 | wait(): Promise { 54 | return new Promise((resolve, reject) => { 55 | this.#reject = reject 56 | 57 | Promise.all(this.#results) 58 | .then(() => resolve(this.#total)) 59 | .catch(reject) 60 | }) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /src/ic_oss_ts/src/stream.test.ts: -------------------------------------------------------------------------------- 1 | import { open } from 'node:fs/promises' 2 | import { expect, test } from 'vitest' 3 | import { type ReadableStream } from 'web-streams-polyfill' 4 | import { 5 | readableStreamAsyncIterator, 6 | readAll, 7 | streamToFixedChunkSizeReadable, 8 | uint8ArrayToFixedChunkSizeReadable 9 | } from './stream' 10 | 11 | test('uint8ArrayToFixedChunkSizeReadable', async () => { 12 | const src = new Uint8Array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) 13 | let stream = uint8ArrayToFixedChunkSizeReadable(3, src) 14 | 15 | let data = await readAll(stream, 10) 16 | expect(data).toEqual(src) 17 | 18 | stream = uint8ArrayToFixedChunkSizeReadable(3, src) 19 | await expect(() => readAll(stream, 8)).rejects.toThrow( 20 | 'failed to read all data' 21 | ) 22 | 23 | stream = uint8ArrayToFixedChunkSizeReadable(3, src) 24 | await expect(() => readAll(stream, 12)).rejects.toThrow( 25 | 'failed to read all data' 26 | ) 27 | }) 28 | 29 | test('streamToFixedChunkSizeReadable', async () => { 30 | for (const name of [ 31 | 'package.json', 32 | 'tsconfig.json', 33 | '.prettierrc.json', 34 | '.eslintrc.js' 35 | ]) { 36 | let fs = await open(name, 'r') 37 | const stat = await fs.stat() 38 | const src = await fs.readFile() 39 | fs.close() 40 | 41 | fs = await open(name, 'r') 42 | const stream = streamToFixedChunkSizeReadable( 43 | 64, 44 | fs.readableWebStream() as any as ReadableStream, 45 | fs 46 | ) 47 | 48 | let offset = 0 49 | const data = new Uint8Array(stat.size) 50 | for await (const chunk of readableStreamAsyncIterator(stream)) { 51 | data.set(chunk, offset) 52 | offset += chunk.byteLength 53 | } 54 | 55 | expect(Buffer.from(data)).toEqual(src) 56 | } 57 | }) 58 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "src/ic_oss", 4 | "src/ic_object_store", 5 | "src/ic_object_store_canister", 6 | "src/ic_oss_bucket", 7 | "src/ic_oss_cluster", 8 | "src/ic_oss_types", 9 | "src/ic_oss_can", 10 | "src/ic_oss_cli", 11 | "examples/ai_canister", 12 | ] 13 | resolver = "2" 14 | 15 | [profile.release] 16 | debug = false 17 | lto = true 18 | strip = true 19 | opt-level = 's' 20 | 21 | [workspace.package] 22 | version = "1.2.3" 23 | edition = "2021" 24 | repository = "https://github.com/ldclabs/ic-oss" 25 | keywords = ["object_store", "storage", "oss", "s3", "icp"] 26 | categories = ["web-programming"] 27 | license = "MIT 
OR Apache-2.0" 28 | 29 | [workspace.dependencies] 30 | async-trait = "0.1" 31 | async-stream = "0.3" 32 | bytes = "1" 33 | base64 = "0.22" 34 | candid = "0.10" 35 | ciborium = "0.2" 36 | coset = "0.4" 37 | futures = "0.3" 38 | serde = "1" 39 | serde_bytes = "0.11" 40 | object_store = { version = "0.12", default-features = false } 41 | chrono = { version = "0.4", default-features = false } 42 | tokio = { version = "1", features = ["full"] } 43 | tokio-stream = "0.1" 44 | tokio-util = { version = "0.7", features = ["codec"] } 45 | k256 = { version = "0.13", features = ["ecdsa"] } 46 | ed25519-dalek = "2" 47 | hex = "0.4" 48 | sha2 = "0.10" 49 | sha3 = "0.10" 50 | aes-gcm = "0.10" 51 | num-traits = "0.2" 52 | ic-cdk = "0.19" 53 | ic-cdk-timers = "1.0" 54 | ic-stable-structures = "0.7" 55 | icrc-ledger-types = "0.1" 56 | ic-http-certification = { version = "3.0" } 57 | ic_cose_types = "0.9" 58 | ic-agent = "0.44" 59 | anyhow = "1" 60 | crc32fast = "1.4" 61 | url = "2.5" 62 | once_cell = "1.21" 63 | rand = "0.9" 64 | ic-dummy-getrandom-for-wasm = "0.1" 65 | getrandom = { version = "0.3" } 66 | thiserror = "2" 67 | 68 | [workspace.metadata.cargo-shear] 69 | ignored = ["ic-dummy-getrandom-for-wasm"] 70 | -------------------------------------------------------------------------------- /src/declarations/ic_oss_bucket/index.d.ts: -------------------------------------------------------------------------------- 1 | import type { 2 | ActorSubclass, 3 | HttpAgentOptions, 4 | ActorConfig, 5 | Agent, 6 | } from "@dfinity/agent"; 7 | import type { Principal } from "@dfinity/principal"; 8 | import type { IDL } from "@dfinity/candid"; 9 | 10 | import { _SERVICE } from './ic_oss_bucket.did'; 11 | 12 | export declare const idlFactory: IDL.InterfaceFactory; 13 | export declare const canisterId: string; 14 | 15 | export declare interface CreateActorOptions { 16 | /** 17 | * @see {@link Agent} 18 | */ 19 | agent?: Agent; 20 | /** 21 | * @see {@link HttpAgentOptions} 22 | */ 23 | agentOptions?: HttpAgentOptions; 24 | /** 25 | * @see {@link ActorConfig} 26 | */ 27 | actorOptions?: ActorConfig; 28 | } 29 | 30 | /** 31 | * Intializes an {@link ActorSubclass}, configured with the provided SERVICE interface of a canister. 32 | * @constructs {@link ActorSubClass} 33 | * @param {string | Principal} canisterId - ID of the canister the {@link Actor} will talk to 34 | * @param {CreateActorOptions} options - see {@link CreateActorOptions} 35 | * @param {CreateActorOptions["agent"]} options.agent - a pre-configured agent you'd like to use. 
Supercedes agentOptions 36 | * @param {CreateActorOptions["agentOptions"]} options.agentOptions - options to set up a new agent 37 | * @see {@link HttpAgentOptions} 38 | * @param {CreateActorOptions["actorOptions"]} options.actorOptions - options for the Actor 39 | * @see {@link ActorConfig} 40 | */ 41 | export declare const createActor: ( 42 | canisterId: string | Principal, 43 | options?: CreateActorOptions 44 | ) => ActorSubclass<_SERVICE>; 45 | 46 | /** 47 | * Intialized Actor using default settings, ready to talk to a canister using its candid interface 48 | * @constructs {@link ActorSubClass} 49 | */ 50 | export declare const ic_oss_bucket: ActorSubclass<_SERVICE>; 51 | -------------------------------------------------------------------------------- /src/ic_oss_ts/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@ldclabs/ic_oss_ts", 3 | "version": "1.2.3", 4 | "type": "module", 5 | "description": "The Typescript version of the client SDK for the ic-oss cluster.", 6 | "license": "MIT", 7 | "homepage": "https://github.com/ldclabs/ic-oss", 8 | "repository": { 9 | "type": "git", 10 | "url": "git+https://github.com/ldclabs/ic-oss.git" 11 | }, 12 | "engines": { 13 | "node": ">=18.16.0" 14 | }, 15 | "browser": { 16 | "node:fs/promises": false, 17 | "node:path": false, 18 | "mime/lite": false 19 | }, 20 | "files": [ 21 | "candid", 22 | "dist", 23 | "src", 24 | "package.json", 25 | "LICENSE", 26 | "README.md" 27 | ], 28 | "main": "./dist/index.js", 29 | "typings": "./dist/index.d.ts", 30 | "scripts": { 31 | "prebuild": "dfx generate && rm -rf candid && cp -r '../declarations' './candid'", 32 | "build": "rm -rf dist && tsc -p tsconfig.json", 33 | "format": "prettier --write \"src/**/*.{json,js,jsx,ts,tsx,css,scss}\"", 34 | "test": "vitest run", 35 | "ncu": "npx npm-check-updates -u" 36 | }, 37 | "dependencies": { 38 | "@dfinity/agent": ">=3.4.0", 39 | "@dfinity/candid": ">=3.4.0", 40 | "@dfinity/principal": ">=3.4.0", 41 | "@dfinity/utils": ">=4.0.0", 42 | "@noble/hashes": ">=1.8.0", 43 | "mime": "^4.1.0", 44 | "web-streams-polyfill": "^4.2.0" 45 | }, 46 | "devDependencies": { 47 | "@types/eslint": "^9.6.1", 48 | "@types/node": "24.10.1", 49 | "@typescript-eslint/eslint-plugin": "^8.48.1", 50 | "@typescript-eslint/parser": "^8.48.1", 51 | "eslint": "^9.39.1", 52 | "eslint-config-prettier": "^10.1.8", 53 | "eslint-plugin-import": "^2.32.0", 54 | "eslint-plugin-prettier": "^5.5.4", 55 | "prettier": "^3.7.4", 56 | "typescript": "^5.9.3", 57 | "vitest": "^4.0.15" 58 | } 59 | } -------------------------------------------------------------------------------- /src/declarations/ic_oss_cluster/index.d.ts: -------------------------------------------------------------------------------- 1 | import type { 2 | ActorSubclass, 3 | HttpAgentOptions, 4 | ActorConfig, 5 | Agent, 6 | } from "@dfinity/agent"; 7 | import type { Principal } from "@dfinity/principal"; 8 | import type { IDL } from "@dfinity/candid"; 9 | 10 | import { _SERVICE } from './ic_oss_cluster.did'; 11 | 12 | export declare const idlFactory: IDL.InterfaceFactory; 13 | export declare const canisterId: string; 14 | 15 | export declare interface CreateActorOptions { 16 | /** 17 | * @see {@link Agent} 18 | */ 19 | agent?: Agent; 20 | /** 21 | * @see {@link HttpAgentOptions} 22 | */ 23 | agentOptions?: HttpAgentOptions; 24 | /** 25 | * @see {@link ActorConfig} 26 | */ 27 | actorOptions?: ActorConfig; 28 | } 29 | 30 | /** 31 | * Intializes an {@link ActorSubclass}, configured with the 
provided SERVICE interface of a canister. 32 | * @constructs {@link ActorSubClass} 33 | * @param {string | Principal} canisterId - ID of the canister the {@link Actor} will talk to 34 | * @param {CreateActorOptions} options - see {@link CreateActorOptions} 35 | * @param {CreateActorOptions["agent"]} options.agent - a pre-configured agent you'd like to use. Supercedes agentOptions 36 | * @param {CreateActorOptions["agentOptions"]} options.agentOptions - options to set up a new agent 37 | * @see {@link HttpAgentOptions} 38 | * @param {CreateActorOptions["actorOptions"]} options.actorOptions - options for the Actor 39 | * @see {@link ActorConfig} 40 | */ 41 | export declare const createActor: ( 42 | canisterId: string | Principal, 43 | options?: CreateActorOptions 44 | ) => ActorSubclass<_SERVICE>; 45 | 46 | /** 47 | * Intialized Actor using default settings, ready to talk to a canister using its candid interface 48 | * @constructs {@link ActorSubClass} 49 | */ 50 | export declare const ic_oss_cluster: ActorSubclass<_SERVICE>; 51 | -------------------------------------------------------------------------------- /src/ic_oss_ts/candid/ic_oss_bucket/index.d.ts: -------------------------------------------------------------------------------- 1 | import type { 2 | ActorSubclass, 3 | HttpAgentOptions, 4 | ActorConfig, 5 | Agent, 6 | } from "@dfinity/agent"; 7 | import type { Principal } from "@dfinity/principal"; 8 | import type { IDL } from "@dfinity/candid"; 9 | 10 | import { _SERVICE } from './ic_oss_bucket.did'; 11 | 12 | export declare const idlFactory: IDL.InterfaceFactory; 13 | export declare const canisterId: string; 14 | 15 | export declare interface CreateActorOptions { 16 | /** 17 | * @see {@link Agent} 18 | */ 19 | agent?: Agent; 20 | /** 21 | * @see {@link HttpAgentOptions} 22 | */ 23 | agentOptions?: HttpAgentOptions; 24 | /** 25 | * @see {@link ActorConfig} 26 | */ 27 | actorOptions?: ActorConfig; 28 | } 29 | 30 | /** 31 | * Intializes an {@link ActorSubclass}, configured with the provided SERVICE interface of a canister. 32 | * @constructs {@link ActorSubClass} 33 | * @param {string | Principal} canisterId - ID of the canister the {@link Actor} will talk to 34 | * @param {CreateActorOptions} options - see {@link CreateActorOptions} 35 | * @param {CreateActorOptions["agent"]} options.agent - a pre-configured agent you'd like to use. 
Supercedes agentOptions 36 | * @param {CreateActorOptions["agentOptions"]} options.agentOptions - options to set up a new agent 37 | * @see {@link HttpAgentOptions} 38 | * @param {CreateActorOptions["actorOptions"]} options.actorOptions - options for the Actor 39 | * @see {@link ActorConfig} 40 | */ 41 | export declare const createActor: ( 42 | canisterId: string | Principal, 43 | options?: CreateActorOptions 44 | ) => ActorSubclass<_SERVICE>; 45 | 46 | /** 47 | * Intialized Actor using default settings, ready to talk to a canister using its candid interface 48 | * @constructs {@link ActorSubClass} 49 | */ 50 | export declare const ic_oss_bucket: ActorSubclass<_SERVICE>; 51 | -------------------------------------------------------------------------------- /src/ic_oss_ts/candid/ic_oss_cluster/index.d.ts: -------------------------------------------------------------------------------- 1 | import type { 2 | ActorSubclass, 3 | HttpAgentOptions, 4 | ActorConfig, 5 | Agent, 6 | } from "@dfinity/agent"; 7 | import type { Principal } from "@dfinity/principal"; 8 | import type { IDL } from "@dfinity/candid"; 9 | 10 | import { _SERVICE } from './ic_oss_cluster.did'; 11 | 12 | export declare const idlFactory: IDL.InterfaceFactory; 13 | export declare const canisterId: string; 14 | 15 | export declare interface CreateActorOptions { 16 | /** 17 | * @see {@link Agent} 18 | */ 19 | agent?: Agent; 20 | /** 21 | * @see {@link HttpAgentOptions} 22 | */ 23 | agentOptions?: HttpAgentOptions; 24 | /** 25 | * @see {@link ActorConfig} 26 | */ 27 | actorOptions?: ActorConfig; 28 | } 29 | 30 | /** 31 | * Intializes an {@link ActorSubclass}, configured with the provided SERVICE interface of a canister. 32 | * @constructs {@link ActorSubClass} 33 | * @param {string | Principal} canisterId - ID of the canister the {@link Actor} will talk to 34 | * @param {CreateActorOptions} options - see {@link CreateActorOptions} 35 | * @param {CreateActorOptions["agent"]} options.agent - a pre-configured agent you'd like to use. 
Supercedes agentOptions 36 | * @param {CreateActorOptions["agentOptions"]} options.agentOptions - options to set up a new agent 37 | * @see {@link HttpAgentOptions} 38 | * @param {CreateActorOptions["actorOptions"]} options.actorOptions - options for the Actor 39 | * @see {@link ActorConfig} 40 | */ 41 | export declare const createActor: ( 42 | canisterId: string | Principal, 43 | options?: CreateActorOptions 44 | ) => ActorSubclass<_SERVICE>; 45 | 46 | /** 47 | * Intialized Actor using default settings, ready to talk to a canister using its candid interface 48 | * @constructs {@link ActorSubClass} 49 | */ 50 | export declare const ic_oss_cluster: ActorSubclass<_SERVICE>; 51 | -------------------------------------------------------------------------------- /src/declarations/ic_object_store_canister/index.d.ts: -------------------------------------------------------------------------------- 1 | import type { 2 | ActorSubclass, 3 | HttpAgentOptions, 4 | ActorConfig, 5 | Agent, 6 | } from "@dfinity/agent"; 7 | import type { Principal } from "@dfinity/principal"; 8 | import type { IDL } from "@dfinity/candid"; 9 | 10 | import { _SERVICE } from './ic_object_store_canister.did'; 11 | 12 | export declare const idlFactory: IDL.InterfaceFactory; 13 | export declare const canisterId: string; 14 | 15 | export declare interface CreateActorOptions { 16 | /** 17 | * @see {@link Agent} 18 | */ 19 | agent?: Agent; 20 | /** 21 | * @see {@link HttpAgentOptions} 22 | */ 23 | agentOptions?: HttpAgentOptions; 24 | /** 25 | * @see {@link ActorConfig} 26 | */ 27 | actorOptions?: ActorConfig; 28 | } 29 | 30 | /** 31 | * Intializes an {@link ActorSubclass}, configured with the provided SERVICE interface of a canister. 32 | * @constructs {@link ActorSubClass} 33 | * @param {string | Principal} canisterId - ID of the canister the {@link Actor} will talk to 34 | * @param {CreateActorOptions} options - see {@link CreateActorOptions} 35 | * @param {CreateActorOptions["agent"]} options.agent - a pre-configured agent you'd like to use. 
Supercedes agentOptions 36 | * @param {CreateActorOptions["agentOptions"]} options.agentOptions - options to set up a new agent 37 | * @see {@link HttpAgentOptions} 38 | * @param {CreateActorOptions["actorOptions"]} options.actorOptions - options for the Actor 39 | * @see {@link ActorConfig} 40 | */ 41 | export declare const createActor: ( 42 | canisterId: string | Principal, 43 | options?: CreateActorOptions 44 | ) => ActorSubclass<_SERVICE>; 45 | 46 | /** 47 | * Intialized Actor using default settings, ready to talk to a canister using its candid interface 48 | * @constructs {@link ActorSubClass} 49 | */ 50 | export declare const ic_object_store_canister: ActorSubclass<_SERVICE>; 51 | -------------------------------------------------------------------------------- /src/ic_oss_ts/candid/ic_object_store_canister/index.d.ts: -------------------------------------------------------------------------------- 1 | import type { 2 | ActorSubclass, 3 | HttpAgentOptions, 4 | ActorConfig, 5 | Agent, 6 | } from "@dfinity/agent"; 7 | import type { Principal } from "@dfinity/principal"; 8 | import type { IDL } from "@dfinity/candid"; 9 | 10 | import { _SERVICE } from './ic_object_store_canister.did'; 11 | 12 | export declare const idlFactory: IDL.InterfaceFactory; 13 | export declare const canisterId: string; 14 | 15 | export declare interface CreateActorOptions { 16 | /** 17 | * @see {@link Agent} 18 | */ 19 | agent?: Agent; 20 | /** 21 | * @see {@link HttpAgentOptions} 22 | */ 23 | agentOptions?: HttpAgentOptions; 24 | /** 25 | * @see {@link ActorConfig} 26 | */ 27 | actorOptions?: ActorConfig; 28 | } 29 | 30 | /** 31 | * Intializes an {@link ActorSubclass}, configured with the provided SERVICE interface of a canister. 32 | * @constructs {@link ActorSubClass} 33 | * @param {string | Principal} canisterId - ID of the canister the {@link Actor} will talk to 34 | * @param {CreateActorOptions} options - see {@link CreateActorOptions} 35 | * @param {CreateActorOptions["agent"]} options.agent - a pre-configured agent you'd like to use. 
Supercedes agentOptions 36 | * @param {CreateActorOptions["agentOptions"]} options.agentOptions - options to set up a new agent 37 | * @see {@link HttpAgentOptions} 38 | * @param {CreateActorOptions["actorOptions"]} options.actorOptions - options for the Actor 39 | * @see {@link ActorConfig} 40 | */ 41 | export declare const createActor: ( 42 | canisterId: string | Principal, 43 | options?: CreateActorOptions 44 | ) => ActorSubclass<_SERVICE>; 45 | 46 | /** 47 | * Intialized Actor using default settings, ready to talk to a canister using its candid interface 48 | * @constructs {@link ActorSubClass} 49 | */ 50 | export declare const ic_object_store_canister: ActorSubclass<_SERVICE>; 51 | -------------------------------------------------------------------------------- /src/ic_oss_types/src/cluster.rs: -------------------------------------------------------------------------------- 1 | use candid::{CandidType, Principal}; 2 | use serde::{Deserialize, Serialize}; 3 | use serde_bytes::{ByteArray, ByteBuf}; 4 | use std::collections::BTreeSet; 5 | 6 | #[derive(CandidType, Clone, Debug, Default, Deserialize, Serialize)] 7 | pub struct ClusterInfo { 8 | pub name: String, 9 | pub ecdsa_key_name: String, 10 | pub schnorr_key_name: String, 11 | pub ecdsa_token_public_key: String, 12 | pub schnorr_ed25519_token_public_key: String, 13 | pub weak_ed25519_token_public_key: String, 14 | pub token_expiration: u64, // in seconds 15 | pub managers: BTreeSet, 16 | pub committers: BTreeSet, 17 | pub subject_authz_total: u64, 18 | pub bucket_latest_version: ByteArray<32>, 19 | pub bucket_wasm_total: u64, 20 | pub bucket_deployed_total: u64, 21 | pub bucket_deployment_logs: u64, 22 | pub governance_canister: Option, 23 | } 24 | 25 | #[derive(CandidType, Clone, Debug, Deserialize, Serialize)] 26 | pub struct WasmInfo { 27 | pub created_at: u64, // in milliseconds 28 | pub created_by: Principal, 29 | pub description: String, 30 | pub wasm: ByteBuf, 31 | pub hash: ByteArray<32>, // sha256 hash of the wasm data 32 | } 33 | 34 | #[derive(CandidType, Clone, Debug, Deserialize, Serialize)] 35 | pub struct AddWasmInput { 36 | pub description: String, 37 | pub wasm: ByteBuf, 38 | } 39 | 40 | #[derive(CandidType, Clone, Debug, Deserialize, Serialize)] 41 | pub struct DeployWasmInput { 42 | pub canister: Principal, 43 | pub args: Option, 44 | } 45 | 46 | #[derive(CandidType, Clone, Debug, Deserialize, Serialize)] 47 | pub struct BucketDeploymentInfo { 48 | pub deploy_at: u64, // in milliseconds 49 | pub canister: Principal, 50 | pub prev_hash: ByteArray<32>, 51 | pub wasm_hash: ByteArray<32>, 52 | pub args: Option, 53 | pub error: Option, 54 | } 55 | -------------------------------------------------------------------------------- /src/ic_oss_types/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![doc(html_root_url = "https://docs.rs/ic-oss-types/latest")] 2 | #![allow(clippy::needless_doctest_main)] 3 | 4 | use candid::{Nat, Principal}; 5 | use ciborium::into_writer; 6 | use num_traits::cast::ToPrimitive; 7 | use serde::Serialize; 8 | use sha3::Digest; 9 | use std::collections::{BTreeMap, BTreeSet}; 10 | 11 | pub mod bucket; 12 | pub mod cluster; 13 | pub mod cose; 14 | pub mod file; 15 | pub mod folder; 16 | pub mod object_store; 17 | pub mod permission; 18 | 19 | pub static ANONYMOUS: Principal = Principal::anonymous(); 20 | pub const MILLISECONDS: u64 = 1_000_000u64; 21 | 22 | // should update to ICRC3Map 23 | pub type MapValue = 24 | BTreeMap; 25 | 26 | pub fn 
format_error(err: T) -> String 27 | where 28 | T: std::fmt::Debug, 29 | { 30 | format!("{:?}", err) 31 | } 32 | 33 | pub fn crc32(data: &[u8]) -> u32 { 34 | let mut h = crc32fast::Hasher::new(); 35 | h.update(data); 36 | h.finalize() 37 | } 38 | 39 | pub fn nat_to_u64(nat: &Nat) -> u64 { 40 | nat.0.to_u64().unwrap_or(0) 41 | } 42 | 43 | // to_cbor_bytes returns the CBOR encoding of the given object that implements the Serialize trait. 44 | pub fn to_cbor_bytes(obj: &impl Serialize) -> Vec { 45 | let mut buf: Vec = Vec::new(); 46 | into_writer(obj, &mut buf).expect("failed to encode in CBOR format"); 47 | buf 48 | } 49 | 50 | pub fn sha3_256(data: &[u8]) -> [u8; 32] { 51 | let mut hasher = sha3::Sha3_256::new(); 52 | hasher.update(data); 53 | hasher.finalize().into() 54 | } 55 | 56 | pub fn validate_principals(principals: &BTreeSet) -> Result<(), String> { 57 | if principals.is_empty() { 58 | return Err("principals cannot be empty".to_string()); 59 | } 60 | if principals.contains(&ANONYMOUS) { 61 | return Err("anonymous user is not allowed".to_string()); 62 | } 63 | Ok(()) 64 | } 65 | -------------------------------------------------------------------------------- /src/ic_oss_ts/.eslintrc.js: -------------------------------------------------------------------------------- 1 | /** @type {import('eslint').Linter.Config} */ 2 | module.exports = { 3 | root: true, 4 | env: { 5 | browser: true, 6 | esnext: true, 7 | node: true 8 | }, 9 | extends: [ 10 | 'eslint:recommended', 11 | 'standard', 12 | 'prettier/@typescript-eslint', 13 | 'plugin:@typescript-eslint/recommended', 14 | 'plugin:import/recommended', 15 | 'plugin:prettier/recommended', 16 | 'prettier' 17 | ], 18 | parser: '@typescript-eslint/parser', 19 | parserOptions: { 20 | ecmaVersion: 'latest', 21 | sourceType: 'module', 22 | project: 'tsconfig.json', 23 | extraFileExtensions: [] 24 | }, 25 | plugins: ['@typescript-eslint', 'import', 'prettier'], 26 | rules: { 27 | '@typescript-eslint/consistent-type-exports': [ 28 | 'error', 29 | { fixMixedExportsWithInlineTypeSpecifier: true } 30 | ], 31 | '@typescript-eslint/consistent-type-imports': [ 32 | 'error', 33 | { fixStyle: 'inline-type-imports' } 34 | ], 35 | '@typescript-eslint/no-empty-function': 'off', 36 | '@typescript-eslint/no-empty-interface': 'off', 37 | '@typescript-eslint/no-unused-vars': 'off', 38 | 'import/named': 'off', 39 | 'import/newline-after-import': 'error', 40 | 'import/no-unresolved': 'off', 41 | 'import/order': [ 42 | 'error', 43 | { 44 | groups: [ 45 | ['builtin', 'external', 'internal'], 46 | 'parent', 47 | ['sibling', 'index'] 48 | ], 49 | 'newlines-between': 'never', 50 | alphabetize: { order: 'ignore' } 51 | } 52 | ], 53 | 'no-console': 'warn', 54 | 'no-restricted-imports': [ 55 | 'error', 56 | { 57 | 'paths': [] 58 | } 59 | ], 60 | 'no-useless-rename': 'error', 61 | 'object-shorthand': ['error', 'always'] 62 | }, 63 | settings: { 64 | 'import/internal-regex': '^#' 65 | }, 66 | ignorePatterns: ['dist', 'node_modules', 'examples', 'scripts'] 67 | } 68 | -------------------------------------------------------------------------------- /src/ic_oss/src/agent.rs: -------------------------------------------------------------------------------- 1 | use candid::{ 2 | utils::{encode_args, ArgumentEncoder}, 3 | CandidType, Decode, Principal, 4 | }; 5 | use ic_agent::{Agent, Identity}; 6 | use ic_oss_types::format_error; 7 | use std::sync::Arc; 8 | 9 | pub async fn build_agent(host: &str, identity: Arc) -> Result { 10 | let agent = Agent::builder() 11 | .with_url(host) 12 | 
.with_arc_identity(identity) 13 | .with_verify_query_signatures(false); 14 | 15 | let agent = if host.starts_with("https://") { 16 | agent 17 | .with_background_dynamic_routing() 18 | .build() 19 | .map_err(format_error)? 20 | } else { 21 | agent.build().map_err(format_error)? 22 | }; 23 | 24 | if host.starts_with("http://") { 25 | // ignore errors 26 | let _ = agent.fetch_root_key().await; 27 | } 28 | 29 | Ok(agent) 30 | } 31 | 32 | pub async fn update_call( 33 | agent: &Agent, 34 | canister_id: &Principal, 35 | method_name: &str, 36 | args: In, 37 | ) -> Result 38 | where 39 | In: ArgumentEncoder + Send, 40 | Out: CandidType + for<'a> candid::Deserialize<'a>, 41 | { 42 | let input = encode_args(args).map_err(format_error)?; 43 | let res = agent 44 | .update(canister_id, method_name) 45 | .with_arg(input) 46 | .call_and_wait() 47 | .await 48 | .map_err(format_error)?; 49 | let output = Decode!(res.as_slice(), Out).map_err(format_error)?; 50 | Ok(output) 51 | } 52 | 53 | pub async fn query_call( 54 | agent: &Agent, 55 | canister_id: &Principal, 56 | method_name: &str, 57 | args: In, 58 | ) -> Result 59 | where 60 | In: ArgumentEncoder + Send, 61 | Out: CandidType + for<'a> candid::Deserialize<'a>, 62 | { 63 | let input = encode_args(args).map_err(format_error)?; 64 | let res = agent 65 | .query(canister_id, method_name) 66 | .with_arg(input) 67 | .call() 68 | .await 69 | .map_err(format_error)?; 70 | let output = Decode!(res.as_slice(), Out).map_err(format_error)?; 71 | Ok(output) 72 | } 73 | -------------------------------------------------------------------------------- /src/ic_oss_cli/README.md: -------------------------------------------------------------------------------- 1 | # `ic-oss-cli` 2 | ![License](https://img.shields.io/crates/l/ic-oss-cli.svg) 3 | [![Crates.io](https://img.shields.io/crates/d/ic-oss-cli.svg)](https://crates.io/crates/ic-oss-cli) 4 | [![Test](https://github.com/ldclabs/ic-oss/actions/workflows/test.yml/badge.svg)](https://github.com/ldclabs/ic-oss/actions/workflows/test.yml) 5 | [![Docs.rs](https://img.shields.io/docsrs/ic-oss-cli?label=docs.rs)](https://docs.rs/ic-oss-cli) 6 | [![Latest Version](https://img.shields.io/crates/v/ic-oss-cli.svg)](https://crates.io/crates/ic-oss-cli) 7 | 8 | A command-line tool implemented in Rust for [ic-oss](https://github.com/ldclabs/ic-oss), a decentralized Object Storage Service on the Internet Computer. 
9 | 10 | ## Installation 11 | 12 | ### Via Cargo 13 | ```sh 14 | cargo install ic-oss-cli 15 | # get help info 16 | ic-oss-cli --help 17 | ``` 18 | 19 | ### From Source 20 | ```sh 21 | git clone https://github.com/ldclabs/ic-oss.git 22 | cd ic-oss 23 | cargo build -p ic-oss-cli --release 24 | # get help info 25 | target/release/ic-oss-cli --help 26 | ``` 27 | 28 | ## Quick Start 29 | 30 | ### Identity Management 31 | ```sh 32 | # Generate a new identity 33 | ic-oss-cli identity --new --path myid.pem 34 | 35 | # Expected output: 36 | # principal: lxph3-nvpsv-yrevd-im4ug-qywcl-5ir34-rpsbs-6olvf-qtugo-iy5ai-jqe 37 | # new identity: myid.pem 38 | ``` 39 | 40 | ### File Operations 41 | ```sh 42 | # Upload to local canister 43 | ic-oss-cli -i myid.pem put -b mmrxu-fqaaa-aaaap-ahhna-cai --path test.tar.gz 44 | 45 | # Upload to mainnet canister 46 | ic-oss-cli -i myid.pem put -b mmrxu-fqaaa-aaaap-ahhna-cai --path test.tar.gz --ic 47 | 48 | # Add WASM to cluster 49 | ic-oss-cli -i debug/uploader.pem cluster-add-wasm \ 50 | -c x5573-nqaaa-aaaap-ahopq-cai \ 51 | --path target/wasm32-unknown-unknown/release/ic_oss_bucket.wasm 52 | ``` 53 | 54 | ## Documentation 55 | For detailed usage instructions: 56 | ```sh 57 | ic-oss-cli --help 58 | ic-oss-cli identity --help 59 | ic-oss-cli upload --help 60 | ``` 61 | 62 | ## License 63 | 64 | Copyright © 2024-2025 [LDC Labs](https://github.com/ldclabs). 65 | 66 | Licensed under the MIT License. See [LICENSE](../../LICENSE-MIT) for details. -------------------------------------------------------------------------------- /src/ic_object_store_canister/src/api_init.rs: -------------------------------------------------------------------------------- 1 | use candid::{CandidType, Principal}; 2 | use serde::Deserialize; 3 | 4 | use crate::store; 5 | 6 | #[derive(Clone, Debug, CandidType, Deserialize)] 7 | pub enum InstallArgs { 8 | Init(InitArgs), 9 | Upgrade(UpgradeArgs), 10 | } 11 | 12 | #[derive(Clone, Debug, CandidType, Deserialize)] 13 | pub struct InitArgs { 14 | name: String, 15 | governance_canister: Option, 16 | } 17 | 18 | #[derive(Clone, Debug, CandidType, Deserialize)] 19 | pub struct UpgradeArgs { 20 | name: Option, // seconds 21 | governance_canister: Option, 22 | } 23 | 24 | #[ic_cdk::init] 25 | fn init(args: Option) { 26 | store::state::with_mut(|s| { 27 | s.name = "ICObjectStore".to_string(); 28 | }); 29 | 30 | match args { 31 | Some(InstallArgs::Init(args)) => { 32 | store::state::with_mut(|s| { 33 | s.name = args.name; 34 | s.governance_canister = args.governance_canister; 35 | }); 36 | } 37 | Some(InstallArgs::Upgrade(_)) => { 38 | ic_cdk::trap( 39 | "cannot initialize the canister with an Upgrade args. Please provide an Init args.", 40 | ); 41 | } 42 | _ => {} 43 | } 44 | } 45 | 46 | #[ic_cdk::pre_upgrade] 47 | fn pre_upgrade() { 48 | store::state::save(); 49 | } 50 | 51 | #[ic_cdk::post_upgrade] 52 | fn post_upgrade(args: Option) { 53 | store::state::load(); 54 | 55 | match args { 56 | Some(InstallArgs::Upgrade(args)) => { 57 | store::state::with_mut(|s| { 58 | if let Some(name) = args.name { 59 | s.name = name; 60 | } 61 | if let Some(governance_canister) = args.governance_canister { 62 | s.governance_canister = Some(governance_canister); 63 | } 64 | }); 65 | } 66 | Some(InstallArgs::Init(_)) => { 67 | ic_cdk::trap( 68 | "cannot upgrade the canister with an Init args. 
Please provide an Upgrade args.", 69 | ); 70 | } 71 | _ => {} 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /examples/upload_js/README.md: -------------------------------------------------------------------------------- 1 | # Example: `upload_js` 2 | 3 | [ic-oss](https://github.com/ldclabs/ic-oss) is a decentralized Object Storage Service on the Internet Computer. 4 | 5 | `upload_js` is a demonstration project used to show how to implement large file storage in the ICP canister. By using `ic-oss-can` to include the `ic_oss_fs!` macro in your canister, an `fs` module and a set of Candid file system APIs will be automatically generated. You can use the `ic-oss-cli` tool to upload files to the ICP canister. 6 | 7 | For more information about `ic-oss-can`, please refer to [ic-oss-can](https://github.com/ldclabs/ic-oss/tree/main/src/ic_oss_can). 8 | 9 | ## Running the project locally 10 | 11 | If you want to test your project locally, you can use the following commands: 12 | 13 | ```bash 14 | cd examples/upload_js 15 | # Starts the replica, running in the background 16 | dfx start --background 17 | 18 | # deploy the canister 19 | dfx deploy ai_canister 20 | # canister: aovwi-4maaa-aaaaa-qaagq-cai 21 | 22 | dfx canister call ai_canister state '()' 23 | 24 | MYID=$(dfx identity get-principal) 25 | ic-oss-cli -i debug/uploader.pem identity 26 | # principal: nprym-ylvyz-ig3fr-lgcmn-zzzt4-tyuix-3v6bm-fsel7-6lq6x-zh2w7-zqe 27 | 28 | dfx canister call ai_canister admin_set_managers "(vec {principal \"$MYID\"; principal \"nprym-ylvyz-ig3fr-lgcmn-zzzt4-tyuix-3v6bm-fsel7-6lq6x-zh2w7-zqe\"})" 29 | 30 | dfx canister call ai_canister set_max_file_size "(10737418240)" # 10GB 31 | dfx canister call ai_canister admin_set_visibility "(1)" # public 32 | 33 | ic-oss-cli -i debug/uploader.pem put -b aovwi-4maaa-aaaaa-qaagq-cai --path Qwen1.5-0.5B-Chat/config.json 34 | # ... file id: 1 ... 35 | ic-oss-cli -i debug/uploader.pem put -b aovwi-4maaa-aaaaa-qaagq-cai --path Qwen1.5-0.5B-Chat/tokenizer.json 36 | # ... file id: 2 ... 37 | ic-oss-cli -i debug/uploader.pem put -b aovwi-4maaa-aaaaa-qaagq-cai --path Qwen1.5-0.5B-Chat/model.safetensors 38 | # ... file id: 3 ... 39 | 40 | dfx canister call ai_canister admin_load_model '(record {config_id=1;tokenizer_id=2;model_id=3})' 41 | 42 | dfx canister call ai_canister list_files '(0, null, null, null)' 43 | ``` 44 | 45 | ## License 46 | 47 | Copyright © 2024-2025 [LDC Labs](https://github.com/ldclabs). 48 | 49 | Licensed under the MIT License. See [LICENSE](../../LICENSE-MIT) for details. 50 | -------------------------------------------------------------------------------- /examples/ai_canister/README.md: -------------------------------------------------------------------------------- 1 | # Example: `ai_canister` 2 | 3 | [ic-oss](https://github.com/ldclabs/ic-oss) is a decentralized Object Storage Service on the Internet Computer. 4 | 5 | `ai_canister` is a demonstration project used to show how to implement large file storage in the ICP canister. By using `ic-oss-can` to include the `ic_oss_fs!` macro in your canister, an `fs` module and a set of Candid file system APIs will be automatically generated. You can use the `ic-oss-cli` tool to upload files to the ICP canister. 6 | 7 | For more information about `ic-oss-can`, please refer to [ic-oss-can](https://github.com/ldclabs/ic-oss/tree/main/src/ic_oss_can). 
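To make the flow concrete, here is a minimal, hypothetical sketch (not the actual `ai_canister` source) of how a canister method can read uploaded files back through the generated `fs` module. It assumes the `ic_oss_fs!()` / `FS_CHUNKS_STORE` setup shown in `ic_oss_can`, and omits the manager/access-control guards a real canister would need:

```rust
use candid::CandidType;
use serde::Deserialize;

#[derive(CandidType, Deserialize)]
struct LoadModelInput {
    config_id: u32,
    tokenizer_id: u32,
    model_id: u32,
}

// Hypothetical sketch: `fs` is the module generated by `ic_oss_fs!()`.
#[ic_cdk::update]
fn admin_load_model(args: LoadModelInput) -> Result<u64, String> {
    // `fs::get_full_chunks` concatenates every stored chunk of a file id
    // that was previously uploaded with `ic-oss-cli put`.
    let config = fs::get_full_chunks(args.config_id)?;
    let tokenizer = fs::get_full_chunks(args.tokenizer_id)?;
    let model = fs::get_full_chunks(args.model_id)?;

    // ...hand the raw bytes to the model loader here...
    Ok((config.len() + tokenizer.len() + model.len()) as u64)
}
```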
8 | 9 | ## Running the project locally 10 | 11 | If you want to test your project locally, you can use the following commands: 12 | 13 | ```bash 14 | cd examples/ai_canister 15 | # Starts the replica, running in the background 16 | dfx start --background 17 | 18 | # deploy the canister 19 | dfx deploy ai_canister 20 | # canister: aovwi-4maaa-aaaaa-qaagq-cai 21 | 22 | dfx canister call ai_canister state '()' 23 | 24 | MYID=$(dfx identity get-principal) 25 | ic-oss-cli -i debug/uploader.pem identity 26 | # principal: nprym-ylvyz-ig3fr-lgcmn-zzzt4-tyuix-3v6bm-fsel7-6lq6x-zh2w7-zqe 27 | 28 | dfx canister call ai_canister admin_set_managers "(vec {principal \"$MYID\"; principal \"nprym-ylvyz-ig3fr-lgcmn-zzzt4-tyuix-3v6bm-fsel7-6lq6x-zh2w7-zqe\"})" 29 | 30 | dfx canister call ai_canister set_max_file_size "(10737418240)" # 10GB 31 | dfx canister call ai_canister admin_set_visibility "(1)" # public 32 | 33 | ic-oss-cli -i debug/uploader.pem put -b aovwi-4maaa-aaaaa-qaagq-cai --path Qwen1.5-0.5B-Chat/config.json 34 | # ... file id: 1 ... 35 | ic-oss-cli -i debug/uploader.pem put -b aovwi-4maaa-aaaaa-qaagq-cai --path Qwen1.5-0.5B-Chat/tokenizer.json 36 | # ... file id: 2 ... 37 | ic-oss-cli -i debug/uploader.pem put -b aovwi-4maaa-aaaaa-qaagq-cai --path Qwen1.5-0.5B-Chat/model.safetensors 38 | # ... file id: 3 ... 39 | 40 | dfx canister call ai_canister admin_load_model '(record {config_id=1;tokenizer_id=2;model_id=3})' 41 | 42 | dfx canister call ai_canister list_files '(0, null, null, null)' 43 | ``` 44 | 45 | ## License 46 | 47 | Copyright © 2024-2025 [LDC Labs](https://github.com/ldclabs). 48 | 49 | Licensed under the MIT License. See [LICENSE](../../LICENSE-MIT) for details. 50 | -------------------------------------------------------------------------------- /src/ic_oss_types/src/folder.rs: -------------------------------------------------------------------------------- 1 | use candid::CandidType; 2 | use serde::{Deserialize, Serialize}; 3 | use std::collections::BTreeSet; 4 | 5 | use crate::file::valid_file_name; 6 | 7 | #[derive(CandidType, Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq)] 8 | pub struct FolderInfo { 9 | pub id: u32, 10 | pub parent: u32, // 0: root 11 | pub name: String, 12 | pub created_at: u64, // unix timestamp in milliseconds 13 | pub updated_at: u64, // unix timestamp in milliseconds 14 | pub status: i8, // -1: archived; 0: readable and writable; 1: readonly 15 | pub files: BTreeSet, // length <= max_children 16 | pub folders: BTreeSet, // length <= max_children 17 | } 18 | 19 | #[derive(CandidType, Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq)] 20 | pub struct FolderName { 21 | pub id: u32, 22 | pub name: String, 23 | } 24 | 25 | #[derive(CandidType, Clone, Debug, Default, Deserialize, Serialize)] 26 | pub struct CreateFolderInput { 27 | pub parent: u32, 28 | pub name: String, 29 | } 30 | 31 | impl CreateFolderInput { 32 | pub fn validate(&self) -> Result<(), String> { 33 | if !valid_file_name(&self.name) { 34 | return Err("invalid folder name".to_string()); 35 | } 36 | 37 | Ok(()) 38 | } 39 | } 40 | 41 | #[derive(CandidType, Clone, Debug, Default, Deserialize, Serialize)] 42 | pub struct CreateFolderOutput { 43 | pub id: u32, 44 | pub created_at: u64, 45 | } 46 | 47 | #[derive(CandidType, Clone, Debug, Default, Deserialize, Serialize)] 48 | pub struct UpdateFolderInput { 49 | pub id: u32, 50 | pub name: Option, 51 | pub status: Option, // when set to 1, the file must be fully filled, and hash must be provided 52 | } 53 | 54 | impl 
UpdateFolderInput { 55 | pub fn validate(&self) -> Result<(), String> { 56 | if let Some(name) = &self.name { 57 | if !valid_file_name(name) { 58 | return Err("invalid folder name".to_string()); 59 | } 60 | } 61 | 62 | if let Some(status) = self.status { 63 | if !(-1i8..=1i8).contains(&status) { 64 | return Err("status should be -1, 0 or 1".to_string()); 65 | } 66 | } 67 | Ok(()) 68 | } 69 | } 70 | 71 | #[derive(CandidType, Clone, Debug, Default, Deserialize, Serialize)] 72 | pub struct UpdateFolderOutput { 73 | pub updated_at: u64, 74 | } 75 | -------------------------------------------------------------------------------- /src/ic_oss_can/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod store; 2 | pub mod types; 3 | 4 | #[cfg(test)] 5 | mod test { 6 | 7 | use ic_stable_structures::{ 8 | memory_manager::{MemoryId, MemoryManager, VirtualMemory}, 9 | DefaultMemoryImpl, StableBTreeMap, 10 | }; 11 | use std::cell::RefCell; 12 | 13 | use crate::ic_oss_fs; 14 | use crate::types::{Chunk, FileId, FileMetadata}; 15 | 16 | type Memory = VirtualMemory; 17 | 18 | const FS_DATA_MEMORY_ID: MemoryId = MemoryId::new(0); 19 | 20 | thread_local! { 21 | 22 | static MEMORY_MANAGER: RefCell> = 23 | RefCell::new(MemoryManager::init(DefaultMemoryImpl::default())); 24 | 25 | 26 | // `FS_CHUNKS_STORE`` is needed by `ic_oss_can::ic_oss_fs` macro 27 | static FS_CHUNKS_STORE: RefCell> = RefCell::new( 28 | StableBTreeMap::init( 29 | MEMORY_MANAGER.with_borrow(|m| m.get(FS_DATA_MEMORY_ID)), 30 | ) 31 | ); 32 | } 33 | 34 | // need to define `FS_CHUNKS_STORE` before `ic_oss_can::ic_oss_fs!()` 35 | ic_oss_fs!(); 36 | 37 | #[test] 38 | fn test_ic_oss_fs() { 39 | let files = fs::list_files(u32::MAX, 2); 40 | assert!(files.is_empty()); 41 | 42 | fs::add_file(FileMetadata { 43 | name: "f1".to_string(), 44 | size: 100, 45 | ..Default::default() 46 | }) 47 | .unwrap(); 48 | 49 | assert!(fs::get_file(0).is_none()); 50 | assert_eq!(fs::get_file(1).unwrap().name, "f1"); 51 | 52 | fs::add_file(FileMetadata { 53 | name: "f2".to_string(), 54 | size: 100, 55 | ..Default::default() 56 | }) 57 | .unwrap(); 58 | 59 | fs::add_file(FileMetadata { 60 | name: "f3".to_string(), 61 | size: 100, 62 | ..Default::default() 63 | }) 64 | .unwrap(); 65 | 66 | fs::add_file(FileMetadata { 67 | name: "f4".to_string(), 68 | size: 100, 69 | ..Default::default() 70 | }) 71 | .unwrap(); 72 | 73 | let files = fs::list_files(u32::MAX, 2); 74 | assert_eq!( 75 | files.iter().map(|f| f.name.clone()).collect::>(), 76 | vec!["f4", "f3"] 77 | ); 78 | 79 | let files = fs::list_files(files.last().unwrap().id, 10); 80 | assert_eq!( 81 | files.iter().map(|f| f.name.clone()).collect::>(), 82 | vec!["f2", "f1"] 83 | ); 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /src/ic_object_store_canister/src/api_admin.rs: -------------------------------------------------------------------------------- 1 | use candid::{pretty::candid::value::pp_value, CandidType, IDLValue, Principal}; 2 | use ic_oss_types::validate_principals; 3 | use std::collections::BTreeSet; 4 | 5 | use crate::{is_controller, store}; 6 | 7 | #[ic_cdk::update(guard = "is_controller")] 8 | fn admin_add_managers(args: BTreeSet) -> Result<(), String> { 9 | validate_principals(&args)?; 10 | store::state::with_mut(|s| { 11 | s.managers.extend(args); 12 | Ok(()) 13 | }) 14 | } 15 | 16 | #[ic_cdk::update(guard = "is_controller")] 17 | fn admin_remove_managers(args: BTreeSet) -> Result<(), String> { 18 | 
validate_principals(&args)?; 19 | store::state::with_mut(|s| { 20 | s.managers.retain(|v| !args.contains(v)); 21 | Ok(()) 22 | }) 23 | } 24 | 25 | #[ic_cdk::update(guard = "is_controller")] 26 | fn admin_add_auditors(args: BTreeSet) -> Result<(), String> { 27 | validate_principals(&args)?; 28 | store::state::with_mut(|s| { 29 | s.auditors.extend(args); 30 | Ok(()) 31 | }) 32 | } 33 | 34 | #[ic_cdk::update(guard = "is_controller")] 35 | fn admin_remove_auditors(args: BTreeSet) -> Result<(), String> { 36 | validate_principals(&args)?; 37 | store::state::with_mut(|s| { 38 | s.auditors.retain(|v| !args.contains(v)); 39 | Ok(()) 40 | }) 41 | } 42 | 43 | #[ic_cdk::update(guard = "is_controller")] 44 | fn admin_clear() -> Result<(), String> { 45 | store::state::clear(); 46 | Ok(()) 47 | } 48 | 49 | #[ic_cdk::update] 50 | fn validate_admin_add_managers(args: BTreeSet) -> Result { 51 | validate_principals(&args)?; 52 | pretty_format(&args) 53 | } 54 | 55 | #[ic_cdk::update] 56 | fn validate_admin_remove_managers(args: BTreeSet) -> Result { 57 | validate_principals(&args)?; 58 | pretty_format(&args) 59 | } 60 | 61 | #[ic_cdk::update] 62 | fn validate_admin_add_auditors(args: BTreeSet) -> Result { 63 | validate_principals(&args)?; 64 | pretty_format(&args) 65 | } 66 | 67 | #[ic_cdk::update] 68 | fn validate_admin_remove_auditors(args: BTreeSet) -> Result { 69 | validate_principals(&args)?; 70 | pretty_format(&args) 71 | } 72 | 73 | #[ic_cdk::update] 74 | fn validate_admin_clear() -> Result { 75 | Ok("ok".to_string()) 76 | } 77 | 78 | fn pretty_format(data: &T) -> Result 79 | where 80 | T: CandidType, 81 | { 82 | let val = IDLValue::try_from_candid_type(data).map_err(|err| format!("{err:?}"))?; 83 | let doc = pp_value(7, &val); 84 | 85 | Ok(format!("{}", doc.pretty(120))) 86 | } 87 | -------------------------------------------------------------------------------- /examples/video_player/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Video tool 7 | 24 | 25 | 26 | 27 |

[stripped HTML page: only its text nodes survive here — a "Video tool" title/heading and an "Info:" section; the page markup and inline player script are omitted]
46 | 47 | 87 | 88 | 89 | -------------------------------------------------------------------------------- /src/ic_object_store_canister/README.md: -------------------------------------------------------------------------------- 1 | # `IC Object Store` 2 | 3 | Native Rust implementation of Apache Arrow object store on the Internet Computer. 4 | 5 | More detail: https://github.com/apache/arrow-rs-object-store 6 | 7 | ## Features 8 | 9 | - Full implementation of Apache Arrow object store APIs. 10 | - AES256-GCM encryption. 11 | 12 | ## Demo 13 | 14 | Try it online: https://a4gq6-oaaaa-aaaab-qaa4q-cai.raw.icp0.io/?id=6at64-oyaaa-aaaap-anvza-cai 15 | 16 | ## Quick Start 17 | 18 | ### Local Deployment 19 | 20 | 1. Deploy the canister: 21 | ```bash 22 | # dfx canister create --specified-id 6at64-oyaaa-aaaap-anvza-cai ic_object_store_canister 23 | dfx deploy ic_object_store_canister 24 | ``` 25 | 26 | Or with custom configuration: 27 | ```bash 28 | dfx deploy ic_object_store_canister --argument "(opt variant {Init = 29 | record { 30 | name = \"LDC Labs\"; 31 | governance_canister = null; 32 | } 33 | })" 34 | ``` 35 | 36 | 2. Set up permissions: 37 | ```bash 38 | # Get your principal 39 | MYID=$(dfx identity get-principal) 40 | # Get the uploader principal 41 | ic-oss-cli -i debug/uploader.pem identity 42 | # principal: jjn6g-sh75l-r3cxb-wxrkl-frqld-6p6qq-d4ato-wske5-op7s5-n566f-bqe 43 | 44 | # Add managers 45 | dfx canister call ic_object_store_canister admin_add_managers "(vec {principal \"$MYID\"; principal \"jjn6g-sh75l-r3cxb-wxrkl-frqld-6p6qq-d4ato-wske5-op7s5-n566f-bqe\"})" 46 | 47 | dfx canister call ic_object_store_canister get_state '()' 48 | ``` 49 | 50 | ## API Reference 51 | 52 | The canister exposes a comprehensive Candid API. Key endpoints include: 53 | 54 | ```candid 55 | # Object Operations 56 | put_opts : (text, blob, PutOptions) -> (Result) 57 | head : (text) -> (Result) query 58 | get_opts : (text, GetOptions) -> (Result) query 59 | get_ranges : (text, vec record { nat64; nat64 }) -> (Result) query 60 | copy : (text, text) -> (Result) 61 | rename : (text, text) -> (Result) 62 | list : (opt text) -> (Result) query 63 | list_with_delimiter : (opt text) -> (Result) query 64 | list_with_offset : (opt text, text) -> (Result) query 65 | create_multipart : (text) -> (Result) 66 | put_part : (text, text, nat64, blob) -> (Result) 67 | complete_multipart : (text, text, PutMultipartOptions) -> (Result) 68 | 69 | # Admin Operations 70 | admin_add_managers : (vec principal) -> (Result) 71 | admin_remove_managers : (vec principal) -> (Result) 72 | ``` 73 | 74 | Full Candid API definition: [ic_object_store_canister.did](https://github.com/ldclabs/ic-oss/tree/main/src/ic_object_store_canister/ic_object_store_canister.did) 75 | 76 | ## License 77 | Copyright © 2024-2025 [LDC Labs](https://github.com/ldclabs). 78 | 79 | `ldclabs/ic-oss` is licensed under the MIT License. See [LICENSE](../../LICENSE-MIT) for the full license text. 80 | -------------------------------------------------------------------------------- /examples/upload_js/upload.js: -------------------------------------------------------------------------------- 1 | import { Ed25519KeyIdentity } from '@dfinity/identity' 2 | import { createAgent } from '@dfinity/utils' 3 | import { BucketCanister, Uploader } from '@ldclabs/ic_oss_ts' 4 | 5 | const IS_LOCAL = true 6 | const apiHost = IS_LOCAL ? 
'http://127.0.0.1:4943' : 'https://icp-api.io' 7 | const bucketCanister = 'mmrxu-fqaaa-aaaap-ahhna-cai' 8 | 9 | // The principal is generated by generateIdentity() 10 | // 'pxfqr-x3orr-z5yip-7yzdd-hyxgd-dktgh-3awsk-ohzma-lfjzi-753j7-tae' 11 | // update the principal as manager 12 | // dfx canister call ic_oss_bucket admin_set_managers "(vec {principal \"$MYID\"; principal \"pxfqr-x3orr-z5yip-7yzdd-hyxgd-dktgh-3awsk-ohzma-lfjzi-753j7-tae\"})" 13 | const idJSON = 14 | '["302a300506032b6570032100f6f7b1317cca7be2c3f6049da6932aadbd5549d4fd7d7d29290dead0b85d1f96","5b3770cbfd16d3ac610cc3cda0bc292a448f2c78d6634de6ee280df0a65e4c04"]' 15 | 16 | const files = [ 17 | // { 18 | // parent: 0, 19 | // content: '../../debug/test.tar.gz', // a large file 20 | // name: '', 21 | // contentType: '' 22 | // }, 23 | { 24 | parent: 0, 25 | content: 'package.json', 26 | name: '', 27 | contentType: '' 28 | }, 29 | { 30 | parent: 0, 31 | content: 'package-lock.json', 32 | name: '', 33 | contentType: '' 34 | }, 35 | { 36 | parent: 0, 37 | content: 'README.md', 38 | name: '', 39 | contentType: '' 40 | } 41 | ] 42 | 43 | async function main() { 44 | // generateIdentity() 45 | await uploadFiles(files) 46 | } 47 | 48 | async function uploadFiles(files) { 49 | const identity = Ed25519KeyIdentity.fromJSON(idJSON) 50 | const agent = await createAgent({ 51 | identity, 52 | fetchRootKey: IS_LOCAL, 53 | host: apiHost, 54 | verifyQuerySignatures: true 55 | }) 56 | const bucketClient = BucketCanister.create({ 57 | agent, 58 | canisterId: bucketCanister 59 | }) 60 | console.log('Bucket info:\n', await bucketClient.getBucketInfo()) 61 | console.log('Bucket files in root folder:\n', await bucketClient.listFiles(0)) 62 | 63 | const uploader = new Uploader(bucketClient) 64 | 65 | for (const file of files) { 66 | const result = await uploader.upload(file, (progress) => { 67 | console.log(`Upload ${file.name}:`, progress) 68 | }) 69 | 70 | console.log(`Uploaded ${file.name}:`, result) 71 | } 72 | 73 | console.log('Bucket files in root folder:\n', await bucketClient.listFiles(0)) 74 | } 75 | 76 | main().catch(console.error) 77 | 78 | // 1.239G: 45 min, 2.528 T 79 | // 1MB: 2.128s, 0.00199 T 80 | 81 | function generateIdentity() { 82 | const id = Ed25519KeyIdentity.generate() 83 | const principal = id.getPrincipal().toText() 84 | console.log('Principal', id.getPrincipal().toText()) 85 | 86 | const json = id.toJSON() 87 | console.log(JSON.stringify(json, null, 0)) 88 | } 89 | -------------------------------------------------------------------------------- /examples/ai_canister/ai_canister.did: -------------------------------------------------------------------------------- 1 | type BTreeMap = vec record { 2 | text; 3 | variant { 4 | Int : int; 5 | Map : BTreeMap; 6 | Nat : nat; 7 | Nat64 : nat64; 8 | Blob : blob; 9 | Text : text; 10 | Array : vec Value; 11 | }; 12 | }; 13 | type CreateFileInput = record { 14 | status : opt int8; 15 | content : opt blob; 16 | custom : opt vec record { text; Value }; 17 | hash : opt blob; 18 | name : text; 19 | crc32 : opt nat32; 20 | size : opt nat64; 21 | content_type : text; 22 | parent : nat32; 23 | }; 24 | type CreateFileOutput = record { id : nat32; created_at : nat64 }; 25 | type FileInfo = record { 26 | ex : opt vec record { text; Value }; 27 | id : nat32; 28 | status : int8; 29 | updated_at : nat64; 30 | custom : opt vec record { text; Value }; 31 | hash : opt blob; 32 | name : text; 33 | size : nat64; 34 | content_type : text; 35 | created_at : nat64; 36 | filled : nat64; 37 | chunks : nat32; 38 
| parent : nat32; 39 | }; 40 | type LoadModelInput = record { 41 | tokenizer_id : nat32; 42 | config_id : nat32; 43 | model_id : nat32; 44 | }; 45 | type Result = variant { Ok : nat64; Err : text }; 46 | type Result_1 = variant { Ok; Err : text }; 47 | type Result_2 = variant { Ok : CreateFileOutput; Err : text }; 48 | type Result_3 = variant { Ok : bool; Err : text }; 49 | type Result_4 = variant { Ok : vec FileInfo; Err : text }; 50 | type Result_5 = variant { Ok : State; Err }; 51 | type Result_6 = variant { Ok : UpdateFileChunkOutput; Err : text }; 52 | type Result_7 = variant { Ok : UpdateFileOutput; Err : text }; 53 | type State = record { 54 | ai_config : nat32; 55 | ai_model : nat32; 56 | ai_tokenizer : nat32; 57 | }; 58 | type UpdateFileChunkInput = record { 59 | id : nat32; 60 | chunk_index : nat32; 61 | content : blob; 62 | crc32 : opt nat32; 63 | }; 64 | type UpdateFileChunkOutput = record { updated_at : nat64; filled : nat64 }; 65 | type UpdateFileInput = record { 66 | id : nat32; 67 | status : opt int8; 68 | custom : opt vec record { text; Value }; 69 | hash : opt blob; 70 | name : opt text; 71 | content_type : opt text; 72 | }; 73 | type UpdateFileOutput = record { updated_at : nat64 }; 74 | type Value = variant { 75 | Int : int; 76 | Map : BTreeMap; 77 | Nat : nat; 78 | Nat64 : nat64; 79 | Blob : blob; 80 | Text : text; 81 | Array : vec Value; 82 | }; 83 | service : () -> { 84 | admin_load_model : (LoadModelInput) -> (Result); 85 | admin_set_managers : (vec principal) -> (Result_1); 86 | admin_set_visibility : (nat8) -> (Result_1); 87 | create_file : (CreateFileInput, opt blob) -> (Result_2); 88 | delete_file : (nat32, opt blob) -> (Result_3); 89 | list_files : (nat32, opt nat32, opt nat32, opt blob) -> (Result_4) query; 90 | set_max_file_size : (nat64) -> (Result_1); 91 | state : () -> (Result_5) query; 92 | update_file_chunk : (UpdateFileChunkInput, opt blob) -> (Result_6); 93 | update_file_info : (UpdateFileInput, opt blob) -> (Result_7); 94 | } 95 | -------------------------------------------------------------------------------- /src/ic_oss_cluster/src/api_query.rs: -------------------------------------------------------------------------------- 1 | use candid::{Nat, Principal}; 2 | use ic_cdk::management_canister as mgt; 3 | use ic_oss_types::{ 4 | cluster::{BucketDeploymentInfo, ClusterInfo, WasmInfo}, 5 | format_error, nat_to_u64, 6 | }; 7 | use serde_bytes::ByteArray; 8 | use std::collections::BTreeMap; 9 | 10 | use crate::{is_controller_or_manager, store}; 11 | 12 | #[ic_cdk::query] 13 | fn get_cluster_info() -> Result { 14 | Ok(store::state::get_cluster_info()) 15 | } 16 | 17 | #[ic_cdk::query] 18 | fn get_bucket_wasm(hash: ByteArray<32>) -> Result { 19 | store::wasm::get_wasm(&hash) 20 | .map(|w| WasmInfo { 21 | created_at: w.created_at, 22 | created_by: w.created_by, 23 | description: w.description, 24 | wasm: w.wasm, 25 | hash, 26 | }) 27 | .ok_or_else(|| "NotFound: wasm not found".to_string()) 28 | } 29 | 30 | #[ic_cdk::query] 31 | fn get_deployed_buckets() -> Result, String> { 32 | Ok(store::wasm::get_deployed_buckets()) 33 | } 34 | 35 | #[ic_cdk::query] 36 | fn get_buckets() -> Result, String> { 37 | store::state::with(|s| Ok(s.bucket_deployed_list.keys().cloned().collect())) 38 | } 39 | 40 | #[ic_cdk::update(guard = "is_controller_or_manager")] 41 | async fn get_canister_status( 42 | canister: Option, 43 | ) -> Result { 44 | let self_id = ic_cdk::api::canister_self(); 45 | let canister = canister.unwrap_or(self_id); 46 | if canister != self_id { 47 | 
store::state::with(|s| { 48 | if !s.bucket_deployed_list.contains_key(&canister) { 49 | return Err("NotFound: bucket not found".to_string()); 50 | } 51 | Ok(()) 52 | })?; 53 | } 54 | 55 | let res = mgt::canister_status(&mgt::CanisterStatusArgs { 56 | canister_id: canister, 57 | }) 58 | .await 59 | .map_err(format_error)?; 60 | Ok(res) 61 | } 62 | 63 | #[ic_cdk::query(guard = "is_controller_or_manager")] 64 | fn bucket_deployment_logs( 65 | prev: Option, 66 | take: Option, 67 | ) -> Result, String> { 68 | let prev = prev.as_ref().map(nat_to_u64); 69 | let take = take.as_ref().map(nat_to_u64).unwrap_or(10).min(1000) as usize; 70 | Ok(store::wasm::bucket_deployment_logs(prev, take)) 71 | } 72 | 73 | #[ic_cdk::query(guard = "is_controller_or_manager")] 74 | fn get_subject_policies(subject: Principal) -> Result, String> { 75 | store::auth::get_all_policies(&subject) 76 | .map(|ps| ps.0) 77 | .ok_or_else(|| "NotFound: subject not found".to_string()) 78 | } 79 | 80 | #[ic_cdk::query(guard = "is_controller_or_manager")] 81 | fn get_subject_policies_for(subject: Principal, audience: Principal) -> Result { 82 | match store::auth::get_all_policies(&subject) { 83 | None => Err("NotFound: subject not found".to_string()), 84 | Some(ps) => { 85 | ps.0.get(&audience) 86 | .cloned() 87 | .ok_or_else(|| "NotFound: policies not found".to_string()) 88 | } 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /src/ic_object_store/README.md: -------------------------------------------------------------------------------- 1 | # `ic_object_store` 2 | ![License](https://img.shields.io/crates/l/ic_object_store.svg) 3 | [![Crates.io](https://img.shields.io/crates/d/ic_object_store.svg)](https://crates.io/crates/ic_object_store) 4 | [![Test](https://github.com/ldclabs/ic-oss/actions/workflows/test.yml/badge.svg)](https://github.com/ldclabs/ic-oss/actions/workflows/test.yml) 5 | [![Docs.rs](https://img.shields.io/docsrs/ic_object_store?label=docs.rs)](https://docs.rs/ic_object_store) 6 | [![Latest Version](https://img.shields.io/crates/v/ic_object_store.svg)](https://crates.io/crates/ic_object_store) 7 | 8 | [IC Object Store](https://github.com/ldclabs/ic-oss/tree/main/src/ic_object_store_canister) is a native Rust implementation of Apache Arrow object store on the Internet Computer. 9 | 10 | `ic_object_store` is the Rust version of the client SDK for the IC Object Store canister. 11 | 12 | ## Overview 13 | 14 | This library provides a Rust client SDK for interacting with the IC Object Store canister, which implements the Apache [Object Store](https://github.com/apache/arrow-rs-object-store) interface on the Internet Computer. It allows developers to seamlessly integrate with the decentralized storage capabilities of the Internet Computer using familiar Apache Object Store APIs. 
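The client implements the standard `object_store::ObjectStore` trait, so the usual object operations (`get`, `head`, `list`, `delete`, …) work against the canister as well. A minimal, hypothetical sketch — it assumes a `storage` client constructed as shown in the Usage section below:

```rust
use futures::TryStreamExt;
use object_store::{path::Path, ObjectStore};

// Sketch only: exercises a few more ObjectStore methods against the canister.
async fn inspect(storage: &impl ObjectStore) -> object_store::Result<()> {
    let path = Path::from("test/hello.txt");

    // Read the object back as raw bytes.
    let body = storage.get(&path).await?.bytes().await?;
    println!("read {} bytes", body.len());

    // Fetch metadata without downloading the payload.
    let meta = storage.head(&path).await?;
    println!("size: {}", meta.size);

    // Enumerate everything under the "test" prefix.
    let objects: Vec<_> = storage.list(Some(&Path::from("test"))).try_collect().await?;
    println!("{} objects listed", objects.len());

    // Clean up.
    storage.delete(&path).await?;
    Ok(())
}
```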
15 | 16 | ## Features 17 | 18 | - Full implementation of Apache Arrow object store APIs 19 | - Secure data storage with AES256-GCM encryption 20 | - Asynchronous stream operations for efficient data handling 21 | - Seamless integration with the Internet Computer ecosystem 22 | - Compatible with the broader Apache ecosystem 23 | 24 | ## Installation 25 | 26 | Add this to your `Cargo.toml`: 27 | 28 | ```toml 29 | [dependencies] 30 | ic_object_store = "1.1" 31 | ``` 32 | 33 | ## Usage 34 | 35 | ```rust 36 | use ic_object_store::{Client, ObjectStoreClient, build_agent}; 37 | use object_store::ObjectStore; 38 | 39 | let secret = [8u8; 32]; 40 | // backend: IC Object Store Canister 41 | let canister = Principal::from_text("6at64-oyaaa-aaaap-anvza-cai").unwrap(); 42 | let sk = SigningKey::from(secret); 43 | let id = BasicIdentity::from_signing_key(sk); 44 | println!("id: {:?}", id.sender().unwrap().to_text()); 45 | // jjn6g-sh75l-r3cxb-wxrkl-frqld-6p6qq-d4ato-wske5-op7s5-n566f-bqe 46 | 47 | let agent = build_agent("https://ic0.app", Arc::new(id)) 48 | .await 49 | .unwrap(); 50 | let client = Arc::new(Client::new(Arc::new(agent), canister, Some(secret))); 51 | let storage = ObjectStoreClient::new(client); 52 | 53 | let path = Path::from("test/hello.txt"); 54 | let payload = "Hello Anda!".as_bytes().to_vec(); 55 | let res = storage 56 | .put_opts(&path, payload.into(), Default::default()) 57 | .await 58 | .unwrap(); 59 | println!("put result: {:?}", res); 60 | 61 | let res = storage.get_opts(&path, Default::default()).await.unwrap(); 62 | println!("get result: {:?}", res); 63 | ``` 64 | 65 | ## Documentation 66 | 67 | For detailed documentation, please visit: https://docs.rs/ic_object_store 68 | 69 | ## Related Projects 70 | 71 | - [IC Object Store Canister](https://github.com/ldclabs/ic-oss/tree/main/src/ic_object_store_canister) - The canister implementation 72 | - [IC-OSS](https://github.com/ldclabs/ic-oss) - A decentralized Object Storage Service on the Internet Computer 73 | 74 | ## License 75 | 76 | Copyright © 2024-2025 [LDC Labs](https://github.com/ldclabs). 77 | 78 | Licensed under the MIT License. See [LICENSE](../../LICENSE-MIT) for details. 
79 | -------------------------------------------------------------------------------- /src/ic_oss_types/src/bucket.rs: -------------------------------------------------------------------------------- 1 | use candid::{CandidType, Principal}; 2 | use serde::{Deserialize, Serialize}; 3 | use serde_bytes::{ByteArray, ByteBuf}; 4 | use std::collections::BTreeSet; 5 | 6 | use crate::file::MAX_FILE_SIZE; 7 | 8 | #[derive(CandidType, Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq)] 9 | pub struct BucketInfo { 10 | pub name: String, 11 | pub file_id: u32, 12 | pub folder_id: u32, 13 | pub max_file_size: u64, 14 | pub max_folder_depth: u8, 15 | pub max_children: u16, 16 | pub max_custom_data_size: u16, 17 | pub enable_hash_index: bool, 18 | pub status: i8, // -1: archived; 0: readable and writable; 1: readonly 19 | pub visibility: u8, // 0: private; 1: public 20 | pub total_files: u64, 21 | pub total_chunks: u64, 22 | pub total_folders: u64, 23 | pub managers: BTreeSet, // managers can read and write 24 | // auditors can read and list even if the bucket is private 25 | pub auditors: BTreeSet, 26 | // used to verify the request token signed with SECP256K1 27 | pub trusted_ecdsa_pub_keys: Vec, 28 | // used to verify the request token signed with ED25519 29 | pub trusted_eddsa_pub_keys: Vec>, 30 | pub governance_canister: Option, 31 | } 32 | 33 | #[derive(CandidType, Clone, Debug, Default, Deserialize, Serialize)] 34 | pub struct UpdateBucketInput { 35 | pub name: Option, 36 | pub max_file_size: Option, 37 | pub max_folder_depth: Option, 38 | pub max_children: Option, 39 | pub max_custom_data_size: Option, 40 | pub enable_hash_index: Option, 41 | pub status: Option, // -1: archived; 0: readable and writable; 1: readonly 42 | pub visibility: Option, // 0: private; 1: public 43 | pub trusted_ecdsa_pub_keys: Option>, 44 | pub trusted_eddsa_pub_keys: Option>>, 45 | } 46 | 47 | impl UpdateBucketInput { 48 | pub fn validate(&self) -> Result<(), String> { 49 | if let Some(name) = &self.name { 50 | if name.trim().is_empty() { 51 | return Err("invalid bucket name".to_string()); 52 | } 53 | } 54 | if let Some(max_file_size) = self.max_file_size { 55 | if max_file_size == 0 { 56 | return Err("max_file_size should be greater than 0".to_string()); 57 | } 58 | if max_file_size < MAX_FILE_SIZE { 59 | return Err(format!( 60 | "max_file_size should be greater than or equal to {}", 61 | MAX_FILE_SIZE 62 | )); 63 | } 64 | } 65 | 66 | if let Some(max_folder_depth) = self.max_folder_depth { 67 | if max_folder_depth == 0 { 68 | return Err("max_folder_depth should be greater than 0".to_string()); 69 | } 70 | } 71 | 72 | if let Some(max_children) = self.max_children { 73 | if max_children == 0 { 74 | return Err("max_children should be greater than 0".to_string()); 75 | } 76 | } 77 | 78 | if let Some(max_custom_data_size) = self.max_custom_data_size { 79 | if max_custom_data_size == 0 { 80 | return Err("max_custom_data_size should be greater than 0".to_string()); 81 | } 82 | } 83 | 84 | if let Some(status) = self.status { 85 | if !(-1i8..=1i8).contains(&status) { 86 | return Err("status should be -1, 0 or 1".to_string()); 87 | } 88 | } 89 | 90 | if let Some(visibility) = self.visibility { 91 | if visibility != 0 && visibility != 1 { 92 | return Err("visibility should be 0 or 1".to_string()); 93 | } 94 | } 95 | Ok(()) 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /src/ic_oss_can/README.md: 
-------------------------------------------------------------------------------- 1 | # `ic-oss-can` 2 | 3 | ![License](https://img.shields.io/crates/l/ic-oss.svg) 4 | [![Crates.io](https://img.shields.io/crates/d/ic-oss-can.svg)](https://crates.io/crates/ic-oss-can) 5 | [![CI](https://github.com/ldclabs/ic-oss/actions/workflows/ci.yml/badge.svg)](https://github.com/ldclabs/ic-oss/actions/workflows/ci.yml) 6 | [![Docs.rs](https://img.shields.io/docsrs/ic-oss-can?label=docs.rs)](https://docs.rs/ic-oss-can) 7 | [![Latest Version](https://img.shields.io/crates/v/ic-oss-can.svg)](https://crates.io/crates/ic-oss-can) 8 | 9 | A Rust library for implementing large file storage in Internet Computer (ICP) canisters. Part of the [ic-oss](https://github.com/ldclabs/ic-oss). 10 | 11 | ## Features 12 | 13 | - Simple integration with the `ic_oss_fs!` macro 14 | - Automatic generation of filesystem APIs in Candid format 15 | - Using given `FS_CHUNKS_STORE` stable storage 16 | - File chunk management and retrieval 17 | - Access control with manager roles 18 | - Compatible with `ic-oss-cli` for file uploads 19 | 20 | ## Quick Start 21 | 22 | Add the following dependencies to your `Cargo.toml`: 23 | 24 | ```toml 25 | [dependencies] 26 | ic-oss-can = "0.9" 27 | ic-oss-types = "0.9" 28 | ``` 29 | 30 | ### Basic Implementation 31 | 32 | ```rust 33 | use ic_stable_structures::{ 34 | memory_manager::{MemoryId, MemoryManager, VirtualMemory}, 35 | DefaultMemoryImpl, StableBTreeMap, 36 | }; 37 | use std::cell::RefCell; 38 | 39 | use ic_oss_can::ic_oss_fs; 40 | use ic_oss_can::types::{Chunk, FileId, FileMetadata}; 41 | 42 | type Memory = VirtualMemory; 43 | 44 | const FS_DATA_MEMORY_ID: MemoryId = MemoryId::new(0); 45 | 46 | thread_local! { 47 | 48 | static MEMORY_MANAGER: RefCell> = 49 | RefCell::new(MemoryManager::init(DefaultMemoryImpl::default())); 50 | 51 | 52 | // `FS_CHUNKS_STORE`` is needed by `ic_oss_can::ic_oss_fs!` macro 53 | static FS_CHUNKS_STORE: RefCell> = RefCell::new( 54 | StableBTreeMap::init( 55 | MEMORY_MANAGER.with_borrow(|m| m.get(FS_DATA_MEMORY_ID)), 56 | ) 57 | ); 58 | } 59 | 60 | // need to define `FS_CHUNKS_STORE` before `ic_oss_can::ic_oss_fs!()` 61 | ic_oss_fs!(); 62 | ``` 63 | 64 | ## Available APIs 65 | 66 | ### Rust Module APIs 67 | 68 | ```rust 69 | // File Management 70 | fs::get_file(id: u32) -> Option; 71 | fs::list_files(prev: u32, take: u32) -> Vec; 72 | fs::add_file(file: FileMetadata) -> Result; 73 | fs::update_file(change: UpdateFileInput, now_ms: u64) -> Result<(), String>; 74 | fs::delete_file(id: u32) -> Result; 75 | 76 | // Chunk Operations 77 | fs::get_chunk(id: u32, chunk_index: u32) -> Option; 78 | fs::get_full_chunks(id: u32) -> Result, String>; 79 | fs::update_chunk(id: u32, chunk_index: u32, now_ms: u64, chunk: Vec) -> Result; 80 | 81 | // Configuration 82 | fs::set_max_file_size(size: u64); 83 | fs::set_visibility(visibility: u8); 84 | fs::set_managers(managers: BTreeSet); 85 | fs::is_manager(caller: &Principal) -> bool; 86 | fs::with(f: impl FnOnce(&Files) -> R) -> R; 87 | fs::load(); 88 | fs::save(); 89 | ``` 90 | 91 | ### Candid Interface 92 | 93 | ```candid 94 | create_file : (CreateFileInput, opt blob) -> (Result_2); 95 | delete_file : (nat32, opt blob) -> (Result_3); 96 | list_files : (nat32, opt nat32, opt nat32, opt blob) -> (Result_4) query; 97 | update_file_chunk : (UpdateFileChunkInput, opt blob) -> (Result_6); 98 | update_file_info : (UpdateFileInput, opt blob) -> (Result_7); 99 | ``` 100 | 101 | For complete API definitions and examples, see: 102 | - 
[Full Example](https://github.com/ldclabs/ic-oss/tree/main/examples/ai_canister) 103 | 104 | ## License 105 | Copyright © 2024-2025 [LDC Labs](https://github.com/ldclabs). 106 | 107 | Licensed under the MIT License. See [LICENSE](../../LICENSE-MIT) for details. 108 | -------------------------------------------------------------------------------- /src/ic_oss_cluster/README.md: -------------------------------------------------------------------------------- 1 | # `ic_oss_cluster` 2 | 3 | A decentralized Object Storage Service manager on the Internet Computer, part of [ic-oss](https://github.com/ldclabs/ic-oss). 4 | 5 | ## Features 6 | 7 | - Bucket permission policies management and access_token issuance 8 | - Bucket deployment management 9 | - Bucket recharge management 10 | 11 | ## Demo 12 | 13 | Try it online: https://a4gq6-oaaaa-aaaab-qaa4q-cai.raw.icp0.io/?id=x5573-nqaaa-aaaap-ahopq-cai 14 | 15 | ## Quick Start 16 | 17 | ### Deploy Locally 18 | 19 | ```bash 20 | # Deploy the cluster 21 | # dfx canister create --specified-id x5573-nqaaa-aaaap-ahopq-cai ic_oss_cluster 22 | dfx deploy ic_oss_cluster --argument "(opt variant {Init = 23 | record { 24 | name = \"LDC Labs\"; 25 | ecdsa_key_name = \"dfx_test_key\"; 26 | schnorr_key_name = \"dfx_test_key\"; 27 | token_expiration = 3600; 28 | bucket_topup_threshold = 1_000_000_000_000; 29 | bucket_topup_amount = 5_000_000_000_000; 30 | } 31 | })" 32 | 33 | # Get cluster info 34 | dfx canister call ic_oss_cluster get_cluster_info '()' 35 | ``` 36 | 37 | ### Common Operations 38 | 39 | ```bash 40 | # Add managers 41 | MYID=$(dfx identity get-principal) 42 | 43 | ic-oss-cli -i debug/uploader.pem identity 44 | # principal: nprym-ylvyz-ig3fr-lgcmn-zzzt4-tyuix-3v6bm-fsel7-6lq6x-zh2w7-zqe 45 | 46 | dfx canister call ic_oss_cluster admin_add_managers "(vec {principal \"$MYID\"; principal \"nprym-ylvyz-ig3fr-lgcmn-zzzt4-tyuix-3v6bm-fsel7-6lq6x-zh2w7-zqe\"})" 47 | 48 | # Add a wasm file to the cluster: 49 | ic-oss-cli -i debug/uploader.pem cluster-add-wasm -c x5573-nqaaa-aaaap-ahopq-cai --path debug/ic_oss_bucket.wasm.gz --description "ic_oss_bucket v0.9.8" 50 | 51 | # create a bucket with default settings 52 | dfx canister call ic_oss_cluster admin_create_bucket '(null, null)' 53 | # (variant { Ok = principal "yta6k-5x777-77774-aaaaa-cai" }) 54 | 55 | # Get bucket status 56 | dfx canister call ic_oss_cluster get_canister_status '(opt principal "yta6k-5x777-77774-aaaaa-cai")' 57 | 58 | # Get bucket deployment logs 59 | dfx canister call ic_oss_cluster bucket_deployment_logs '(null, null)' 60 | 61 | dfx canister call ic_oss_cluster get_deployed_buckets '()' 62 | 63 | dfx canister call ic_oss_cluster get_buckets '()' 64 | ``` 65 | 66 | ### Access Control Examples 67 | 68 | ```bash 69 | # Sign access token 70 | dfx canister call ic_oss_cluster admin_ed25519_access_token '(record { 71 | subject = principal "USER_ID"; 72 | audience = principal "YOUR_BUCKET_ID"; 73 | scope = "Folder.*:1 Bucket.Read.*"; 74 | })' 75 | 76 | # Attach policies 77 | dfx canister call ic_oss_cluster admin_attach_policies '(record { 78 | subject = principal "USER_ID"; 79 | audience = principal "YOUR_BUCKET_ID"; 80 | scope = "Folder.* Bucket.List.*"; 81 | })' 82 | ``` 83 | 84 | ## API Reference 85 | 86 | The canister exposes a comprehensive Candid API. 
Key endpoints include: 87 | 88 | ```candid 89 | # Permissions Operations 90 | admin_attach_policies : (Token) -> (Result_1) 91 | get_subject_policies : (principal) -> (Result_10) query 92 | admin_ed25519_access_token : (Token) -> (Result) 93 | admin_weak_access_token : (Token, nat64, nat64) -> (Result) query 94 | access_token : (principal) -> (Result) 95 | 96 | # Buckets Operations 97 | admin_add_wasm : (AddWasmInput, opt blob) -> (Result_1) 98 | admin_create_bucket : (opt CanisterSettings, opt blob) -> (Result_3) 99 | admin_deploy_bucket : (DeployWasmInput, opt blob) -> (Result_1) 100 | admin_upgrade_all_buckets : (opt blob) -> (Result_1) 101 | admin_topup_all_buckets : () -> (Result_4) 102 | bucket_deployment_logs : (opt nat, opt nat) -> (Result_5) query 103 | 104 | # Admin Operations 105 | admin_add_managers : (vec principal) -> (Result_1) 106 | admin_add_committers : (vec principal) -> (Result_1) 107 | ``` 108 | 109 | Full Candid API definition: [ic_oss_bucket.did](https://github.com/ldclabs/ic-oss/tree/main/src/ic_oss_cluster/ic_oss_cluster.did) 110 | 111 | ## License 112 | 113 | Copyright © 2024-2025 [LDC Labs](https://github.com/ldclabs). 114 | 115 | Licensed under the MIT License. See [LICENSE](../../LICENSE-MIT) for details. -------------------------------------------------------------------------------- /src/ic_oss_cluster/src/init.rs: -------------------------------------------------------------------------------- 1 | use candid::{CandidType, Principal}; 2 | use serde::Deserialize; 3 | use std::time::Duration; 4 | 5 | use crate::store; 6 | 7 | #[derive(Clone, Debug, CandidType, Deserialize)] 8 | pub enum ChainArgs { 9 | Init(InitArgs), 10 | Upgrade(UpgradeArgs), 11 | } 12 | 13 | #[derive(Clone, Debug, CandidType, Deserialize)] 14 | pub struct InitArgs { 15 | name: String, 16 | ecdsa_key_name: String, // Use "dfx_test_key" for local replica and "test_key_1" for a testing key for testnet and mainnet 17 | schnorr_key_name: String, // Use "dfx_test_key" for local replica and "test_key_1" for a testing key for testnet and mainnet 18 | token_expiration: u64, // in seconds 19 | bucket_topup_threshold: u128, 20 | bucket_topup_amount: u128, 21 | governance_canister: Option, 22 | } 23 | 24 | #[derive(Clone, Debug, CandidType, Deserialize)] 25 | pub struct UpgradeArgs { 26 | name: Option, 27 | token_expiration: Option, // in seconds 28 | bucket_topup_threshold: Option, 29 | bucket_topup_amount: Option, 30 | governance_canister: Option, 31 | } 32 | 33 | #[ic_cdk::init] 34 | fn init(args: Option) { 35 | match args.expect("init args is missing") { 36 | ChainArgs::Init(args) => { 37 | store::state::with_mut(|s| { 38 | s.name = args.name; 39 | s.ecdsa_key_name = args.ecdsa_key_name; 40 | s.schnorr_key_name = args.schnorr_key_name; 41 | s.token_expiration = if args.token_expiration == 0 { 42 | 3600 43 | } else { 44 | args.token_expiration 45 | }; 46 | s.bucket_topup_threshold = args.bucket_topup_threshold; 47 | s.bucket_topup_amount = args.bucket_topup_amount; 48 | s.governance_canister = args.governance_canister; 49 | }); 50 | } 51 | ChainArgs::Upgrade(_) => { 52 | ic_cdk::trap( 53 | "cannot initialize the canister with an Upgrade args. 
Please provide an Init args.", 54 | ); 55 | } 56 | } 57 | 58 | ic_cdk_timers::set_timer(Duration::from_secs(0), store::state::try_init_public_key()); 59 | } 60 | 61 | #[ic_cdk::pre_upgrade] 62 | fn pre_upgrade() { 63 | store::state::save(); 64 | } 65 | 66 | #[ic_cdk::post_upgrade] 67 | fn post_upgrade(args: Option) { 68 | store::state::load(); 69 | 70 | match args { 71 | Some(ChainArgs::Upgrade(args)) => { 72 | store::state::with_mut(|s| { 73 | if let Some(name) = args.name { 74 | s.name = name; 75 | } 76 | if let Some(token_expiration) = args.token_expiration { 77 | s.token_expiration = if token_expiration == 0 { 78 | 3600 79 | } else { 80 | token_expiration 81 | }; 82 | } 83 | if let Some(bucket_topup_threshold) = args.bucket_topup_threshold { 84 | s.bucket_topup_threshold = bucket_topup_threshold; 85 | } 86 | if let Some(bucket_topup_amount) = args.bucket_topup_amount { 87 | s.bucket_topup_amount = bucket_topup_amount; 88 | } 89 | if let Some(governance_canister) = args.governance_canister { 90 | s.governance_canister = Some(governance_canister); 91 | } 92 | }); 93 | } 94 | Some(ChainArgs::Init(_)) => { 95 | ic_cdk::trap( 96 | "cannot upgrade the canister with an Init args. Please provide an Upgrade args.", 97 | ); 98 | } 99 | _ => {} 100 | } 101 | 102 | store::state::with_mut(|s| { 103 | if s.schnorr_key_name.is_empty() { 104 | s.schnorr_key_name = s.ecdsa_key_name.clone(); 105 | } 106 | }); 107 | 108 | ic_cdk_timers::set_timer(Duration::from_secs(0), store::state::try_init_public_key()); 109 | } 110 | -------------------------------------------------------------------------------- /src/ic_oss_cli/src/file.rs: -------------------------------------------------------------------------------- 1 | use chrono::prelude::*; 2 | use ic_oss_types::{file::*, format_error}; 3 | use serde_bytes::ByteArray; 4 | use sha3::{Digest, Sha3_256}; 5 | use tokio::io::AsyncReadExt; 6 | use tokio::{time, time::Duration}; 7 | 8 | pub async fn upload_file( 9 | cli: &ic_oss::bucket::Client, 10 | enable_hash_index: bool, 11 | parent: u32, 12 | file: &str, 13 | retry: u8, 14 | ) -> Result<(), String> { 15 | let file_path = std::path::Path::new(file); 16 | let metadata = std::fs::metadata(file_path).map_err(format_error)?; 17 | if !metadata.is_file() { 18 | return Err(format!("not a file: {:?}", file)); 19 | } 20 | 21 | let file_size = metadata.len(); 22 | let content_type = infer::get_from_path(file_path) 23 | .map_err(format_error)? 
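// Detect the MIME type from the file's magic bytes first; the extension-based
// lookup below is only used as a fallback when detection finds nothing.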
24 | .map(|f| f.mime_type()); 25 | 26 | let content_type = if let Some(content_type) = content_type { 27 | content_type 28 | } else { 29 | mime_db::lookup(file).unwrap_or("application/octet-stream") 30 | }; 31 | 32 | let hash: Option> = if enable_hash_index { 33 | let fs = tokio::fs::File::open(&file_path) 34 | .await 35 | .map_err(format_error)?; 36 | Some(pre_sum_hash(fs).await?.into()) 37 | } else { 38 | None 39 | }; 40 | 41 | let start_ts: DateTime = Local::now(); 42 | let input = CreateFileInput { 43 | parent, 44 | name: file_path.file_name().unwrap().to_string_lossy().to_string(), 45 | content_type: content_type.to_string(), 46 | size: Some(file_size), 47 | hash, 48 | ..Default::default() 49 | }; 50 | 51 | let fs = tokio::fs::File::open(&file_path) 52 | .await 53 | .map_err(format_error)?; 54 | let mut res = cli 55 | .upload(fs, input, move |progress| { 56 | let ts: DateTime = Local::now(); 57 | let ts = ts.format("%Y-%m-%d %H:%M:%S").to_string(); 58 | println!( 59 | "{} uploaded: {:.2}%, {:?}", 60 | ts, 61 | (progress.filled as f32 / file_size as f32) * 100.0, 62 | progress 63 | ); 64 | }) 65 | .await 66 | .map_err(format_error)?; 67 | 68 | let mut i = 0u8; 69 | while let Some(err) = res.error { 70 | i += 1; 71 | if i > retry { 72 | return Err(format!("upload failed: {}", err)); 73 | } 74 | 75 | println!( 76 | "upload error: {}.\ntry to resumable upload {} after 5s:", 77 | err, i 78 | ); 79 | time::sleep(Duration::from_secs(5)).await; 80 | let fs = tokio::fs::File::open(&file_path) 81 | .await 82 | .map_err(format_error)?; 83 | res = cli 84 | .upload_chunks( 85 | fs, 86 | res.id, 87 | Some(file_size), 88 | None, 89 | &res.uploaded_chunks, 90 | move |progress| { 91 | let ts: DateTime = Local::now(); 92 | let ts = ts.format("%Y-%m-%d %H:%M:%S").to_string(); 93 | println!( 94 | "{} uploaded: {:.2}%, {:?}", 95 | ts, 96 | (progress.filled as f32 / file_size as f32) * 100.0, 97 | progress 98 | ); 99 | }, 100 | ) 101 | .await; 102 | } 103 | 104 | println!( 105 | "upload success, file id: {}, size: {}, chunks: {}, retry: {}, time elapsed: {}", 106 | res.id, 107 | res.filled, 108 | res.uploaded_chunks.len(), 109 | i, 110 | Local::now().signed_duration_since(start_ts) 111 | ); 112 | Ok(()) 113 | } 114 | 115 | async fn pre_sum_hash(mut fs: tokio::fs::File) -> Result<[u8; 32], String> { 116 | let mut hasher = Sha3_256::new(); 117 | let mut buf = vec![0u8; 1024 * 1024 * 2]; 118 | loop { 119 | let n = fs.read(&mut buf).await.map_err(format_error)?; 120 | if n == 0 { 121 | break; 122 | } 123 | hasher.update(&buf[..n]); 124 | } 125 | Ok(hasher.finalize().into()) 126 | } 127 | -------------------------------------------------------------------------------- /docs/access_control.md: -------------------------------------------------------------------------------- 1 | # `IC-OSS` Access Control 2 | 3 | `ic-oss` provides 4 different access control mechanisms for file resources, which can adapt to scenarios ranging from minimalist ones to complex enterprise-level permission control scenarios. 4 | 5 | ## The `managers` and `auditors` attributes of the Bucket 6 | 7 | Managers can operate on all files and folders in the bucket, including creating, deleting, moving, modifying, etc. 8 | 9 | Auditors can view all files and folders in the bucket, including archived (status == -1) resources, but cannot perform modification operations. 
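For example, a controller can grant both roles to a principal through the admin interfaces described in the next paragraph (a minimal sketch; replace the principal with the identity you want to authorize):

```shell
# Run as a controller of the bucket canister.
MYID=$(dfx identity get-principal)
dfx canister call ic_oss_bucket admin_set_managers "(vec {principal \"$MYID\"})"
dfx canister call ic_oss_bucket admin_set_auditors "(vec {principal \"$MYID\"})"
```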
10 | 11 | The `admin_set_managers` interface and the `admin_set_auditors` interface can set the managers and auditors of the bucket, and the `admin_update_bucket` can update other attributes of the bucket. However, only the controllers of the canister have the permission to call these 3 interfaces. 12 | 13 | ## The `visibility` attribute of the Bucket 14 | 15 | `visibility` controls the visibility of the bucket and has 2 values: 16 | - 0: Private, only users with access permission to the bucket can access it. 17 | - 1: Public, any user without permission can view all files and folders in the bucket, but does not include archived (status == -1) resources, and cannot perform modification operations. 18 | 19 | The `admin_update_bucket` can update the `visibility` attribute of the bucket. 20 | 21 | ## The `status` attribute of the Bucket 22 | 23 | `status` controls the status of the bucket and has 3 values: 24 | - 0: Normal, all operations can be performed. 25 | - 1: Read-only, only read operations can be performed, and write operations cannot be performed. 26 | - -1: Archived. Only `managers` and `auditors` can view all files and folders in the bucket, and no other operations can be performed. 27 | Files and folders also have a `status` attribute, and its definition is similar to the above. 28 | 29 | ## Access Control based on `access_token` and Permissions Policy 30 | 31 | Based on `access_token` and permissions policy, more complex and fine-grained access control can be achieved for files and folders in the bucket. 32 | 33 | `ic_oss_cluster` records the user's permissions policies and issues `access_token` for the user. The `access_token` contains the user's permission information. `ic_oss_bucket` verifies the `access_token` and determines whether the user has the permission to perform the operation based on the permission information in it. 34 | 35 | The managers of `ic_oss_cluster` can use the `admin_attach_policies` and `admin_detach_policies` interfaces to assign or cancel permissions for the user. 36 | 37 | ### Access Token 38 | 39 | The `access_token` implemented by `ic-oss` based on COSE (RFC9052) and CWT (RFC8392) supports two signature algorithms: Secp256k1 and Ed25519. The permissions policies are stored in the `scope (9)` field of the `access_token`. The core information of the Token is as follows: 40 | 41 | ```rust 42 | pub struct Token { 43 | pub subject: Principal, // the owner of the token 44 | pub audience: Principal, // the canister id of the bucket 45 | pub policies: String, // the permission policies 46 | } 47 | ``` 48 | For the complete implementation, please refer to the [ic-oss-cose](https://github.com/ldclabs/ic-oss/tree/main/src/ic_oss_cose) library. 49 | 50 | ### Permissions Policy 51 | 52 | `ic-oss` has designed a set of simple yet powerful permission policy patterns, which can achieve from simple read and write permissions to complex enterprise-level permission control. 53 | 54 | The basic expression of Permission is as follows: 55 | 56 | ```shell 57 | Resource.Operation[.Constraint] 58 | ``` 59 | 60 | Permission examples: 61 | ```shell 62 | * # == *.* 63 | File.Read # == File.Read.* 64 | Folder.Write # == Folder.Write.* 65 | Bucket.Read # == Bucket.Read.* 66 | Bucket.Read.Info 67 | Bucket.*.File 68 | ``` 69 | 70 | The basic expression of Permission Policy is as follows: 71 | 72 | ```shell 73 | Permission:Resource1,Resource2,... 
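# The optional resource list restricts the permission to specific resource IDs;
# omitting it (or using "*") applies the permission to all resources,
# e.g. Folder.Write:1,2 allows writing only to folders 1 and 2.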
74 | ``` 75 | 76 | Permission Policy examples: 77 | ```shell 78 | File.*:* # == File.* 79 | File.Read:* # == File.Read 80 | Folder.Write:1,2 81 | Bucket.Read:* # == Bucket.Read 82 | ``` 83 | 84 | The scope of `access_token` contains 1 to n Permission Policies, separated by spaces. 85 | 86 | Permission Policies examples: 87 | ```shell 88 | scope = "File.*:1 Folder.*:2,3,5 Folder.Read Bucket.Read" 89 | ``` 90 | 91 | For the complete implementation, please refer to the [ic-oss-types](https://github.com/ldclabs/ic-oss/tree/main/src/ic_oss_types) library. 92 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # `IC-OSS` 2 | 3 | 🗂 A decentralized Object Storage Service on the Internet Computer. 4 | 5 | 💝 Backed by a **$25k Developer Grant** from the [DFINITY Foundation](https://dfinity.org/grants). 6 | 7 | ## Overview 8 | 9 | `ic-oss` is a decentralized object storage service fully running on the Internet Computer that provides: 10 | - Simple and efficient file storage/retrieval 11 | - File directory tree structure 12 | - Unlimited horizontal scalability 13 | - Enterprise-grade access control 14 | 15 | Perfect for NFTs, chain blocks, verifiable credentials, blogs, documents, and decentralized applications. 16 | 17 | ![IC-OSS](./ic-oss.webp) 18 | 19 | ## Key Features 20 | 21 | - Large File Support 22 | - File sharding 23 | - File encryption 24 | - Concurrent high-speed uploads 25 | - Resumable uploads 26 | - Segmented downloads 27 | - Advanced Organization 28 | - File directory tree structure 29 | - Bucket-based clustering 30 | - Flexible Access Control 31 | - Public/private access 32 | - Read/write permissions 33 | - File/folder/bucket level controls 34 | 35 | Check more details: 36 | - [Bucket features](https://github.com/ldclabs/ic-oss/blob/main/src/ic_oss_bucket/README.md) 37 | - [Cluster features](https://github.com/ldclabs/ic-oss/blob/main/src/ic_oss_cluster/README.md) 38 | - [Access Control](https://github.com/ldclabs/ic-oss/blob/main/docs/access_control.md) 39 | 40 | ## Packages 41 | 42 | | Package | Description | 43 | | :--------------------------------------------------------------------------------------------------- | :----------------------------------------------------------- | 44 | | [ic_object_store](https://github.com/ldclabs/ic-oss/tree/main/src/ic_object_store) | Rust client SDK for the IC Object Store canister | 45 | | [ic_object_store_canister](https://github.com/ldclabs/ic-oss/tree/main/src/ic_object_store_canister) | A Object Store of Apache Arrow | 46 | | [ic_oss_bucket](https://github.com/ldclabs/ic-oss/tree/main/src/ic_oss_bucket) | Storage bucket smart contract | 47 | | [ic_oss_cluster](https://github.com/ldclabs/ic-oss/tree/main/src/ic_oss_cluster) | Cluster management smart contract | 48 | | [ic-oss-can](https://github.com/ldclabs/ic-oss/tree/main/src/ic_oss_can) | Rust library for implementing file storage in smart contract | 49 | | [ic-oss-types](https://github.com/ldclabs/ic-oss/tree/main/src/ic_oss_types) | Rust shared type definitions | 50 | | [ic-oss](https://github.com/ldclabs/ic-oss/tree/main/src/ic_oss) | Rust client SDK | 51 | | [ic_oss_ts](https://github.com/ldclabs/ic-oss/tree/main/src/ic_oss_ts) | Typescript client SDK | 52 | | [ic-oss-cli](https://github.com/ldclabs/ic-oss/tree/main/src/ic_oss_cli) | Command-line tool implemented in Rust | 53 | 54 | ## Who's using? 
55 | 56 | - [dMsg.net](https://dmsg.net): The world's 1st decentralized end-to-end encrypted messaging application fully running on the Internet Computer blockchain. dMsg.net uses IC-OSS to store user avatars (public), channel logos and encrypted files (private). 57 | - [Anda](https://github.com/ldclabs/anda): An AI agent framework built with Rust, powered by ICP and TEEs. Anda uses `ic_object_store_canister` to store AI memory states. 58 | 59 | If you plan to use this project and have any questions, feel free to open an issue. I will address it as soon as possible. 60 | 61 | ## Integration 62 | 63 | ![IC-OSS Sequence](./ic-oss-sequence.webp) 64 | 65 | 1. Dapp backend: Configure access control via `ic_oss_cluster` 66 | 2. Dapp frontend: Obtain `access_token` using `ic-oss-ts` SDK 67 | 3. Dapp frontend: Use token to interact with `ic_oss_bucket` 68 | 69 | ## Examples 70 | 71 | - [examples/ai_canister](https://github.com/ldclabs/ic-oss/tree/main/examples/ai_canister): A Rust demonstration project used to show how to implement large file storage in the ICP canister by using `ic-oss-can`. 72 | - [examples/upload_js](https://github.com/ldclabs/ic-oss/tree/main/examples/upload_js): A Javascript demonstration project used to show how to upload files to ic_oss_bucket canister by using `@ldclabs/ic_oss_ts`. 73 | - [examples/video_player](https://github.com/ldclabs/ic-oss/tree/main/examples/video_player): A video player website to test HTTP range request. 74 | 75 | ## License 76 | Copyright © 2024-2025 [LDC Labs](https://github.com/ldclabs). 77 | 78 | `ldclabs/ic-oss` is licensed under the MIT License. See [LICENSE](LICENSE-MIT) for the full license text. 79 | -------------------------------------------------------------------------------- /src/ic_oss_cluster/src/lib.rs: -------------------------------------------------------------------------------- 1 | use candid::{utils::ArgumentEncoder, CandidType, Nat, Principal}; 2 | use ic_cdk::management_canister as mgt; 3 | use ic_oss_types::{ 4 | cluster::{AddWasmInput, BucketDeploymentInfo, ClusterInfo, DeployWasmInput, WasmInfo}, 5 | cose::Token, 6 | }; 7 | use serde::{Deserialize, Serialize}; 8 | use serde_bytes::{ByteArray, ByteBuf}; 9 | use std::collections::{BTreeMap, BTreeSet}; 10 | 11 | mod api_admin; 12 | mod api_auth; 13 | mod api_query; 14 | mod ecdsa; 15 | mod init; 16 | mod schnorr; 17 | mod store; 18 | 19 | use crate::init::ChainArgs; 20 | 21 | static ANONYMOUS: Principal = Principal::anonymous(); 22 | // NNS Cycles Minting Canister: "rkp4c-7iaaa-aaaaa-aaaca-cai" 23 | static CMC_PRINCIPAL: Principal = Principal::from_slice(&[0, 0, 0, 0, 0, 0, 0, 4, 1, 1]); 24 | static TOKEN_KEY_DERIVATION_PATH: &[u8] = b"ic_oss_cluster"; 25 | const SECONDS: u64 = 1_000_000_000; 26 | const MILLISECONDS: u64 = 1_000_000; 27 | 28 | fn is_controller() -> Result<(), String> { 29 | let caller = ic_cdk::api::msg_caller(); 30 | if ic_cdk::api::is_controller(&caller) || store::state::is_controller(&caller) { 31 | Ok(()) 32 | } else { 33 | Err("user is not a controller".to_string()) 34 | } 35 | } 36 | 37 | fn is_controller_or_manager() -> Result<(), String> { 38 | let caller = ic_cdk::api::msg_caller(); 39 | if ic_cdk::api::is_controller(&caller) 40 | || store::state::is_controller(&caller) 41 | || store::state::is_manager(&caller) 42 | { 43 | Ok(()) 44 | } else { 45 | Err("user is not a controller or manager".to_string()) 46 | } 47 | } 48 | 49 | fn is_controller_or_manager_or_committer() -> Result<(), String> { 50 | let caller = ic_cdk::api::msg_caller(); 51 | if 
ic_cdk::api::is_controller(&caller) 52 | || store::state::is_controller(&caller) 53 | || store::state::is_manager(&caller) 54 | || store::state::is_committer(&caller) 55 | { 56 | Ok(()) 57 | } else { 58 | Err("user is not a controller or manager or committer".to_string()) 59 | } 60 | } 61 | 62 | pub fn validate_principals(principals: &BTreeSet) -> Result<(), String> { 63 | if principals.is_empty() { 64 | return Err("principals cannot be empty".to_string()); 65 | } 66 | if principals.contains(&ANONYMOUS) { 67 | return Err("anonymous user is not allowed".to_string()); 68 | } 69 | Ok(()) 70 | } 71 | 72 | async fn call(id: Principal, method: &str, args: In, cycles: u128) -> Result 73 | where 74 | In: ArgumentEncoder + Send, 75 | Out: candid::CandidType + for<'a> candid::Deserialize<'a>, 76 | { 77 | let res = ic_cdk::call::Call::bounded_wait(id, method) 78 | .with_args(&args) 79 | .with_cycles(cycles) 80 | .await 81 | .map_err(|err| format!("failed to call {} on {:?}, error: {:?}", method, &id, err))?; 82 | res.candid().map_err(|err| { 83 | format!( 84 | "failed to decode response from {} on {:?}, error: {:?}", 85 | method, &id, err 86 | ) 87 | }) 88 | } 89 | 90 | #[derive(Clone, Eq, PartialEq, Debug, CandidType, Deserialize)] 91 | pub struct SubnetId { 92 | pub principal_id: String, 93 | } 94 | 95 | #[derive(Clone, Eq, PartialEq, Debug, CandidType, Deserialize)] 96 | pub enum SubnetSelection { 97 | /// Choose a specific subnet 98 | Subnet { subnet: SubnetId }, 99 | // Skip the SubnetFilter on the CMC SubnetSelection for simplification. 100 | // https://github.com/dfinity/ic/blob/master/rs/nns/cmc/cmc.did#L35 101 | } 102 | 103 | #[derive(Clone, Eq, PartialEq, Debug, CandidType, Deserialize)] 104 | struct CreateCanisterInput { 105 | pub settings: Option, 106 | pub subnet_selection: Option, 107 | pub subnet_type: Option, 108 | } 109 | 110 | /// Error for create_canister. 
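/// Only the failure case is modelled here: `refund_amount` is the amount of
/// cycles refunded by the CMC and `create_error` describes why creation failed.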
111 | #[derive(Clone, Eq, PartialEq, Debug, CandidType, Deserialize, Serialize)] 112 | pub enum CreateCanisterOutput { 113 | Refunded { 114 | refund_amount: u128, 115 | create_error: String, 116 | }, 117 | } 118 | 119 | async fn create_canister_on( 120 | subnet: Principal, 121 | settings: Option, 122 | cycles: u128, 123 | ) -> Result { 124 | let arg = CreateCanisterInput { 125 | settings, 126 | subnet_type: None, 127 | subnet_selection: Some(SubnetSelection::Subnet { 128 | subnet: SubnetId { 129 | principal_id: subnet.to_text(), 130 | }, 131 | }), 132 | }; 133 | let res: Result = 134 | call(CMC_PRINCIPAL, "create_canister", (&arg,), cycles).await?; 135 | res.map_err(|err| format!("failed to create canister, error: {:?}", err)) 136 | } 137 | 138 | ic_cdk::export_candid!(); 139 | -------------------------------------------------------------------------------- /src/ic_oss_ts/src/uploader.ts: -------------------------------------------------------------------------------- 1 | import { sha3_256 } from '@noble/hashes/sha3' 2 | import { type ReadableStream } from 'web-streams-polyfill' 3 | import { BucketCanister } from './bucket.canister.js' 4 | import { ConcurrencyQueue } from './queue.js' 5 | import { 6 | CHUNK_SIZE, 7 | readableStreamAsyncIterator, 8 | readAll, 9 | toFixedChunkSizeReadable 10 | } from './stream.js' 11 | import { FileConfig, Progress, UploadFileChunksResult } from './types.js' 12 | 13 | export const MAX_FILE_SIZE_PER_CALL = 1024 * 2048 14 | 15 | export class Uploader { 16 | readonly #cli: BucketCanister 17 | readonly concurrency: number 18 | readonly setReadonly: boolean 19 | 20 | constructor( 21 | client: BucketCanister, 22 | concurrency: number = 16, 23 | setReadonly = false 24 | ) { 25 | this.#cli = client 26 | this.concurrency = concurrency 27 | this.setReadonly = setReadonly 28 | } 29 | 30 | async upload( 31 | file: FileConfig, 32 | onProgress: (progress: Progress) => void = () => {} 33 | ): Promise { 34 | const stream = await toFixedChunkSizeReadable(file) 35 | const size = file.size || 0 36 | if (size > 0 && size <= MAX_FILE_SIZE_PER_CALL) { 37 | const content = await readAll(stream, size) 38 | const hash = file.hash || sha3_256(content) 39 | let res = await this.#cli.createFile({ 40 | status: this.setReadonly ? [1] : [], 41 | content: [content], 42 | custom: [], 43 | hash: [hash], 44 | name: file.name, 45 | size: [BigInt(size)], 46 | content_type: file.contentType, 47 | parent: file.parent || 0, 48 | dek: [] 49 | }) 50 | 51 | onProgress({ 52 | filled: size, 53 | size, 54 | chunkIndex: 0, 55 | concurrency: 1 56 | }) 57 | 58 | return { 59 | id: res.id, 60 | filled: size, 61 | uploadedChunks: [], 62 | hash 63 | } 64 | } 65 | 66 | let res = await this.#cli.createFile({ 67 | status: [], 68 | content: [], 69 | custom: [], 70 | hash: [], 71 | name: file.name, 72 | size: size > 0 ? 
[BigInt(size)] : [], 73 | content_type: file.contentType, 74 | parent: file.parent || 0, 75 | dek: [] 76 | }) 77 | 78 | return await this.upload_chunks( 79 | stream, 80 | res.id, 81 | size, 82 | file.hash || null, 83 | [], 84 | onProgress 85 | ) 86 | } 87 | 88 | async upload_chunks( 89 | stream: ReadableStream, 90 | id: number, 91 | size: number, 92 | hash: Uint8Array | null = null, 93 | excludeChunks: number[] = [], 94 | onProgress: (progress: Progress) => void = () => {} 95 | ): Promise { 96 | const queue = new ConcurrencyQueue(this.concurrency) 97 | 98 | let chunkIndex = 0 99 | let prevChunkSize = CHUNK_SIZE 100 | const hasher = sha3_256.create() 101 | const rt: UploadFileChunksResult = { 102 | id, 103 | filled: 0, 104 | uploadedChunks: [], 105 | hash 106 | } 107 | 108 | try { 109 | for await (const value of readableStreamAsyncIterator(stream)) { 110 | if (prevChunkSize !== CHUNK_SIZE) { 111 | throw new Error( 112 | `Prev chunk size mismatch, expected ${CHUNK_SIZE} but got ${prevChunkSize}` 113 | ) 114 | } 115 | const chunk = new Uint8Array(value) 116 | prevChunkSize = chunk.byteLength 117 | const index = chunkIndex 118 | chunkIndex += 1 119 | 120 | if (excludeChunks.includes(index)) { 121 | rt.filled += chunk.byteLength 122 | onProgress({ 123 | filled: rt.filled, 124 | size, 125 | chunkIndex: index, 126 | concurrency: 0 127 | }) 128 | continue 129 | } 130 | 131 | await queue.push(async (_aborter, concurrency) => { 132 | !hash && hasher.update(chunk) 133 | const res = await this.#cli.updateFileChunk({ 134 | id, 135 | chunk_index: index, 136 | content: chunk 137 | }) 138 | 139 | rt.filled += chunk.byteLength 140 | rt.uploadedChunks.push(index) 141 | onProgress({ 142 | filled: Number(res.filled), 143 | size, 144 | chunkIndex: index, 145 | concurrency 146 | }) 147 | }) 148 | } 149 | 150 | await queue.wait() 151 | if (!rt.hash) { 152 | rt.hash = hasher.digest() 153 | } 154 | await this.#cli.updateFileInfo({ 155 | id, 156 | status: this.setReadonly ? [1] : [], 157 | hash: [rt.hash], 158 | custom: [], 159 | name: [], 160 | size: [BigInt(size)], 161 | content_type: [] 162 | }) 163 | } catch (err) { 164 | ;(err as any).data = rt 165 | throw err 166 | } 167 | 168 | return rt 169 | } 170 | } 171 | -------------------------------------------------------------------------------- /src/ic_oss_bucket/README.md: -------------------------------------------------------------------------------- 1 | # `ic_oss_bucket` 2 | 3 | A decentralized Object Storage Service bucket on the Internet Computer, part of [ic-oss](https://github.com/ldclabs/ic-oss). 4 | 5 | ## Overview 6 | 7 | `ic_oss_bucket` is an ICP smart contract that functions as a storage bucket in the `ic-oss` cluster. Multiple buckets can be deployed for horizontal scaling, all managed by `ic_oss_cluster`. 8 | 9 | ## Features 10 | 11 | - Supports large file uploads and downloads through file sharding, concurrent high-speed uploads, resumable uploads, and segmented downloads. 12 | - Enables HTTP streaming and HTTP range downloads. 13 | - Ensures file deduplication and retrieval using file hash indexing. 14 | - Supports encrypted file storage and file-level encryption keys. 15 | - Allows custom metadata for files. 16 | - Provides a directory tree structure, enabling file and folder movement within the same bucket. 17 | - Offers public and private modes for a bucket. 18 | - Supports archive, read-write, and read-only status for files, folders, and buckets. 
19 | - Enables fine-grained access control for reading, writing, and deleting files, folders, and buckets. 20 | - Includes auditors with the ability to read all contents within a bucket. 21 | 22 | ## Demo 23 | 24 | Try it online: https://a4gq6-oaaaa-aaaab-qaa4q-cai.raw.icp0.io/?id=mmrxu-fqaaa-aaaap-ahhna-cai 25 | 26 | Access file through HTTPs: 27 | ``` 28 | # Direct download 29 | https://mmrxu-fqaaa-aaaap-ahhna-cai.icp0.io/f/1 # By ID 30 | https://mmrxu-fqaaa-aaaap-ahhna-cai.icp0.io/h/ # By hash, when enable_hash_index = true 31 | 32 | # Download with filename 33 | https://mmrxu-fqaaa-aaaap-ahhna-cai.icp0.io/f/1?filename=mydoc.md 34 | 35 | # Download with token 36 | https://mmrxu-fqaaa-aaaap-ahhna-cai.icp0.io/f/1?filename=mydoc.md&token= 37 | 38 | # Inline viewing 39 | https://mmrxu-fqaaa-aaaap-ahhna-cai.icp0.io/f/1?inline 40 | https://mmrxu-fqaaa-aaaap-ahhna-cai.icp0.io/f/2?inline 41 | ``` 42 | 43 | ## Quick Start 44 | 45 | ### Local Deployment 46 | 47 | 1. Deploy the canister: 48 | ```bash 49 | dfx deploy ic_oss_bucket 50 | ``` 51 | 52 | Or with custom configuration: 53 | ```bash 54 | # dfx canister create --specified-id mmrxu-fqaaa-aaaap-ahhna-cai ic_oss_bucket 55 | dfx deploy ic_oss_bucket --argument "(opt variant {Init = 56 | record { 57 | name = \"LDC Labs\"; 58 | file_id = 0; 59 | max_file_size = 0; 60 | max_folder_depth = 10; 61 | max_children = 1000; 62 | visibility = 0; 63 | max_custom_data_size = 4096; 64 | enable_hash_index = false; 65 | } 66 | })" 67 | ``` 68 | 69 | 2. Set up permissions: 70 | ```bash 71 | # Get your principal 72 | MYID=$(dfx identity get-principal) 73 | # Get the uploader principal 74 | ic-oss-cli -i debug/uploader.pem identity 75 | # principal: nprym-ylvyz-ig3fr-lgcmn-zzzt4-tyuix-3v6bm-fsel7-6lq6x-zh2w7-zqe 76 | 77 | # Add managers 78 | dfx canister call ic_oss_bucket admin_add_managers "(vec {principal \"$MYID\"; principal \"nprym-ylvyz-ig3fr-lgcmn-zzzt4-tyuix-3v6bm-fsel7-6lq6x-zh2w7-zqe\"})" 79 | 80 | # Configure public keys and visibility 81 | dfx canister call ic_oss_bucket admin_update_bucket '(record { 82 | visibility = opt 1; 83 | trusted_eddsa_pub_keys = opt vec {blob "..."}; # Your public key here 84 | }, null)' 85 | ``` 86 | 87 | 3. Basic operations: 88 | ```bash 89 | # Get bucket info 90 | dfx canister call ic_oss_bucket get_bucket_info '(null)' 91 | 92 | # Upload a file 93 | ic-oss-cli -i debug/uploader.pem put -b mmrxu-fqaaa-aaaap-ahhna-cai --path README.md 94 | 95 | # Create folders 96 | dfx canister call ic_oss_bucket create_folder '(record { parent = 0; name = "home"; }, null)' 97 | 98 | # List contents 99 | dfx canister call ic_oss_bucket list_files '(0, null, null, null)' # Files 100 | dfx canister call ic_oss_bucket list_folders '(0, null, null, null)' # Folders 101 | ``` 102 | 103 | ## API Reference 104 | 105 | The canister exposes a comprehensive Candid API. 
Key endpoints include: 106 | 107 | ```candid 108 | # File Operations 109 | create_file : (CreateFileInput, opt blob) -> (Result_2) 110 | update_file_chunk : (UpdateFileChunkInput, opt blob) -> (Result_13) 111 | update_file_info : (UpdateFileInput, opt blob) -> (Result_12) 112 | get_file_info : (nat32, opt blob) -> (Result_8) query 113 | get_file_chunks : (nat32, nat32, opt nat32, opt blob) -> (Result_7) query 114 | list_files : (nat32, opt nat32, opt nat32, opt blob) -> (Result_10) query 115 | delete_file : (nat32, opt blob) -> (Result_3) 116 | 117 | # Folder Operations 118 | create_folder : (CreateFolderInput, opt blob) -> (Result_2) 119 | list_folders : (nat32, opt nat32, opt nat32, opt blob) -> (Result_11) query 120 | delete_folder : (nat32, opt blob) -> (Result_3) 121 | 122 | # Admin Operations 123 | admin_add_managers : (vec principal) -> (Result) 124 | admin_update_bucket : (UpdateBucketInput) -> (Result) 125 | ``` 126 | 127 | Full Candid API definition: [ic_oss_bucket.did](https://github.com/ldclabs/ic-oss/tree/main/src/ic_oss_bucket/ic_oss_bucket.did) 128 | 129 | ## License 130 | 131 | Copyright © 2024-2025 [LDC Labs](https://github.com/ldclabs). 132 | 133 | Licensed under the MIT License. See [LICENSE](../../LICENSE-MIT) for details. -------------------------------------------------------------------------------- /src/ic_object_store_canister/ic_object_store_canister.did: -------------------------------------------------------------------------------- 1 | type Attribute = variant { 2 | ContentType; 3 | Metadata : text; 4 | ContentEncoding; 5 | ContentLanguage; 6 | CacheControl; 7 | ContentDisposition; 8 | }; 9 | type Error = variant { 10 | NotModified : record { path : text; error : text }; 11 | UnknownConfigurationKey : record { key : text }; 12 | NotFound : record { path : text }; 13 | PermissionDenied : record { path : text; error : text }; 14 | Generic : record { error : text }; 15 | AlreadyExists : record { path : text }; 16 | InvalidPath : record { path : text }; 17 | NotSupported : record { error : text }; 18 | Precondition : record { path : text; error : text }; 19 | NotImplemented; 20 | Unauthenticated : record { path : text; error : text }; 21 | }; 22 | type GetOptions = record { 23 | if_match : opt text; 24 | if_unmodified_since : opt nat64; 25 | head : bool; 26 | if_modified_since : opt nat64; 27 | version : opt text; 28 | if_none_match : opt text; 29 | range : opt GetRange; 30 | }; 31 | type GetRange = variant { 32 | Offset : nat64; 33 | Bounded : record { nat64; nat64 }; 34 | Suffix : nat64; 35 | }; 36 | type GetResult = record { 37 | meta : ObjectMeta; 38 | attributes : vec record { Attribute; text }; 39 | range : record { nat64; nat64 }; 40 | payload : blob; 41 | }; 42 | type InitArgs = record { governance_canister : opt principal; name : text }; 43 | type InstallArgs = variant { Upgrade : UpgradeArgs; Init : InitArgs }; 44 | type ListResult = record { 45 | common_prefixes : vec text; 46 | objects : vec ObjectMeta; 47 | }; 48 | type ObjectMeta = record { 49 | aes_tags : opt vec blob; 50 | size : nat64; 51 | e_tag : opt text; 52 | version : opt text; 53 | last_modified : nat64; 54 | aes_nonce : opt blob; 55 | location : text; 56 | }; 57 | type PartId = record { content_id : text }; 58 | type PutMode = variant { Overwrite; Create; Update : UpdateVersion }; 59 | type PutMultipartOptions = record { 60 | aes_tags : opt vec blob; 61 | tags : text; 62 | attributes : vec record { Attribute; text }; 63 | aes_nonce : opt blob; 64 | }; 65 | type PutOptions = record { 66 | 
aes_tags : opt vec blob; 67 | mode : PutMode; 68 | tags : text; 69 | attributes : vec record { Attribute; text }; 70 | aes_nonce : opt blob; 71 | }; 72 | type Result = variant { Ok; Err : Error }; 73 | type Result_1 = variant { Ok; Err : text }; 74 | type Result_10 = variant { Ok : vec ObjectMeta; Err : Error }; 75 | type Result_11 = variant { Ok : ListResult; Err : Error }; 76 | type Result_12 = variant { Ok : PartId; Err : Error }; 77 | type Result_13 = variant { Ok : text; Err : text }; 78 | type Result_2 = variant { Ok : UpdateVersion; Err : Error }; 79 | type Result_3 = variant { Ok : text; Err : Error }; 80 | type Result_4 = variant { Ok : GetResult; Err : Error }; 81 | type Result_5 = variant { Ok : blob; Err : Error }; 82 | type Result_6 = variant { Ok : vec blob; Err : Error }; 83 | type Result_7 = variant { Ok : StateInfo; Err : text }; 84 | type Result_8 = variant { Ok : ObjectMeta; Err : Error }; 85 | type Result_9 = variant { Ok : bool; Err : text }; 86 | type StateInfo = record { 87 | next_etag : nat64; 88 | managers : vec principal; 89 | governance_canister : opt principal; 90 | name : text; 91 | auditors : vec principal; 92 | objects : nat64; 93 | }; 94 | type UpdateVersion = record { e_tag : opt text; version : opt text }; 95 | type UpgradeArgs = record { 96 | governance_canister : opt principal; 97 | name : opt text; 98 | }; 99 | service : (opt InstallArgs) -> { 100 | abort_multipart : (text, text) -> (Result); 101 | admin_add_auditors : (vec principal) -> (Result_1); 102 | admin_add_managers : (vec principal) -> (Result_1); 103 | admin_clear : () -> (Result_1); 104 | admin_remove_auditors : (vec principal) -> (Result_1); 105 | admin_remove_managers : (vec principal) -> (Result_1); 106 | complete_multipart : (text, text, PutMultipartOptions) -> (Result_2); 107 | copy : (text, text) -> (Result); 108 | copy_if_not_exists : (text, text) -> (Result); 109 | create_multipart : (text) -> (Result_3); 110 | delete : (text) -> (Result); 111 | get_opts : (text, GetOptions) -> (Result_4) query; 112 | get_part : (text, nat64) -> (Result_5) query; 113 | get_ranges : (text, vec record { nat64; nat64 }) -> (Result_6) query; 114 | get_state : () -> (Result_7) query; 115 | head : (text) -> (Result_8) query; 116 | is_member : (text, principal) -> (Result_9) query; 117 | list : (opt text) -> (Result_10) query; 118 | list_with_delimiter : (opt text) -> (Result_11) query; 119 | list_with_offset : (opt text, text) -> (Result_10) query; 120 | put_opts : (text, blob, PutOptions) -> (Result_2); 121 | put_part : (text, text, nat64, blob) -> (Result_12); 122 | rename : (text, text) -> (Result); 123 | rename_if_not_exists : (text, text) -> (Result); 124 | validate_admin_add_auditors : (vec principal) -> (Result_13); 125 | validate_admin_add_managers : (vec principal) -> (Result_13); 126 | validate_admin_clear : () -> (Result_13); 127 | validate_admin_remove_auditors : (vec principal) -> (Result_13); 128 | validate_admin_remove_managers : (vec principal) -> (Result_13); 129 | } 130 | -------------------------------------------------------------------------------- /src/declarations/ic_object_store_canister/ic_object_store_canister.did: -------------------------------------------------------------------------------- 1 | type Attribute = variant { 2 | ContentType; 3 | Metadata : text; 4 | ContentEncoding; 5 | ContentLanguage; 6 | CacheControl; 7 | ContentDisposition; 8 | }; 9 | type Error = variant { 10 | NotModified : record { path : text; error : text }; 11 | UnknownConfigurationKey : record { key : 
text }; 12 | NotFound : record { path : text }; 13 | PermissionDenied : record { path : text; error : text }; 14 | Generic : record { error : text }; 15 | AlreadyExists : record { path : text }; 16 | InvalidPath : record { path : text }; 17 | NotSupported : record { error : text }; 18 | Precondition : record { path : text; error : text }; 19 | NotImplemented; 20 | Unauthenticated : record { path : text; error : text }; 21 | }; 22 | type GetOptions = record { 23 | if_match : opt text; 24 | if_unmodified_since : opt nat64; 25 | head : bool; 26 | if_modified_since : opt nat64; 27 | version : opt text; 28 | if_none_match : opt text; 29 | range : opt GetRange; 30 | }; 31 | type GetRange = variant { 32 | Offset : nat64; 33 | Bounded : record { nat64; nat64 }; 34 | Suffix : nat64; 35 | }; 36 | type GetResult = record { 37 | meta : ObjectMeta; 38 | attributes : vec record { Attribute; text }; 39 | range : record { nat64; nat64 }; 40 | payload : blob; 41 | }; 42 | type InitArgs = record { governance_canister : opt principal; name : text }; 43 | type InstallArgs = variant { Upgrade : UpgradeArgs; Init : InitArgs }; 44 | type ListResult = record { 45 | common_prefixes : vec text; 46 | objects : vec ObjectMeta; 47 | }; 48 | type ObjectMeta = record { 49 | aes_tags : opt vec blob; 50 | size : nat64; 51 | e_tag : opt text; 52 | version : opt text; 53 | last_modified : nat64; 54 | aes_nonce : opt blob; 55 | location : text; 56 | }; 57 | type PartId = record { content_id : text }; 58 | type PutMode = variant { Overwrite; Create; Update : UpdateVersion }; 59 | type PutMultipartOptions = record { 60 | aes_tags : opt vec blob; 61 | tags : text; 62 | attributes : vec record { Attribute; text }; 63 | aes_nonce : opt blob; 64 | }; 65 | type PutOptions = record { 66 | aes_tags : opt vec blob; 67 | mode : PutMode; 68 | tags : text; 69 | attributes : vec record { Attribute; text }; 70 | aes_nonce : opt blob; 71 | }; 72 | type Result = variant { Ok; Err : Error }; 73 | type Result_1 = variant { Ok; Err : text }; 74 | type Result_10 = variant { Ok : vec ObjectMeta; Err : Error }; 75 | type Result_11 = variant { Ok : ListResult; Err : Error }; 76 | type Result_12 = variant { Ok : PartId; Err : Error }; 77 | type Result_13 = variant { Ok : text; Err : text }; 78 | type Result_2 = variant { Ok : UpdateVersion; Err : Error }; 79 | type Result_3 = variant { Ok : text; Err : Error }; 80 | type Result_4 = variant { Ok : GetResult; Err : Error }; 81 | type Result_5 = variant { Ok : blob; Err : Error }; 82 | type Result_6 = variant { Ok : vec blob; Err : Error }; 83 | type Result_7 = variant { Ok : StateInfo; Err : text }; 84 | type Result_8 = variant { Ok : ObjectMeta; Err : Error }; 85 | type Result_9 = variant { Ok : bool; Err : text }; 86 | type StateInfo = record { 87 | next_etag : nat64; 88 | managers : vec principal; 89 | governance_canister : opt principal; 90 | name : text; 91 | auditors : vec principal; 92 | objects : nat64; 93 | }; 94 | type UpdateVersion = record { e_tag : opt text; version : opt text }; 95 | type UpgradeArgs = record { 96 | governance_canister : opt principal; 97 | name : opt text; 98 | }; 99 | service : (opt InstallArgs) -> { 100 | abort_multipart : (text, text) -> (Result); 101 | admin_add_auditors : (vec principal) -> (Result_1); 102 | admin_add_managers : (vec principal) -> (Result_1); 103 | admin_clear : () -> (Result_1); 104 | admin_remove_auditors : (vec principal) -> (Result_1); 105 | admin_remove_managers : (vec principal) -> (Result_1); 106 | complete_multipart : (text, text, 
PutMultipartOptions) -> (Result_2); 107 | copy : (text, text) -> (Result); 108 | copy_if_not_exists : (text, text) -> (Result); 109 | create_multipart : (text) -> (Result_3); 110 | delete : (text) -> (Result); 111 | get_opts : (text, GetOptions) -> (Result_4) query; 112 | get_part : (text, nat64) -> (Result_5) query; 113 | get_ranges : (text, vec record { nat64; nat64 }) -> (Result_6) query; 114 | get_state : () -> (Result_7) query; 115 | head : (text) -> (Result_8) query; 116 | is_member : (text, principal) -> (Result_9) query; 117 | list : (opt text) -> (Result_10) query; 118 | list_with_delimiter : (opt text) -> (Result_11) query; 119 | list_with_offset : (opt text, text) -> (Result_10) query; 120 | put_opts : (text, blob, PutOptions) -> (Result_2); 121 | put_part : (text, text, nat64, blob) -> (Result_12); 122 | rename : (text, text) -> (Result); 123 | rename_if_not_exists : (text, text) -> (Result); 124 | validate_admin_add_auditors : (vec principal) -> (Result_13); 125 | validate_admin_add_managers : (vec principal) -> (Result_13); 126 | validate_admin_clear : () -> (Result_13); 127 | validate_admin_remove_auditors : (vec principal) -> (Result_13); 128 | validate_admin_remove_managers : (vec principal) -> (Result_13); 129 | } 130 | -------------------------------------------------------------------------------- /src/ic_oss_ts/candid/ic_object_store_canister/ic_object_store_canister.did: -------------------------------------------------------------------------------- 1 | type Attribute = variant { 2 | ContentType; 3 | Metadata : text; 4 | ContentEncoding; 5 | ContentLanguage; 6 | CacheControl; 7 | ContentDisposition; 8 | }; 9 | type Error = variant { 10 | NotModified : record { path : text; error : text }; 11 | UnknownConfigurationKey : record { key : text }; 12 | NotFound : record { path : text }; 13 | PermissionDenied : record { path : text; error : text }; 14 | Generic : record { error : text }; 15 | AlreadyExists : record { path : text }; 16 | InvalidPath : record { path : text }; 17 | NotSupported : record { error : text }; 18 | Precondition : record { path : text; error : text }; 19 | NotImplemented; 20 | Unauthenticated : record { path : text; error : text }; 21 | }; 22 | type GetOptions = record { 23 | if_match : opt text; 24 | if_unmodified_since : opt nat64; 25 | head : bool; 26 | if_modified_since : opt nat64; 27 | version : opt text; 28 | if_none_match : opt text; 29 | range : opt GetRange; 30 | }; 31 | type GetRange = variant { 32 | Offset : nat64; 33 | Bounded : record { nat64; nat64 }; 34 | Suffix : nat64; 35 | }; 36 | type GetResult = record { 37 | meta : ObjectMeta; 38 | attributes : vec record { Attribute; text }; 39 | range : record { nat64; nat64 }; 40 | payload : blob; 41 | }; 42 | type InitArgs = record { governance_canister : opt principal; name : text }; 43 | type InstallArgs = variant { Upgrade : UpgradeArgs; Init : InitArgs }; 44 | type ListResult = record { 45 | common_prefixes : vec text; 46 | objects : vec ObjectMeta; 47 | }; 48 | type ObjectMeta = record { 49 | aes_tags : opt vec blob; 50 | size : nat64; 51 | e_tag : opt text; 52 | version : opt text; 53 | last_modified : nat64; 54 | aes_nonce : opt blob; 55 | location : text; 56 | }; 57 | type PartId = record { content_id : text }; 58 | type PutMode = variant { Overwrite; Create; Update : UpdateVersion }; 59 | type PutMultipartOptions = record { 60 | aes_tags : opt vec blob; 61 | tags : text; 62 | attributes : vec record { Attribute; text }; 63 | aes_nonce : opt blob; 64 | }; 65 | type PutOptions = 
record { 66 | aes_tags : opt vec blob; 67 | mode : PutMode; 68 | tags : text; 69 | attributes : vec record { Attribute; text }; 70 | aes_nonce : opt blob; 71 | }; 72 | type Result = variant { Ok; Err : Error }; 73 | type Result_1 = variant { Ok; Err : text }; 74 | type Result_10 = variant { Ok : vec ObjectMeta; Err : Error }; 75 | type Result_11 = variant { Ok : ListResult; Err : Error }; 76 | type Result_12 = variant { Ok : PartId; Err : Error }; 77 | type Result_13 = variant { Ok : text; Err : text }; 78 | type Result_2 = variant { Ok : UpdateVersion; Err : Error }; 79 | type Result_3 = variant { Ok : text; Err : Error }; 80 | type Result_4 = variant { Ok : GetResult; Err : Error }; 81 | type Result_5 = variant { Ok : blob; Err : Error }; 82 | type Result_6 = variant { Ok : vec blob; Err : Error }; 83 | type Result_7 = variant { Ok : StateInfo; Err : text }; 84 | type Result_8 = variant { Ok : ObjectMeta; Err : Error }; 85 | type Result_9 = variant { Ok : bool; Err : text }; 86 | type StateInfo = record { 87 | next_etag : nat64; 88 | managers : vec principal; 89 | governance_canister : opt principal; 90 | name : text; 91 | auditors : vec principal; 92 | objects : nat64; 93 | }; 94 | type UpdateVersion = record { e_tag : opt text; version : opt text }; 95 | type UpgradeArgs = record { 96 | governance_canister : opt principal; 97 | name : opt text; 98 | }; 99 | service : (opt InstallArgs) -> { 100 | abort_multipart : (text, text) -> (Result); 101 | admin_add_auditors : (vec principal) -> (Result_1); 102 | admin_add_managers : (vec principal) -> (Result_1); 103 | admin_clear : () -> (Result_1); 104 | admin_remove_auditors : (vec principal) -> (Result_1); 105 | admin_remove_managers : (vec principal) -> (Result_1); 106 | complete_multipart : (text, text, PutMultipartOptions) -> (Result_2); 107 | copy : (text, text) -> (Result); 108 | copy_if_not_exists : (text, text) -> (Result); 109 | create_multipart : (text) -> (Result_3); 110 | delete : (text) -> (Result); 111 | get_opts : (text, GetOptions) -> (Result_4) query; 112 | get_part : (text, nat64) -> (Result_5) query; 113 | get_ranges : (text, vec record { nat64; nat64 }) -> (Result_6) query; 114 | get_state : () -> (Result_7) query; 115 | head : (text) -> (Result_8) query; 116 | is_member : (text, principal) -> (Result_9) query; 117 | list : (opt text) -> (Result_10) query; 118 | list_with_delimiter : (opt text) -> (Result_11) query; 119 | list_with_offset : (opt text, text) -> (Result_10) query; 120 | put_opts : (text, blob, PutOptions) -> (Result_2); 121 | put_part : (text, text, nat64, blob) -> (Result_12); 122 | rename : (text, text) -> (Result); 123 | rename_if_not_exists : (text, text) -> (Result); 124 | validate_admin_add_auditors : (vec principal) -> (Result_13); 125 | validate_admin_add_managers : (vec principal) -> (Result_13); 126 | validate_admin_clear : () -> (Result_13); 127 | validate_admin_remove_auditors : (vec principal) -> (Result_13); 128 | validate_admin_remove_managers : (vec principal) -> (Result_13); 129 | } 130 | -------------------------------------------------------------------------------- /src/ic_oss_can/src/types.rs: -------------------------------------------------------------------------------- 1 | use candid::Principal; 2 | use ciborium::{from_reader, into_writer}; 3 | use ic_oss_types::file::*; 4 | use ic_stable_structures::{storable::Bound, Storable}; 5 | use serde::{Deserialize, Serialize}; 6 | use serde_bytes::ByteArray; 7 | use std::{ 8 | borrow::Cow, 9 | collections::{BTreeMap, BTreeSet}, 10 | ops, 11 | 
}; 12 | 13 | pub const MILLISECONDS: u64 = 1_000_000_000; 14 | 15 | #[derive(Clone, Deserialize, Serialize)] 16 | pub struct Files { 17 | pub file_id: u32, 18 | pub max_file_size: u64, 19 | pub visibility: u8, // 0: private; 1: public 20 | pub managers: BTreeSet, // managers can read and write 21 | pub files: BTreeMap, 22 | } 23 | 24 | impl Files { 25 | pub fn list_files(&self, prev: u32, take: u32) -> Vec { 26 | let mut res = Vec::with_capacity(take as usize); 27 | for (file_id, file) in self 28 | .files 29 | .range(ops::Range { 30 | start: 1, 31 | end: prev, 32 | }) 33 | .rev() 34 | { 35 | res.push(file.clone().into_info(*file_id)); 36 | if res.len() >= take as usize { 37 | break; 38 | } 39 | } 40 | res 41 | } 42 | } 43 | 44 | impl Default for Files { 45 | fn default() -> Self { 46 | Self { 47 | file_id: 1, // 0 is reserved for the Files data itself 48 | max_file_size: MAX_FILE_SIZE, 49 | visibility: 0, 50 | managers: BTreeSet::new(), 51 | files: BTreeMap::new(), 52 | } 53 | } 54 | } 55 | 56 | impl Storable for Files { 57 | const BOUND: Bound = Bound::Unbounded; 58 | 59 | fn into_bytes(self) -> Vec { 60 | let mut buf = vec![]; 61 | into_writer(&self, &mut buf).expect("failed to encode Files data"); 62 | buf 63 | } 64 | 65 | fn to_bytes(&self) -> Cow<'_, [u8]> { 66 | let mut buf = vec![]; 67 | into_writer(self, &mut buf).expect("failed to encode Files data"); 68 | Cow::Owned(buf) 69 | } 70 | 71 | fn from_bytes(bytes: Cow<'_, [u8]>) -> Self { 72 | from_reader(&bytes[..]).expect("failed to decode Files data") 73 | } 74 | } 75 | 76 | #[derive(Clone, Default, Deserialize, Serialize, Ord, PartialOrd, Eq, PartialEq)] 77 | pub struct FileId(pub u32, pub u32); 78 | impl Storable for FileId { 79 | const BOUND: Bound = Bound::Bounded { 80 | max_size: 11, 81 | is_fixed_size: false, 82 | }; 83 | 84 | fn into_bytes(self) -> Vec { 85 | let mut buf = vec![]; 86 | into_writer(&self, &mut buf).expect("failed to encode FileId data"); 87 | buf 88 | } 89 | 90 | fn to_bytes(&self) -> Cow<'_, [u8]> { 91 | let mut buf = vec![]; 92 | into_writer(self, &mut buf).expect("failed to encode FileId data"); 93 | Cow::Owned(buf) 94 | } 95 | 96 | fn from_bytes(bytes: Cow<'_, [u8]>) -> Self { 97 | from_reader(&bytes[..]).expect("failed to decode FileId data") 98 | } 99 | } 100 | 101 | #[derive(Clone, Default, Deserialize, Serialize)] 102 | pub struct FileMetadata { 103 | pub name: String, 104 | pub content_type: String, // MIME types 105 | pub size: u64, 106 | pub filled: u64, 107 | pub created_at: u64, // unix timestamp in milliseconds 108 | pub updated_at: u64, // unix timestamp in milliseconds 109 | pub chunks: u32, 110 | pub hash: Option>, // recommend sha3 256 111 | } 112 | 113 | impl Storable for FileMetadata { 114 | const BOUND: Bound = Bound::Unbounded; 115 | 116 | fn into_bytes(self) -> Vec { 117 | let mut buf = vec![]; 118 | into_writer(&self, &mut buf).expect("failed to encode FileMetadata data"); 119 | buf 120 | } 121 | 122 | fn to_bytes(&self) -> Cow<'_, [u8]> { 123 | let mut buf = vec![]; 124 | into_writer(self, &mut buf).expect("failed to encode FileMetadata data"); 125 | Cow::Owned(buf) 126 | } 127 | 128 | fn from_bytes(bytes: Cow<'_, [u8]>) -> Self { 129 | from_reader(&bytes[..]).expect("failed to decode FileMetadata data") 130 | } 131 | } 132 | 133 | impl FileMetadata { 134 | pub fn into_info(self, id: u32) -> FileInfo { 135 | FileInfo { 136 | id, 137 | name: self.name, 138 | content_type: self.content_type, 139 | size: self.size, 140 | filled: self.filled, 141 | created_at: self.created_at, 142 | 
updated_at: self.updated_at, 143 | chunks: self.chunks, 144 | hash: self.hash, 145 | ..Default::default() 146 | } 147 | } 148 | } 149 | 150 | #[derive(Clone, Default, Deserialize, Serialize)] 151 | pub struct Chunk(pub Vec); 152 | 153 | impl Storable for Chunk { 154 | const BOUND: Bound = Bound::Bounded { 155 | max_size: CHUNK_SIZE, 156 | is_fixed_size: false, 157 | }; 158 | 159 | fn into_bytes(self) -> Vec { 160 | self.0 161 | } 162 | 163 | fn to_bytes(&self) -> Cow<'_, [u8]> { 164 | Cow::Borrowed(&self.0) 165 | } 166 | 167 | fn from_bytes(bytes: Cow<[u8]>) -> Self { 168 | Self(bytes.to_vec()) 169 | } 170 | } 171 | -------------------------------------------------------------------------------- /src/ic_oss_bucket/src/api_admin.rs: -------------------------------------------------------------------------------- 1 | use candid::{pretty::candid::value::pp_value, CandidType, IDLValue, Principal}; 2 | use ic_oss_types::bucket::UpdateBucketInput; 3 | use std::collections::BTreeSet; 4 | 5 | use crate::{is_controller, store, validate_principals}; 6 | 7 | #[ic_cdk::update(guard = "is_controller")] 8 | fn admin_set_managers(args: BTreeSet) -> Result<(), String> { 9 | validate_admin_set_managers(args.clone())?; 10 | store::state::with_mut(|r| { 11 | r.managers = args; 12 | }); 13 | Ok(()) 14 | } 15 | 16 | #[ic_cdk::update(guard = "is_controller")] 17 | fn admin_add_managers(args: BTreeSet) -> Result<(), String> { 18 | validate_principals(&args)?; 19 | store::state::with_mut(|r| { 20 | r.managers.extend(args); 21 | Ok(()) 22 | }) 23 | } 24 | 25 | #[ic_cdk::update(guard = "is_controller")] 26 | fn admin_remove_managers(args: BTreeSet) -> Result<(), String> { 27 | validate_principals(&args)?; 28 | store::state::with_mut(|r| { 29 | r.managers.retain(|p| !args.contains(p)); 30 | Ok(()) 31 | }) 32 | } 33 | 34 | #[ic_cdk::update(guard = "is_controller")] 35 | fn admin_add_auditors(args: BTreeSet) -> Result<(), String> { 36 | validate_principals(&args)?; 37 | store::state::with_mut(|r| { 38 | r.auditors.extend(args); 39 | Ok(()) 40 | }) 41 | } 42 | 43 | #[ic_cdk::update(guard = "is_controller")] 44 | fn admin_remove_auditors(args: BTreeSet) -> Result<(), String> { 45 | validate_principals(&args)?; 46 | store::state::with_mut(|r| { 47 | r.auditors.retain(|p| !args.contains(p)); 48 | Ok(()) 49 | }) 50 | } 51 | 52 | #[ic_cdk::update(guard = "is_controller")] 53 | fn admin_set_auditors(args: BTreeSet) -> Result<(), String> { 54 | validate_principals(&args)?; 55 | store::state::with_mut(|r| { 56 | r.auditors = args; 57 | }); 58 | Ok(()) 59 | } 60 | 61 | #[ic_cdk::update(guard = "is_controller")] 62 | fn admin_update_bucket(args: UpdateBucketInput) -> Result<(), String> { 63 | args.validate()?; 64 | store::state::with_mut(|s| { 65 | if let Some(name) = args.name { 66 | s.name = name; 67 | } 68 | if let Some(max_file_size) = args.max_file_size { 69 | s.max_file_size = max_file_size; 70 | } 71 | if let Some(max_folder_depth) = args.max_folder_depth { 72 | s.max_folder_depth = max_folder_depth; 73 | } 74 | if let Some(max_children) = args.max_children { 75 | s.max_children = max_children; 76 | } 77 | if let Some(max_custom_data_size) = args.max_custom_data_size { 78 | s.max_custom_data_size = max_custom_data_size; 79 | } 80 | if let Some(enable_hash_index) = args.enable_hash_index { 81 | s.enable_hash_index = enable_hash_index; 82 | } 83 | if let Some(status) = args.status { 84 | s.status = status; 85 | } 86 | if let Some(visibility) = args.visibility { 87 | s.visibility = visibility; 88 | } 89 | if let 
Some(trusted_ecdsa_pub_keys) = args.trusted_ecdsa_pub_keys { 90 | s.trusted_ecdsa_pub_keys = trusted_ecdsa_pub_keys; 91 | } 92 | if let Some(trusted_eddsa_pub_keys) = args.trusted_eddsa_pub_keys { 93 | s.trusted_eddsa_pub_keys = trusted_eddsa_pub_keys; 94 | } 95 | }); 96 | Ok(()) 97 | } 98 | 99 | // ----- Use validate2_xxxxxx instead of validate_xxxxxx ----- 100 | 101 | #[ic_cdk::update] 102 | fn validate_admin_set_managers(args: BTreeSet) -> Result<(), String> { 103 | validate_principals(&args)?; 104 | Ok(()) 105 | } 106 | 107 | #[ic_cdk::update] 108 | fn validate2_admin_set_managers(args: BTreeSet) -> Result { 109 | validate_principals(&args)?; 110 | pretty_format(&args) 111 | } 112 | 113 | #[ic_cdk::update] 114 | fn validate_admin_set_auditors(args: BTreeSet) -> Result<(), String> { 115 | validate_principals(&args)?; 116 | Ok(()) 117 | } 118 | 119 | #[ic_cdk::update] 120 | fn validate2_admin_set_auditors(args: BTreeSet) -> Result { 121 | validate_principals(&args)?; 122 | pretty_format(&args) 123 | } 124 | 125 | #[ic_cdk::update] 126 | fn validate_admin_update_bucket(args: UpdateBucketInput) -> Result<(), String> { 127 | args.validate() 128 | } 129 | 130 | #[ic_cdk::update] 131 | fn validate2_admin_update_bucket(args: UpdateBucketInput) -> Result { 132 | args.validate()?; 133 | pretty_format(&args) 134 | } 135 | 136 | #[ic_cdk::update] 137 | fn validate_admin_add_managers(args: BTreeSet) -> Result { 138 | validate_principals(&args)?; 139 | pretty_format(&args) 140 | } 141 | 142 | #[ic_cdk::update] 143 | fn validate_admin_remove_managers(args: BTreeSet) -> Result { 144 | validate_principals(&args)?; 145 | pretty_format(&args) 146 | } 147 | 148 | #[ic_cdk::update] 149 | fn validate_admin_add_auditors(args: BTreeSet) -> Result { 150 | validate_principals(&args)?; 151 | pretty_format(&args) 152 | } 153 | 154 | #[ic_cdk::update] 155 | fn validate_admin_remove_auditors(args: BTreeSet) -> Result { 156 | validate_principals(&args)?; 157 | pretty_format(&args) 158 | } 159 | 160 | fn pretty_format(data: &T) -> Result 161 | where 162 | T: CandidType, 163 | { 164 | let val = IDLValue::try_from_candid_type(data).map_err(|err| format!("{err:?}"))?; 165 | let doc = pp_value(7, &val); 166 | 167 | Ok(format!("{}", doc.pretty(120))) 168 | } 169 | -------------------------------------------------------------------------------- /src/ic_oss_ts/src/cluster.canister.ts: -------------------------------------------------------------------------------- 1 | import type { Principal } from '@dfinity/principal' 2 | import { Canister, createServices } from '@dfinity/utils' 3 | import type { 4 | AddWasmInput, 5 | BucketDeploymentInfo, 6 | ClusterInfo, 7 | _SERVICE as ClusterService, 8 | DeployWasmInput, 9 | Token, 10 | WasmInfo 11 | } from '../candid/ic_oss_cluster/ic_oss_cluster.did.js' 12 | import { idlFactory } from '../candid/ic_oss_cluster/ic_oss_cluster.did.js' 13 | import type { CanisterOptions } from './types.js' 14 | import { resultOk } from './types.js' 15 | 16 | export class ClusterCanister extends Canister { 17 | #resultOk: typeof resultOk = resultOk 18 | 19 | static create(options: CanisterOptions) { 20 | const { service, certifiedService, canisterId } = 21 | createServices({ 22 | options, 23 | idlFactory, 24 | certifiedIdlFactory: idlFactory 25 | }) 26 | 27 | const self = new ClusterCanister(canisterId, service, certifiedService) 28 | self.#resultOk = options.unwrapResult || resultOk 29 | return self 30 | } 31 | 32 | async getClusterInfo(): Promise { 33 | const res = await this.service.get_cluster_info() 
34 | return this.#resultOk(res) 35 | } 36 | 37 | async accessToken(audience: Principal): Promise { 38 | const res = await this.service.access_token(audience) 39 | return Uint8Array.from(this.#resultOk(res)) 40 | } 41 | 42 | async ed25519AccessToken(audience: Principal): Promise { 43 | const res = await this.service.ed25519_access_token(audience) 44 | return Uint8Array.from(this.#resultOk(res)) 45 | } 46 | 47 | async adminSignAccessToken(input: Token): Promise { 48 | const res = await this.service.admin_sign_access_token(input) 49 | return Uint8Array.from(this.#resultOk(res)) 50 | } 51 | 52 | async adminEd25519AccessToken(input: Token): Promise { 53 | const res = await this.service.admin_ed25519_access_token(input) 54 | return Uint8Array.from(this.#resultOk(res)) 55 | } 56 | 57 | async adminWeakAccessToken( 58 | input: Token, 59 | now_sec: bigint, 60 | expiration_sec: bigint 61 | ): Promise { 62 | const res = await this.service.admin_weak_access_token( 63 | input, 64 | now_sec, 65 | expiration_sec 66 | ) 67 | return Uint8Array.from(this.#resultOk(res)) 68 | } 69 | 70 | async adminSetManagers(input: Principal[]): Promise { 71 | const res = await this.service.admin_set_managers(input) 72 | return this.#resultOk(res) 73 | } 74 | 75 | async adminAddWasm( 76 | input: AddWasmInput, 77 | forcePrevHash: Uint8Array | null = null 78 | ): Promise { 79 | const res = await this.service.admin_add_wasm( 80 | input, 81 | forcePrevHash ? [forcePrevHash] : [] 82 | ) 83 | return this.#resultOk(res) 84 | } 85 | 86 | async adminAttachPolicies(input: Token): Promise { 87 | const res = await this.service.admin_attach_policies(input) 88 | return this.#resultOk(res) 89 | } 90 | 91 | async adminDetachPolicies(input: Token): Promise { 92 | const res = await this.service.admin_detach_policies(input) 93 | return this.#resultOk(res) 94 | } 95 | 96 | async adminBatchCallBuckets( 97 | buckets: Principal[], 98 | method: string, 99 | args: Uint8Array | null = null 100 | ): Promise { 101 | const res = await this.service.admin_batch_call_buckets( 102 | buckets, 103 | method, 104 | args ? [args] : [] 105 | ) 106 | 107 | return this.#resultOk(res) as Uint8Array[] 108 | } 109 | 110 | async adminDeployBucket( 111 | input: DeployWasmInput, 112 | ignorePrevHash: Uint8Array | null = null 113 | ): Promise { 114 | const res = await this.service.admin_deploy_bucket( 115 | input, 116 | ignorePrevHash ? [ignorePrevHash] : [] 117 | ) 118 | return this.#resultOk(res) 119 | } 120 | 121 | async adminUpgradeAllBuckets(args: Uint8Array | null = null): Promise { 122 | const res = await this.service.admin_upgrade_all_buckets(args ? [args] : []) 123 | return this.#resultOk(res) 124 | } 125 | 126 | async adminTopupAllBuckets(): Promise { 127 | const res = await this.service.admin_topup_all_buckets() 128 | return this.#resultOk(res) 129 | } 130 | 131 | async bucketDeploymentLogs( 132 | prev: bigint | null = null, 133 | take: bigint | null = null 134 | ): Promise { 135 | const res = await this.service.bucket_deployment_logs( 136 | prev == null ? [] : [prev], 137 | take == null ? 
[] : [take] 138 | ) 139 | return this.#resultOk(res) 140 | } 141 | 142 | async getBucketWasm(hash: Uint8Array): Promise { 143 | const res = await this.service.get_bucket_wasm(hash) 144 | return this.#resultOk(res) 145 | } 146 | 147 | async getBuckets(): Promise { 148 | const res = await this.service.get_buckets() 149 | return this.#resultOk(res) 150 | } 151 | 152 | async getDeployedBuckets(): Promise { 153 | const res = await this.service.get_deployed_buckets() 154 | return this.#resultOk(res) 155 | } 156 | 157 | async getSubjectPolicies( 158 | subject: Principal 159 | ): Promise> { 160 | const res = await this.service.get_subject_policies(subject) 161 | return this.#resultOk(res) 162 | } 163 | 164 | async getSubjectPoliciesFor( 165 | subject: Principal, 166 | audience: Principal 167 | ): Promise { 168 | const res = await this.service.get_subject_policies_for(subject, audience) 169 | return this.#resultOk(res) 170 | } 171 | } 172 | -------------------------------------------------------------------------------- /src/ic_oss_bucket/src/api_init.rs: -------------------------------------------------------------------------------- 1 | use candid::{CandidType, Principal}; 2 | use ic_oss_types::file::MAX_FILE_SIZE; 3 | use serde::Deserialize; 4 | 5 | use crate::store; 6 | 7 | #[derive(Clone, Debug, CandidType, Deserialize)] 8 | pub enum CanisterArgs { 9 | Init(InitArgs), 10 | Upgrade(UpgradeArgs), 11 | } 12 | 13 | #[derive(Clone, Debug, CandidType, Deserialize)] 14 | pub struct InitArgs { 15 | name: String, // bucket name 16 | file_id: u32, // the first file id, default is 0 17 | max_file_size: u64, // in bytes, default is 384GB 18 | max_folder_depth: u8, // default is 10 19 | max_children: u16, // maximum number of subfolders and subfiles in a folder., default is 1000 20 | max_custom_data_size: u16, // in bytes, default is 4KB 21 | enable_hash_index: bool, // if enabled, indexing will be built using file hash, allowing files to be read by their hash and preventing duplicate hash for files. 
default is false 22 | visibility: u8, // 0: private; 1: public, can be accessed by anyone, default is 0 23 | governance_canister: Option, 24 | } 25 | 26 | #[derive(Clone, Debug, CandidType, Deserialize)] 27 | pub struct UpgradeArgs { 28 | max_file_size: Option, 29 | max_folder_depth: Option, 30 | max_children: Option, 31 | max_custom_data_size: Option, 32 | enable_hash_index: Option, 33 | governance_canister: Option, 34 | } 35 | 36 | impl UpgradeArgs { 37 | fn validate(&self) -> Result<(), String> { 38 | if let Some(max_file_size) = self.max_file_size { 39 | if max_file_size == 0 { 40 | return Err("max_file_size should be greater than 0".to_string()); 41 | } 42 | if max_file_size >= MAX_FILE_SIZE { 43 | return Err(format!( 44 | "max_file_size should be smaller than or equal to {}", 45 | MAX_FILE_SIZE 46 | )); 47 | } 48 | } 49 | if let Some(max_folder_depth) = self.max_folder_depth { 50 | if max_folder_depth == 0 { 51 | return Err("max_folder_depth should be greater than 0".to_string()); 52 | } 53 | } 54 | if let Some(max_children) = self.max_children { 55 | if max_children == 0 { 56 | return Err("max_children should be greater than 0".to_string()); 57 | } 58 | } 59 | 60 | if let Some(max_custom_data_size) = self.max_custom_data_size { 61 | if max_custom_data_size == 0 { 62 | return Err("max_custom_data_size should be greater than 0".to_string()); 63 | } 64 | } 65 | Ok(()) 66 | } 67 | } 68 | 69 | #[ic_cdk::init] 70 | fn init(args: Option) { 71 | match args { 72 | Some(CanisterArgs::Init(args)) => { 73 | store::state::with_mut(|b| { 74 | if !args.name.is_empty() { 75 | b.name = args.name 76 | }; 77 | b.file_id = args.file_id; 78 | if args.max_file_size > 0 { 79 | b.max_file_size = args.max_file_size 80 | }; 81 | if args.max_folder_depth > 0 { 82 | b.max_folder_depth = args.max_folder_depth 83 | }; 84 | if args.max_children > 0 { 85 | b.max_children = args.max_children 86 | }; 87 | if args.visibility > 0 { 88 | b.visibility = 1 89 | }; 90 | if args.max_custom_data_size > 0 { 91 | b.max_custom_data_size = args.max_custom_data_size 92 | }; 93 | b.enable_hash_index = args.enable_hash_index; 94 | b.governance_canister = args.governance_canister; 95 | }); 96 | } 97 | Some(CanisterArgs::Upgrade(_)) => { 98 | ic_cdk::trap( 99 | "Cannot initialize the canister with an Upgrade args. 
Please provide an Init args.", 100 | ); 101 | } 102 | None => {} 103 | } 104 | 105 | store::state::init_http_certified_data(); 106 | } 107 | 108 | #[ic_cdk::pre_upgrade] 109 | fn pre_upgrade() { 110 | store::state::save(); 111 | } 112 | 113 | #[ic_cdk::post_upgrade] 114 | fn post_upgrade(args: Option) { 115 | store::state::load(); 116 | match args { 117 | Some(CanisterArgs::Upgrade(args)) => { 118 | if let Err(err) = args.validate() { 119 | ic_cdk::trap(&err); 120 | } 121 | 122 | store::state::with_mut(|s| { 123 | if let Some(max_file_size) = args.max_file_size { 124 | s.max_file_size = max_file_size; 125 | } 126 | if let Some(max_folder_depth) = args.max_folder_depth { 127 | s.max_folder_depth = max_folder_depth; 128 | } 129 | if let Some(max_children) = args.max_children { 130 | s.max_children = max_children; 131 | } 132 | 133 | if let Some(max_custom_data_size) = args.max_custom_data_size { 134 | s.max_custom_data_size = max_custom_data_size; 135 | } 136 | if let Some(enable_hash_index) = args.enable_hash_index { 137 | s.enable_hash_index = enable_hash_index; 138 | } 139 | if let Some(governance_canister) = args.governance_canister { 140 | s.governance_canister = Some(governance_canister); 141 | } 142 | }); 143 | } 144 | Some(CanisterArgs::Init(_)) => { 145 | ic_cdk::trap( 146 | "Cannot upgrade the canister with an Init args. Please provide an Upgrade args.", 147 | ); 148 | } 149 | _ => {} 150 | } 151 | 152 | store::state::init_http_certified_data(); 153 | } 154 | -------------------------------------------------------------------------------- /src/ic_oss_ts/src/bucket.canister.ts: -------------------------------------------------------------------------------- 1 | import { Canister, createServices } from '@dfinity/utils' 2 | import type { 3 | BucketInfo, 4 | _SERVICE as BucketService, 5 | CanisterStatusResult, 6 | CreateFileInput, 7 | CreateFileOutput, 8 | CreateFolderInput, 9 | FileInfo, 10 | FolderInfo, 11 | FolderName, 12 | MoveInput, 13 | UpdateFileChunkInput, 14 | UpdateFileChunkOutput, 15 | UpdateFileInput, 16 | UpdateFileOutput, 17 | UpdateFolderInput 18 | } from '../candid/ic_oss_bucket/ic_oss_bucket.did.js' 19 | import { idlFactory } from '../candid/ic_oss_bucket/ic_oss_bucket.did.js' 20 | import type { CanisterOptions } from './types.js' 21 | import { FileChunk, resultOk } from './types.js' 22 | 23 | export class BucketCanister extends Canister { 24 | #resultOk: typeof resultOk = resultOk 25 | #accessToken: [] | [Uint8Array] = [] 26 | 27 | static create( 28 | options: CanisterOptions & { 29 | accessToken?: Uint8Array 30 | } 31 | ) { 32 | const { service, certifiedService, canisterId } = 33 | createServices({ 34 | options, 35 | idlFactory, 36 | certifiedIdlFactory: idlFactory 37 | }) 38 | 39 | const self = new BucketCanister(canisterId, service, certifiedService) 40 | self.#resultOk = options.unwrapResult || resultOk 41 | self.#accessToken = options.accessToken ? 
[options.accessToken] : [] 42 | return self 43 | } 44 | 45 | async getCanisterStatus(): Promise { 46 | const res = await this.service.get_canister_status() 47 | return this.#resultOk(res) 48 | } 49 | 50 | async getBucketInfo(): Promise { 51 | const res = await this.service.get_bucket_info(this.#accessToken) 52 | return this.#resultOk(res) 53 | } 54 | 55 | async batchDeleteSubfiles(parent: number, ids: number[]): Promise { 56 | const res = await this.service.batch_delete_subfiles( 57 | parent, 58 | ids, 59 | this.#accessToken 60 | ) 61 | return this.#resultOk(res) as number[] 62 | } 63 | 64 | async createFile(input: CreateFileInput): Promise { 65 | const res = await this.service.create_file(input, this.#accessToken) 66 | return this.#resultOk(res) 67 | } 68 | 69 | async createFolder(input: CreateFolderInput): Promise { 70 | const res = await this.service.create_folder(input, this.#accessToken) 71 | return this.#resultOk(res) 72 | } 73 | 74 | async deleteFile(id: number): Promise { 75 | const res = await this.service.delete_file(id, this.#accessToken) 76 | return this.#resultOk(res) 77 | } 78 | 79 | async deleteFolder(id: number): Promise { 80 | const res = await this.service.delete_folder(id, this.#accessToken) 81 | return this.#resultOk(res) 82 | } 83 | 84 | async getFileAncestors(id: number): Promise { 85 | const res = await this.service.get_file_ancestors(id, this.#accessToken) 86 | return this.#resultOk(res) 87 | } 88 | 89 | async getFolderAncestors(id: number): Promise { 90 | const res = await this.service.get_folder_ancestors(id, this.#accessToken) 91 | return this.#resultOk(res) 92 | } 93 | 94 | async getFileChunks( 95 | id: number, 96 | chunkIdex: number, 97 | take: number = 0 98 | ): Promise { 99 | const res = await this.service.get_file_chunks( 100 | id, 101 | chunkIdex, 102 | take > 0 ? [take] : [], 103 | this.#accessToken 104 | ) 105 | return this.#resultOk(res) as FileChunk[] 106 | } 107 | 108 | async getFileInfo(id: number): Promise { 109 | const res = await this.service.get_file_info(id, this.#accessToken) 110 | return this.#resultOk(res) 111 | } 112 | 113 | async getFileInfoByHash(hash: Uint8Array): Promise { 114 | const res = await this.service.get_file_info_by_hash( 115 | hash, 116 | this.#accessToken 117 | ) 118 | return this.#resultOk(res) 119 | } 120 | 121 | async getFolderInfo(id: number): Promise { 122 | const res = await this.service.get_folder_info(id, this.#accessToken) 123 | return this.#resultOk(res) 124 | } 125 | 126 | async listFiles( 127 | parent: number, 128 | prev: number = 0, 129 | take: number = 0 130 | ): Promise { 131 | const res = await this.service.list_files( 132 | parent, 133 | prev > 0 ? [prev] : [], 134 | take > 0 ? [take] : [], 135 | this.#accessToken 136 | ) 137 | return this.#resultOk(res) 138 | } 139 | 140 | async listFolders( 141 | parent: number, 142 | prev: number = 0, 143 | take: number = 0 144 | ): Promise { 145 | const res = await this.service.list_folders( 146 | parent, 147 | prev > 0 ? [prev] : [], 148 | take > 0 ? 
[take] : [], 149 | this.#accessToken 150 | ) 151 | return this.#resultOk(res) 152 | } 153 | 154 | async moveFile(input: MoveInput): Promise { 155 | const res = await this.service.move_file(input, this.#accessToken) 156 | return this.#resultOk(res) 157 | } 158 | 159 | async moveFolder(input: MoveInput): Promise { 160 | const res = await this.service.move_folder(input, this.#accessToken) 161 | return this.#resultOk(res) 162 | } 163 | 164 | async updateFileChunk( 165 | input: UpdateFileChunkInput 166 | ): Promise { 167 | const res = await this.service.update_file_chunk(input, this.#accessToken) 168 | return this.#resultOk(res) 169 | } 170 | 171 | async updateFileInfo(input: UpdateFileInput): Promise { 172 | const res = await this.service.update_file_info(input, this.#accessToken) 173 | return this.#resultOk(res) 174 | } 175 | 176 | async updateFolderInfo(input: UpdateFolderInput): Promise { 177 | const res = await this.service.update_folder_info(input, this.#accessToken) 178 | return this.#resultOk(res) 179 | } 180 | } 181 | -------------------------------------------------------------------------------- /src/ic_oss_ts/src/stream.ts: -------------------------------------------------------------------------------- 1 | import mime from 'mime/lite' 2 | import type { FileHandle } from 'node:fs/promises' 3 | import { ReadableStream } from 'web-streams-polyfill' 4 | import { FileConfig } from './types.js' 5 | 6 | export const CHUNK_SIZE = 256 * 1024 7 | 8 | // https://stackoverflow.com/questions/76700924/ts2504-type-readablestreamuint8array-must-have-a-symbol-asynciterator 9 | export async function* readableStreamAsyncIterator(self: ReadableStream) { 10 | const reader = self.getReader() 11 | try { 12 | while (true) { 13 | const { done, value } = await reader.read() 14 | if (done) return 15 | yield value 16 | } 17 | } finally { 18 | reader.releaseLock() 19 | } 20 | } 21 | 22 | export async function toFixedChunkSizeReadable(file: FileConfig) { 23 | if (typeof File === 'function' && file.content instanceof File) { 24 | if (!file.name) { 25 | file.name = file.content.name 26 | } 27 | if (!file.contentType) { 28 | file.contentType = file.content.type 29 | } 30 | if (!file.size) { 31 | file.size = file.content.size 32 | } 33 | return streamToFixedChunkSizeReadable( 34 | CHUNK_SIZE, 35 | file.content.stream() as any as ReadableStream 36 | ) 37 | } 38 | 39 | if (typeof Blob === 'function' && file.content instanceof Blob) { 40 | if (!file.contentType) { 41 | file.contentType = file.content.type 42 | } 43 | if (!file.size) { 44 | file.size = file.content.size 45 | } 46 | return streamToFixedChunkSizeReadable( 47 | CHUNK_SIZE, 48 | file.content.stream() as any as ReadableStream 49 | ) 50 | } 51 | 52 | if ( 53 | Array.isArray(file.content) || 54 | file.content instanceof Uint8Array || 55 | file.content instanceof ArrayBuffer 56 | ) { 57 | return uint8ArrayToFixedChunkSizeReadable( 58 | CHUNK_SIZE, 59 | Uint8Array.from(file.content as ArrayLike) 60 | ) 61 | } 62 | 63 | if (file.content instanceof ReadableStream) { 64 | return streamToFixedChunkSizeReadable( 65 | CHUNK_SIZE, 66 | file.content as any as ReadableStream 67 | ) 68 | } 69 | 70 | if (typeof file.content == 'string') { 71 | const { open } = await import('node:fs/promises') 72 | const path = await import('node:path') 73 | if (!file.name) { 74 | file.name = path.basename(file.content) 75 | } 76 | if (!file.contentType) { 77 | file.contentType = mime.getType(file.name) ?? 
'application/octet-stream' 78 | } 79 | 80 | const fs = await open(file.content, 'r') 81 | const stat = await fs.stat() 82 | file.size = stat.size 83 | // try to fix "Closing file descriptor xx on garbage collection" 84 | ;(file as any).originFile = fs 85 | return streamToFixedChunkSizeReadable( 86 | CHUNK_SIZE, 87 | fs.readableWebStream() as any as ReadableStream, 88 | fs 89 | ) 90 | } 91 | 92 | throw new Error( 93 | 'Invalid arguments, FixedChunkSizeReadableStream could not be created' 94 | ) 95 | } 96 | 97 | export function streamToFixedChunkSizeReadable( 98 | chunkSize: number, 99 | stream: ReadableStream, 100 | fh?: FileHandle 101 | ) { 102 | const reader = stream.getReader() 103 | let buffer = new Uint8Array(0) 104 | 105 | return new ReadableStream({ 106 | type: 'bytes', 107 | autoAllocateChunkSize: chunkSize, 108 | async pull(controller) { 109 | const byob = (controller as ReadableByteStreamController).byobRequest 110 | if (!byob) { 111 | throw new Error('byobRequest is required') 112 | } 113 | const v = byob.view! 114 | const w = new Uint8Array(v.buffer, v.byteOffset, v.byteLength) 115 | 116 | while (buffer.byteLength < chunkSize) { 117 | const { done, value } = await reader.read() 118 | 119 | if (done) { 120 | if (buffer.byteLength > 0) { 121 | w.set(buffer) 122 | byob.respond(buffer.byteLength) 123 | } 124 | 125 | reader.releaseLock() 126 | controller.close() 127 | fh?.close() 128 | return 129 | } 130 | 131 | const val = new Uint8Array(value) 132 | const newBuffer = new Uint8Array(buffer.byteLength + val.byteLength) 133 | newBuffer.set(buffer) 134 | newBuffer.set(val, buffer.byteLength) 135 | buffer = newBuffer 136 | } 137 | 138 | w.set(buffer.slice(0, w.byteLength)) 139 | buffer = buffer.slice(w.byteLength) 140 | byob.respond(w.byteLength) 141 | }, 142 | cancel(_reason) { 143 | reader.releaseLock() 144 | fh?.close() 145 | } 146 | }) 147 | } 148 | 149 | export function uint8ArrayToFixedChunkSizeReadable( 150 | chunkSize: number, 151 | data: Uint8Array 152 | ) { 153 | let offset = 0 154 | 155 | return new ReadableStream({ 156 | type: 'bytes', 157 | autoAllocateChunkSize: chunkSize, 158 | pull(controller) { 159 | const byob = (controller as ReadableByteStreamController).byobRequest 160 | if (!byob) { 161 | throw new Error('byobRequest is required') 162 | } 163 | 164 | const v = byob.view! 
165 | const w = new Uint8Array(v.buffer, v.byteOffset, v.byteLength) 166 | const bytesToRead = Math.min(w.byteLength, data.byteLength - offset) 167 | w.set(data.subarray(offset, offset + bytesToRead)) 168 | offset += bytesToRead 169 | 170 | if (bytesToRead === 0) { 171 | controller.close() 172 | } else { 173 | byob.respond(bytesToRead) 174 | } 175 | } 176 | }) 177 | } 178 | 179 | export async function readAll( 180 | stream: ReadableStream, 181 | size: number 182 | ): Promise { 183 | const data = new Uint8Array(size) 184 | let offset = 0 185 | for await (const value of readableStreamAsyncIterator(stream)) { 186 | const chunk = new Uint8Array(value) 187 | if (offset + chunk.byteLength <= size) { 188 | data.set(chunk, offset) 189 | offset += chunk.byteLength 190 | } else { 191 | offset += chunk.byteLength 192 | break 193 | } 194 | } 195 | 196 | if (offset != size) { 197 | throw new Error( 198 | `failed to read all data, expected ${size} bytes but got ${offset}` 199 | ) 200 | } 201 | 202 | return data 203 | } 204 | -------------------------------------------------------------------------------- /src/ic_oss/src/cluster.rs: -------------------------------------------------------------------------------- 1 | use candid::{Nat, Principal}; 2 | use ic_agent::Agent; 3 | use ic_oss_types::{cluster::*, cose::Token}; 4 | use serde_bytes::{ByteArray, ByteBuf}; 5 | use std::{ 6 | collections::{BTreeMap, BTreeSet}, 7 | sync::Arc, 8 | }; 9 | 10 | use crate::agent::{query_call, update_call}; 11 | 12 | #[derive(Clone)] 13 | pub struct Client { 14 | agent: Arc, 15 | cluster: Principal, 16 | } 17 | 18 | impl Client { 19 | pub fn new(agent: Arc, cluster: Principal) -> Client { 20 | Client { agent, cluster } 21 | } 22 | 23 | /// the caller of agent should be canister controller 24 | pub async fn admin_set_managers(&self, args: BTreeSet) -> Result<(), String> { 25 | update_call(&self.agent, &self.cluster, "admin_set_managers", (args,)).await? 26 | } 27 | 28 | /// the caller of agent should be canister manager 29 | pub async fn admin_sign_access_token(&self, args: Token) -> Result { 30 | update_call( 31 | &self.agent, 32 | &self.cluster, 33 | "admin_sign_access_token", 34 | (args,), 35 | ) 36 | .await? 37 | } 38 | 39 | pub async fn admin_ed25519_access_token(&self, args: Token) -> Result { 40 | update_call( 41 | &self.agent, 42 | &self.cluster, 43 | "admin_ed25519_access_token", 44 | (args,), 45 | ) 46 | .await? 47 | } 48 | 49 | pub async fn admin_weak_access_token( 50 | &self, 51 | args: Token, 52 | now_sec: u64, 53 | expiration_sec: u64, 54 | ) -> Result { 55 | query_call( 56 | &self.agent, 57 | &self.cluster, 58 | "admin_weak_access_token", 59 | (args, now_sec, expiration_sec), 60 | ) 61 | .await? 62 | } 63 | 64 | /// the caller of agent should be canister manager 65 | pub async fn admin_attach_policies(&self, args: Token) -> Result<(), String> { 66 | update_call(&self.agent, &self.cluster, "admin_attach_policies", (args,)).await? 67 | } 68 | 69 | /// the caller of agent should be canister manager 70 | pub async fn admin_detach_policies(&self, args: Token) -> Result<(), String> { 71 | update_call(&self.agent, &self.cluster, "admin_detach_policies", (args,)).await? 72 | } 73 | 74 | pub async fn access_token(&self, audience: Principal) -> Result { 75 | update_call(&self.agent, &self.cluster, "access_token", (audience,)).await? 
76 | } 77 | 78 | pub async fn ed25519_access_token(&self, audience: Principal) -> Result { 79 | update_call( 80 | &self.agent, 81 | &self.cluster, 82 | "ed25519_access_token", 83 | (audience,), 84 | ) 85 | .await? 86 | } 87 | 88 | pub async fn get_cluster_info(&self) -> Result { 89 | query_call(&self.agent, &self.cluster, "get_cluster_info", ()).await? 90 | } 91 | 92 | pub async fn get_bucket_wasm(&self, hash: ByteArray<32>) -> Result { 93 | query_call(&self.agent, &self.cluster, "get_bucket_wasm", (hash,)).await? 94 | } 95 | 96 | pub async fn get_buckets(&self) -> Result, String> { 97 | query_call(&self.agent, &self.cluster, "get_buckets", ()).await? 98 | } 99 | 100 | pub async fn get_deployed_buckets(&self) -> Result, String> { 101 | query_call(&self.agent, &self.cluster, "get_deployed_buckets", ()).await? 102 | } 103 | 104 | pub async fn bucket_deployment_logs( 105 | &self, 106 | prev: Option, 107 | take: Option, 108 | ) -> Result, String> { 109 | query_call( 110 | &self.agent, 111 | &self.cluster, 112 | "bucket_deployment_logs", 113 | (prev, take), 114 | ) 115 | .await? 116 | } 117 | 118 | pub async fn get_subject_policies( 119 | &self, 120 | subject: Principal, 121 | ) -> Result, String> { 122 | query_call( 123 | &self.agent, 124 | &self.cluster, 125 | "get_subject_policies", 126 | (subject,), 127 | ) 128 | .await? 129 | } 130 | 131 | pub async fn get_subject_policies_for( 132 | &self, 133 | subject: Principal, 134 | audience: Principal, 135 | ) -> Result { 136 | query_call( 137 | &self.agent, 138 | &self.cluster, 139 | "get_subject_policies_for", 140 | (subject, audience), 141 | ) 142 | .await? 143 | } 144 | 145 | pub async fn admin_add_wasm( 146 | &self, 147 | args: AddWasmInput, 148 | force_prev_hash: Option>, 149 | ) -> Result<(), String> { 150 | update_call( 151 | &self.agent, 152 | &self.cluster, 153 | "admin_add_wasm", 154 | (args, force_prev_hash), 155 | ) 156 | .await? 157 | } 158 | 159 | pub async fn admin_deploy_bucket( 160 | &self, 161 | args: DeployWasmInput, 162 | ignore_prev_hash: Option>, 163 | ) -> Result<(), String> { 164 | update_call( 165 | &self.agent, 166 | &self.cluster, 167 | "admin_deploy_bucket", 168 | (args, ignore_prev_hash), 169 | ) 170 | .await? 171 | } 172 | 173 | pub async fn admin_upgrade_all_buckets(&self, args: Option) -> Result<(), String> { 174 | update_call( 175 | &self.agent, 176 | &self.cluster, 177 | "admin_upgrade_all_buckets", 178 | (args,), 179 | ) 180 | .await? 181 | } 182 | 183 | pub async fn admin_batch_call_buckets( 184 | &self, 185 | buckets: BTreeSet, 186 | method: String, 187 | args: Option, 188 | ) -> Result, String> { 189 | update_call( 190 | &self.agent, 191 | &self.cluster, 192 | "admin_batch_call_buckets", 193 | (buckets, method, args), 194 | ) 195 | .await? 196 | } 197 | 198 | pub async fn admin_topup_all_buckets(&self) -> Result { 199 | update_call(&self.agent, &self.cluster, "admin_topup_all_buckets", ()).await? 
200 | } 201 | } 202 | -------------------------------------------------------------------------------- /src/declarations/ic_object_store_canister/ic_object_store_canister.did.d.ts: -------------------------------------------------------------------------------- 1 | import type { Principal } from '@dfinity/principal'; 2 | import type { ActorMethod } from '@dfinity/agent'; 3 | import type { IDL } from '@dfinity/candid'; 4 | 5 | export type Attribute = { 'ContentType' : null } | 6 | { 'Metadata' : string } | 7 | { 'ContentEncoding' : null } | 8 | { 'ContentLanguage' : null } | 9 | { 'CacheControl' : null } | 10 | { 'ContentDisposition' : null }; 11 | export type Error = { 'NotModified' : { 'path' : string, 'error' : string } } | 12 | { 'UnknownConfigurationKey' : { 'key' : string } } | 13 | { 'NotFound' : { 'path' : string } } | 14 | { 'PermissionDenied' : { 'path' : string, 'error' : string } } | 15 | { 'Generic' : { 'error' : string } } | 16 | { 'AlreadyExists' : { 'path' : string } } | 17 | { 'InvalidPath' : { 'path' : string } } | 18 | { 'NotSupported' : { 'error' : string } } | 19 | { 'Precondition' : { 'path' : string, 'error' : string } } | 20 | { 'NotImplemented' : null } | 21 | { 'Unauthenticated' : { 'path' : string, 'error' : string } }; 22 | export interface GetOptions { 23 | 'if_match' : [] | [string], 24 | 'if_unmodified_since' : [] | [bigint], 25 | 'head' : boolean, 26 | 'if_modified_since' : [] | [bigint], 27 | 'version' : [] | [string], 28 | 'if_none_match' : [] | [string], 29 | 'range' : [] | [GetRange], 30 | } 31 | export type GetRange = { 'Offset' : bigint } | 32 | { 'Bounded' : [bigint, bigint] } | 33 | { 'Suffix' : bigint }; 34 | export interface GetResult { 35 | 'meta' : ObjectMeta, 36 | 'attributes' : Array<[Attribute, string]>, 37 | 'range' : [bigint, bigint], 38 | 'payload' : Uint8Array | number[], 39 | } 40 | export interface InitArgs { 41 | 'governance_canister' : [] | [Principal], 42 | 'name' : string, 43 | } 44 | export type InstallArgs = { 'Upgrade' : UpgradeArgs } | 45 | { 'Init' : InitArgs }; 46 | export interface ListResult { 47 | 'common_prefixes' : Array, 48 | 'objects' : Array, 49 | } 50 | export interface ObjectMeta { 51 | 'aes_tags' : [] | [Array], 52 | 'size' : bigint, 53 | 'e_tag' : [] | [string], 54 | 'version' : [] | [string], 55 | 'last_modified' : bigint, 56 | 'aes_nonce' : [] | [Uint8Array | number[]], 57 | 'location' : string, 58 | } 59 | export interface PartId { 'content_id' : string } 60 | export type PutMode = { 'Overwrite' : null } | 61 | { 'Create' : null } | 62 | { 'Update' : UpdateVersion }; 63 | export interface PutMultipartOptions { 64 | 'aes_tags' : [] | [Array], 65 | 'tags' : string, 66 | 'attributes' : Array<[Attribute, string]>, 67 | 'aes_nonce' : [] | [Uint8Array | number[]], 68 | } 69 | export interface PutOptions { 70 | 'aes_tags' : [] | [Array], 71 | 'mode' : PutMode, 72 | 'tags' : string, 73 | 'attributes' : Array<[Attribute, string]>, 74 | 'aes_nonce' : [] | [Uint8Array | number[]], 75 | } 76 | export type Result = { 'Ok' : null } | 77 | { 'Err' : Error }; 78 | export type Result_1 = { 'Ok' : null } | 79 | { 'Err' : string }; 80 | export type Result_10 = { 'Ok' : Array } | 81 | { 'Err' : Error }; 82 | export type Result_11 = { 'Ok' : ListResult } | 83 | { 'Err' : Error }; 84 | export type Result_12 = { 'Ok' : PartId } | 85 | { 'Err' : Error }; 86 | export type Result_13 = { 'Ok' : string } | 87 | { 'Err' : string }; 88 | export type Result_2 = { 'Ok' : UpdateVersion } | 89 | { 'Err' : Error }; 90 | export type Result_3 = { 
'Ok' : string } | 91 | { 'Err' : Error }; 92 | export type Result_4 = { 'Ok' : GetResult } | 93 | { 'Err' : Error }; 94 | export type Result_5 = { 'Ok' : Uint8Array | number[] } | 95 | { 'Err' : Error }; 96 | export type Result_6 = { 'Ok' : Array } | 97 | { 'Err' : Error }; 98 | export type Result_7 = { 'Ok' : StateInfo } | 99 | { 'Err' : string }; 100 | export type Result_8 = { 'Ok' : ObjectMeta } | 101 | { 'Err' : Error }; 102 | export type Result_9 = { 'Ok' : boolean } | 103 | { 'Err' : string }; 104 | export interface StateInfo { 105 | 'next_etag' : bigint, 106 | 'managers' : Array, 107 | 'governance_canister' : [] | [Principal], 108 | 'name' : string, 109 | 'auditors' : Array, 110 | 'objects' : bigint, 111 | } 112 | export interface UpdateVersion { 113 | 'e_tag' : [] | [string], 114 | 'version' : [] | [string], 115 | } 116 | export interface UpgradeArgs { 117 | 'governance_canister' : [] | [Principal], 118 | 'name' : [] | [string], 119 | } 120 | export interface _SERVICE { 121 | 'abort_multipart' : ActorMethod<[string, string], Result>, 122 | 'admin_add_auditors' : ActorMethod<[Array], Result_1>, 123 | 'admin_add_managers' : ActorMethod<[Array], Result_1>, 124 | 'admin_clear' : ActorMethod<[], Result_1>, 125 | 'admin_remove_auditors' : ActorMethod<[Array], Result_1>, 126 | 'admin_remove_managers' : ActorMethod<[Array], Result_1>, 127 | 'complete_multipart' : ActorMethod< 128 | [string, string, PutMultipartOptions], 129 | Result_2 130 | >, 131 | 'copy' : ActorMethod<[string, string], Result>, 132 | 'copy_if_not_exists' : ActorMethod<[string, string], Result>, 133 | 'create_multipart' : ActorMethod<[string], Result_3>, 134 | 'delete' : ActorMethod<[string], Result>, 135 | 'get_opts' : ActorMethod<[string, GetOptions], Result_4>, 136 | 'get_part' : ActorMethod<[string, bigint], Result_5>, 137 | 'get_ranges' : ActorMethod<[string, Array<[bigint, bigint]>], Result_6>, 138 | 'get_state' : ActorMethod<[], Result_7>, 139 | 'head' : ActorMethod<[string], Result_8>, 140 | 'is_member' : ActorMethod<[string, Principal], Result_9>, 141 | 'list' : ActorMethod<[[] | [string]], Result_10>, 142 | 'list_with_delimiter' : ActorMethod<[[] | [string]], Result_11>, 143 | 'list_with_offset' : ActorMethod<[[] | [string], string], Result_10>, 144 | 'put_opts' : ActorMethod< 145 | [string, Uint8Array | number[], PutOptions], 146 | Result_2 147 | >, 148 | 'put_part' : ActorMethod< 149 | [string, string, bigint, Uint8Array | number[]], 150 | Result_12 151 | >, 152 | 'rename' : ActorMethod<[string, string], Result>, 153 | 'rename_if_not_exists' : ActorMethod<[string, string], Result>, 154 | 'validate_admin_add_auditors' : ActorMethod<[Array], Result_13>, 155 | 'validate_admin_add_managers' : ActorMethod<[Array], Result_13>, 156 | 'validate_admin_clear' : ActorMethod<[], Result_13>, 157 | 'validate_admin_remove_auditors' : ActorMethod<[Array], Result_13>, 158 | 'validate_admin_remove_managers' : ActorMethod<[Array], Result_13>, 159 | } 160 | export declare const idlFactory: IDL.InterfaceFactory; 161 | export declare const init: (args: { IDL: typeof IDL }) => IDL.Type[]; 162 | -------------------------------------------------------------------------------- /src/ic_oss_ts/candid/ic_object_store_canister/ic_object_store_canister.did.d.ts: -------------------------------------------------------------------------------- 1 | import type { Principal } from '@dfinity/principal'; 2 | import type { ActorMethod } from '@dfinity/agent'; 3 | import type { IDL } from '@dfinity/candid'; 4 | 5 | export type Attribute = { 
'ContentType' : null } | 6 | { 'Metadata' : string } | 7 | { 'ContentEncoding' : null } | 8 | { 'ContentLanguage' : null } | 9 | { 'CacheControl' : null } | 10 | { 'ContentDisposition' : null }; 11 | export type Error = { 'NotModified' : { 'path' : string, 'error' : string } } | 12 | { 'UnknownConfigurationKey' : { 'key' : string } } | 13 | { 'NotFound' : { 'path' : string } } | 14 | { 'PermissionDenied' : { 'path' : string, 'error' : string } } | 15 | { 'Generic' : { 'error' : string } } | 16 | { 'AlreadyExists' : { 'path' : string } } | 17 | { 'InvalidPath' : { 'path' : string } } | 18 | { 'NotSupported' : { 'error' : string } } | 19 | { 'Precondition' : { 'path' : string, 'error' : string } } | 20 | { 'NotImplemented' : null } | 21 | { 'Unauthenticated' : { 'path' : string, 'error' : string } }; 22 | export interface GetOptions { 23 | 'if_match' : [] | [string], 24 | 'if_unmodified_since' : [] | [bigint], 25 | 'head' : boolean, 26 | 'if_modified_since' : [] | [bigint], 27 | 'version' : [] | [string], 28 | 'if_none_match' : [] | [string], 29 | 'range' : [] | [GetRange], 30 | } 31 | export type GetRange = { 'Offset' : bigint } | 32 | { 'Bounded' : [bigint, bigint] } | 33 | { 'Suffix' : bigint }; 34 | export interface GetResult { 35 | 'meta' : ObjectMeta, 36 | 'attributes' : Array<[Attribute, string]>, 37 | 'range' : [bigint, bigint], 38 | 'payload' : Uint8Array | number[], 39 | } 40 | export interface InitArgs { 41 | 'governance_canister' : [] | [Principal], 42 | 'name' : string, 43 | } 44 | export type InstallArgs = { 'Upgrade' : UpgradeArgs } | 45 | { 'Init' : InitArgs }; 46 | export interface ListResult { 47 | 'common_prefixes' : Array, 48 | 'objects' : Array, 49 | } 50 | export interface ObjectMeta { 51 | 'aes_tags' : [] | [Array], 52 | 'size' : bigint, 53 | 'e_tag' : [] | [string], 54 | 'version' : [] | [string], 55 | 'last_modified' : bigint, 56 | 'aes_nonce' : [] | [Uint8Array | number[]], 57 | 'location' : string, 58 | } 59 | export interface PartId { 'content_id' : string } 60 | export type PutMode = { 'Overwrite' : null } | 61 | { 'Create' : null } | 62 | { 'Update' : UpdateVersion }; 63 | export interface PutMultipartOptions { 64 | 'aes_tags' : [] | [Array], 65 | 'tags' : string, 66 | 'attributes' : Array<[Attribute, string]>, 67 | 'aes_nonce' : [] | [Uint8Array | number[]], 68 | } 69 | export interface PutOptions { 70 | 'aes_tags' : [] | [Array], 71 | 'mode' : PutMode, 72 | 'tags' : string, 73 | 'attributes' : Array<[Attribute, string]>, 74 | 'aes_nonce' : [] | [Uint8Array | number[]], 75 | } 76 | export type Result = { 'Ok' : null } | 77 | { 'Err' : Error }; 78 | export type Result_1 = { 'Ok' : null } | 79 | { 'Err' : string }; 80 | export type Result_10 = { 'Ok' : Array } | 81 | { 'Err' : Error }; 82 | export type Result_11 = { 'Ok' : ListResult } | 83 | { 'Err' : Error }; 84 | export type Result_12 = { 'Ok' : PartId } | 85 | { 'Err' : Error }; 86 | export type Result_13 = { 'Ok' : string } | 87 | { 'Err' : string }; 88 | export type Result_2 = { 'Ok' : UpdateVersion } | 89 | { 'Err' : Error }; 90 | export type Result_3 = { 'Ok' : string } | 91 | { 'Err' : Error }; 92 | export type Result_4 = { 'Ok' : GetResult } | 93 | { 'Err' : Error }; 94 | export type Result_5 = { 'Ok' : Uint8Array | number[] } | 95 | { 'Err' : Error }; 96 | export type Result_6 = { 'Ok' : Array } | 97 | { 'Err' : Error }; 98 | export type Result_7 = { 'Ok' : StateInfo } | 99 | { 'Err' : string }; 100 | export type Result_8 = { 'Ok' : ObjectMeta } | 101 | { 'Err' : Error }; 102 | export type Result_9 = { 
'Ok' : boolean } | 103 | { 'Err' : string }; 104 | export interface StateInfo { 105 | 'next_etag' : bigint, 106 | 'managers' : Array, 107 | 'governance_canister' : [] | [Principal], 108 | 'name' : string, 109 | 'auditors' : Array, 110 | 'objects' : bigint, 111 | } 112 | export interface UpdateVersion { 113 | 'e_tag' : [] | [string], 114 | 'version' : [] | [string], 115 | } 116 | export interface UpgradeArgs { 117 | 'governance_canister' : [] | [Principal], 118 | 'name' : [] | [string], 119 | } 120 | export interface _SERVICE { 121 | 'abort_multipart' : ActorMethod<[string, string], Result>, 122 | 'admin_add_auditors' : ActorMethod<[Array], Result_1>, 123 | 'admin_add_managers' : ActorMethod<[Array], Result_1>, 124 | 'admin_clear' : ActorMethod<[], Result_1>, 125 | 'admin_remove_auditors' : ActorMethod<[Array], Result_1>, 126 | 'admin_remove_managers' : ActorMethod<[Array], Result_1>, 127 | 'complete_multipart' : ActorMethod< 128 | [string, string, PutMultipartOptions], 129 | Result_2 130 | >, 131 | 'copy' : ActorMethod<[string, string], Result>, 132 | 'copy_if_not_exists' : ActorMethod<[string, string], Result>, 133 | 'create_multipart' : ActorMethod<[string], Result_3>, 134 | 'delete' : ActorMethod<[string], Result>, 135 | 'get_opts' : ActorMethod<[string, GetOptions], Result_4>, 136 | 'get_part' : ActorMethod<[string, bigint], Result_5>, 137 | 'get_ranges' : ActorMethod<[string, Array<[bigint, bigint]>], Result_6>, 138 | 'get_state' : ActorMethod<[], Result_7>, 139 | 'head' : ActorMethod<[string], Result_8>, 140 | 'is_member' : ActorMethod<[string, Principal], Result_9>, 141 | 'list' : ActorMethod<[[] | [string]], Result_10>, 142 | 'list_with_delimiter' : ActorMethod<[[] | [string]], Result_11>, 143 | 'list_with_offset' : ActorMethod<[[] | [string], string], Result_10>, 144 | 'put_opts' : ActorMethod< 145 | [string, Uint8Array | number[], PutOptions], 146 | Result_2 147 | >, 148 | 'put_part' : ActorMethod< 149 | [string, string, bigint, Uint8Array | number[]], 150 | Result_12 151 | >, 152 | 'rename' : ActorMethod<[string, string], Result>, 153 | 'rename_if_not_exists' : ActorMethod<[string, string], Result>, 154 | 'validate_admin_add_auditors' : ActorMethod<[Array], Result_13>, 155 | 'validate_admin_add_managers' : ActorMethod<[Array], Result_13>, 156 | 'validate_admin_clear' : ActorMethod<[], Result_13>, 157 | 'validate_admin_remove_auditors' : ActorMethod<[Array], Result_13>, 158 | 'validate_admin_remove_managers' : ActorMethod<[Array], Result_13>, 159 | } 160 | export declare const idlFactory: IDL.InterfaceFactory; 161 | export declare const init: (args: { IDL: typeof IDL }) => IDL.Type[]; 162 | --------------------------------------------------------------------------------
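// --- Illustrative usage sketch (not part of the repository files above) -----
// The ClusterCanister and BucketCanister classes from src/ic_oss_ts expose the
// cluster and bucket APIs shown in this dump. The snippet below is a minimal,
// hedged sketch of how they might be wired together: request an access token
// from the cluster, then read bucket metadata and list files with it.
// Assumptions (not confirmed by this dump): the import specifier for the
// published package, the HttpAgent construction shown here, the exact
// CanisterOptions field names (canisterId, agent), the placeholder canister
// ids, and that folder id 0 is the root folder.
import { HttpAgent } from '@dfinity/agent'
import { Principal } from '@dfinity/principal'
// Import specifier is an assumption; adjust to the actual package or local path.
import { BucketCanister, ClusterCanister } from '@ldclabs/ic_oss_ts'

async function main() {
  // Agent construction varies by @dfinity/agent version; HttpAgent.create is
  // available in recent releases (assumption about the version in use).
  const agent = await HttpAgent.create({ host: 'https://icp-api.io' })

  // Placeholder principals; replace with real cluster/bucket canister ids.
  const clusterId = Principal.fromText('aaaaa-aa')
  const bucketId = Principal.fromText('aaaaa-aa')

  const cluster = ClusterCanister.create({ canisterId: clusterId, agent })
  console.log(await cluster.getClusterInfo())

  // access_token is signed for a specific audience (the target bucket canister).
  const token = await cluster.accessToken(bucketId)

  const bucket = BucketCanister.create({
    canisterId: bucketId,
    agent,
    accessToken: token
  })
  console.log(await bucket.getBucketInfo())
  // List files under the root folder (assuming folder id 0 is the root).
  console.log(await bucket.listFiles(0))
}

main().catch(console.error)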