├── replit.nix ├── rustfmt.toml ├── npm ├── darwin-x64 │ ├── README.md │ └── package.json ├── darwin-arm64 │ ├── README.md │ └── package.json └── linux-x64-gnu │ ├── README.md │ └── package.json ├── vitest.config.ts ├── .npmignore ├── .envrc ├── .prettierrc ├── tsconfig.json ├── .replit ├── flake.lock ├── flake.nix ├── tsup.config.ts ├── Cargo.toml ├── README.md ├── package.json ├── index.d.ts ├── syntheticEof.ts ├── .gitignore ├── tests ├── syntheticEOF.test.ts └── index.test.ts ├── .github └── workflows │ └── CI.yml ├── wrapper.ts ├── index.js └── src ├── lib.rs └── sandbox.rs /replit.nix: -------------------------------------------------------------------------------- 1 | { pkgs }: { 2 | deps = []; 3 | } -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | tab_spaces = 2 2 | edition = "2021" 3 | -------------------------------------------------------------------------------- /npm/darwin-x64/README.md: -------------------------------------------------------------------------------- 1 | # `@replit/ruspty-darwin-x64` 2 | 3 | This is the **x86_64-apple-darwin** binary for `@replit/ruspty` 4 | -------------------------------------------------------------------------------- /npm/darwin-arm64/README.md: -------------------------------------------------------------------------------- 1 | # `@replit/ruspty-darwin-arm64` 2 | 3 | This is the **aarch64-apple-darwin** binary for `@replit/ruspty` 4 | -------------------------------------------------------------------------------- /npm/linux-x64-gnu/README.md: -------------------------------------------------------------------------------- 1 | # `@replit/ruspty-linux-x64-gnu` 2 | 3 | This is the **x86_64-unknown-linux-gnu** binary for `@replit/ruspty` 4 | -------------------------------------------------------------------------------- /vitest.config.ts: 
-------------------------------------------------------------------------------- 1 | export default { 2 | test: { 3 | exclude: ['node_modules', 'dist', '.direnv'], 4 | fileParallelism: false, 5 | pool: 'forks', 6 | }, 7 | }; 8 | -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | target 2 | Cargo.lock 3 | .cargo 4 | .github 5 | npm 6 | .eslintrc 7 | .prettierignore 8 | rustfmt.toml 9 | yarn.lock 10 | *.node 11 | .yarn 12 | __test__ 13 | renovate.json 14 | -------------------------------------------------------------------------------- /.envrc: -------------------------------------------------------------------------------- 1 | if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then 2 | source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4=" 3 | fi 4 | use flake 5 | -------------------------------------------------------------------------------- /.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "quoteProps": "as-needed", 3 | "trailingComma": "all", 4 | "tabWidth": 2, 5 | "semi": true, 6 | "singleQuote": true, 7 | "bracketSpacing": true, 8 | "useTabs": false, 9 | "arrowParens": "always" 10 | } 11 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://json.schemastore.org/tsconfig", 3 | "_version": "20.1.0", 4 | "compilerOptions": { 5 | "lib": [ 6 | "es2023" 7 | ], 8 | "module": "node16", 9 | "target": "es2022", 10 | "strict": true, 11 | "esModuleInterop": true, 12 | "skipLibCheck": true, 13 | "moduleResolution": "node16" 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /.replit: 
-------------------------------------------------------------------------------- 1 | run = "npm run build && npm test" 2 | modules = ["rust-stable", "nodejs-20", "nix"] 3 | 4 | disableGuessImports = true 5 | disableInstallBeforeRun = true 6 | 7 | [nix] 8 | channel = "stable-23_11" 9 | 10 | [rules] 11 | 12 | [rules.formatter] 13 | 14 | [rules.formatter.fileExtensions] 15 | 16 | [rules.formatter.fileExtensions.".ts"] 17 | id = "module:nodejs-20:v24-20240117-0bd73cd/formatter:prettier" 18 | -------------------------------------------------------------------------------- /npm/darwin-x64/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@replit/ruspty-darwin-x64", 3 | "version": "3.6.0", 4 | "os": [ 5 | "darwin" 6 | ], 7 | "cpu": [ 8 | "x64" 9 | ], 10 | "main": "ruspty.darwin-x64.node", 11 | "files": [ 12 | "ruspty.darwin-x64.node" 13 | ], 14 | "license": "MIT", 15 | "engines": { 16 | "node": ">= 10" 17 | }, 18 | "repository": { 19 | "type": "git", 20 | "url": "git+https://github.com/replit/ruspty.git" 21 | } 22 | } -------------------------------------------------------------------------------- /npm/darwin-arm64/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@replit/ruspty-darwin-arm64", 3 | "version": "3.6.0", 4 | "os": [ 5 | "darwin" 6 | ], 7 | "cpu": [ 8 | "arm64" 9 | ], 10 | "main": "ruspty.darwin-arm64.node", 11 | "files": [ 12 | "ruspty.darwin-arm64.node" 13 | ], 14 | "license": "MIT", 15 | "engines": { 16 | "node": ">= 10" 17 | }, 18 | "repository": { 19 | "type": "git", 20 | "url": "git+https://github.com/replit/ruspty.git" 21 | } 22 | } -------------------------------------------------------------------------------- /npm/linux-x64-gnu/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@replit/ruspty-linux-x64-gnu", 3 | "version": "3.6.0", 4 | "os": [ 5 | "linux" 6 | ], 
7 | "cpu": [ 8 | "x64" 9 | ], 10 | "main": "ruspty.linux-x64-gnu.node", 11 | "files": [ 12 | "ruspty.linux-x64-gnu.node" 13 | ], 14 | "license": "MIT", 15 | "engines": { 16 | "node": ">= 10" 17 | }, 18 | "libc": [ 19 | "glibc" 20 | ], 21 | "repository": { 22 | "type": "git", 23 | "url": "git+https://github.com/replit/ruspty.git" 24 | } 25 | } -------------------------------------------------------------------------------- /flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "nixpkgs": { 4 | "locked": { 5 | "lastModified": 1743576891, 6 | "narHash": "sha256-vXiKURtntURybE6FMNFAVpRPr8+e8KoLPrYs9TGuAKc=", 7 | "owner": "NixOS", 8 | "repo": "nixpkgs", 9 | "rev": "44a69ed688786e98a101f02b712c313f1ade37ab", 10 | "type": "github" 11 | }, 12 | "original": { 13 | "owner": "NixOS", 14 | "ref": "nixos-24.11", 15 | "repo": "nixpkgs", 16 | "type": "github" 17 | } 18 | }, 19 | "root": { 20 | "inputs": { 21 | "nixpkgs": "nixpkgs" 22 | } 23 | } 24 | }, 25 | "root": "root", 26 | "version": 7 27 | } 28 | -------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | { 2 | inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11"; 3 | 4 | outputs = { self, nixpkgs }: 5 | let 6 | mkDevShell = system: 7 | let 8 | pkgs = nixpkgs.legacyPackages.${system}; 9 | in 10 | pkgs.mkShell { 11 | buildInputs = with pkgs; [ 12 | nodejs_20 13 | bun 14 | cargo 15 | clippy 16 | libiconv 17 | rustc 18 | rustfmt 19 | ]; 20 | }; 21 | in 22 | { 23 | devShells.aarch64-darwin.default = mkDevShell "aarch64-darwin"; 24 | devShells.x86_64-darwin.default = mkDevShell "x86_64-darwin"; 25 | devShells.aarch64-linux.default = mkDevShell "aarch64-linux"; 26 | devShells.x86_64-linux.default = mkDevShell "x86_64-linux"; 27 | }; 28 | } 29 | -------------------------------------------------------------------------------- /tsup.config.ts: 
-------------------------------------------------------------------------------- 1 | import { defineConfig } from 'tsup'; 2 | import { platformArchTriples } from '@napi-rs/triples'; 3 | 4 | const triples: string[] = []; 5 | for (const platform in platformArchTriples) { 6 | for (const arch in platformArchTriples[platform]) { 7 | for (const triple of platformArchTriples[platform][arch]) { 8 | triples.push(triple.platformArchABI); 9 | } 10 | } 11 | } 12 | 13 | // they somehow forgot these 14 | triples.push('darwin-universal'); 15 | triples.push('linux-riscv64-musl'); 16 | 17 | export default defineConfig({ 18 | entry: ['wrapper.ts'], 19 | format: ['cjs'], 20 | splitting: false, 21 | dts: true, 22 | sourcemap: true, 23 | clean: true, 24 | external: triples.map((triple) => `./ruspty.${triple}.node`), 25 | }); 26 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | edition = "2021" 3 | name = "replit_ruspty" 4 | version = "1.0.0" 5 | 6 | [lib] 7 | crate-type = ["cdylib"] 8 | 9 | [dependencies] 10 | backoff = "0.4.0" 11 | libc = "0.2.152" 12 | # Default enable napi4 feature, see https://nodejs.org/api/n-api.html#node-api-version-matrix 13 | napi = { version = "2.12.2", default-features = false, features = ["napi4"] } 14 | napi-derive = "2.12.2" 15 | nix = { version = "0.29.0", features = ["fs", "term", "poll", "signal", "ptrace"] } 16 | 17 | [target.'cfg(target_os = "linux")'.dependencies] 18 | anyhow = "1.0.94" 19 | log = { version = "0.4.22", features = ["kv"] } 20 | syscalls = "0.6.18" 21 | 22 | [dev-dependencies] 23 | tempfile = "3.14.0" 24 | env_logger = { version = "0.11.8", features = ["kv"] } 25 | 26 | [build-dependencies] 27 | napi-build = "2.0.1" 28 | 29 | [profile.release] 30 | lto = true 31 | -------------------------------------------------------------------------------- /README.md: 
-------------------------------------------------------------------------------- 1 | # `@replit/ruspty` - PTY for JavaScript through Rust FFI 2 | 3 | A very thin wrapper around PTYs and processes. 4 | 5 | ```ts 6 | const { Pty } = require('@replit/ruspty'); 7 | 8 | const pty = new Pty({ 9 | command: '/bin/sh', 10 | args: [], 11 | envs: {}, 12 | size: { rows: 24, cols: 80 }, 13 | onExit: (...result) => { 14 | // TODO: Handle process exit. 15 | }, 16 | }); 17 | 18 | const read = pty.read; 19 | const write = pty.write; 20 | 21 | read.on('data', (chunk) => { 22 | // TODO: Handle data. 23 | }); 24 | write.write('echo hello\n'); 25 | ``` 26 | 27 | ## Local Development 28 | 29 | - `npm install` 30 | - `npm run build` 31 | - `npm run test` 32 | 33 | ## Publishing 34 | 35 | Following ["Publish It" section from `napi-rs` docs](https://napi.rs/docs/introduction/simple-package#publish-it): 36 | 37 | 1. `git clean -f && npm install && npm run build` 38 | 2. `npm version [major|minor|patch]` 39 | 3. Send that as a Pull Request to GitHub. Ensure that the commit message consisting **only** of `x.y.z` - this is how the CI decides to publish to `npm`! 40 | 41 | `NPM_TOKEN` is part of the repo secrets, generated [like this](https://httptoolkit.com/blog/automatic-npm-publish-gha/). 
42 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@replit/ruspty", 3 | "version": "3.6.0", 4 | "main": "dist/wrapper.js", 5 | "types": "dist/wrapper.d.ts", 6 | "author": "Szymon Kaliski ", 7 | "repository": { 8 | "type": "git", 9 | "url": "git+https://github.com/replit/ruspty.git" 10 | }, 11 | "homepage": "https://github.com/replit/ruspty#readme", 12 | "bugs": { 13 | "url": "https://github.com/replit/ruspty/issues" 14 | }, 15 | "napi": { 16 | "name": "ruspty", 17 | "triples": { 18 | "defaults": false, 19 | "additional": [ 20 | "x86_64-apple-darwin", 21 | "aarch64-apple-darwin", 22 | "x86_64-unknown-linux-gnu" 23 | ] 24 | } 25 | }, 26 | "license": "MIT", 27 | "devDependencies": { 28 | "@napi-rs/cli": "^2.18.4", 29 | "@napi-rs/triples": "^1.2.0", 30 | "@types/jest": "^29.5.11", 31 | "@types/node": "^20.14.2", 32 | "prettier": "^3.2.4", 33 | "tsup": "^8.3.5", 34 | "typescript": "^5.4.5", 35 | "vitest": "^1.6.1" 36 | }, 37 | "scripts": { 38 | "artifacts": "napi artifacts", 39 | "build": "napi build --platform --release && npm run build:wrapper && npm run format", 40 | "build:wrapper": "tsup", 41 | "prepublishOnly": "napi prepublish -t npm", 42 | "test": "vitest run", 43 | "test:ci": "vitest --reporter=verbose --reporter=github-actions --allowOnly run", 44 | "test:hang": "vitest run --reporter=hanging-process", 45 | "universal": "napi universal", 46 | "version": "napi version", 47 | "release": "npm publish --access public", 48 | "format": "npx prettier *.{js,ts} tests/*.ts --write" 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /index.d.ts: -------------------------------------------------------------------------------- 1 | /* tslint:disable */ 2 | /* eslint-disable */ 3 | 4 | /* auto-generated by NAPI-RS */ 5 | 6 | export const enum Operation { 7 | Modify = 'Modify', 8 
| Delete = 'Delete', 9 | } 10 | /** 11 | * Sandboxing rules. Deleting / modifying a path with any of the prefixes is forbidden and will 12 | * cause process termination. 13 | */ 14 | export interface SandboxRule { 15 | /** The forbidden operation. */ 16 | operation: Operation; 17 | /** The list of prefixes that are matched by this rule. */ 18 | prefixes: Array; 19 | /** The list of prefixes that are excluded from this rule. */ 20 | excludePrefixes?: Array; 21 | /** The message to be shown if this rule triggers. */ 22 | message: string; 23 | } 24 | /** Options for the sandbox. */ 25 | export interface SandboxOptions { 26 | rules: Array; 27 | } 28 | /** The options that can be passed to the constructor of Pty. */ 29 | export interface PtyOptions { 30 | command: string; 31 | args?: Array; 32 | envs?: Record; 33 | dir?: string; 34 | size?: Size; 35 | cgroupPath?: string; 36 | apparmorProfile?: string; 37 | interactive?: boolean; 38 | sandbox?: SandboxOptions; 39 | onExit: (err: null | Error, exitCode: number) => void; 40 | } 41 | /** A size struct to pass to resize. */ 42 | export interface Size { 43 | cols: number; 44 | rows: number; 45 | } 46 | export const MAX_U16_VALUE: number; 47 | export const MIN_U16_VALUE: number; 48 | export declare function getSyntheticEofSequence(): Buffer; 49 | /** Resize the terminal. */ 50 | export declare function ptyResize(fd: number, size: Size): void; 51 | /** 52 | * Set the close-on-exec flag on a file descriptor. This is `fcntl(fd, F_SETFD, FD_CLOEXEC)` under 53 | * the covers. 54 | */ 55 | export declare function setCloseOnExec(fd: number, closeOnExec: boolean): void; 56 | /** 57 | * Get the close-on-exec flag on a file descriptor. This is `fcntl(fd, F_GETFD) & FD_CLOEXEC == 58 | *_CLOEXEC` under the covers. 59 | */ 60 | export declare function getCloseOnExec(fd: number): boolean; 61 | export declare class Pty { 62 | /** The pid of the forked process. 
*/ 63 | pid: number; 64 | constructor(opts: PtyOptions); 65 | /** 66 | * Transfers ownership of the file descriptor for the PTY controller. This can only be called 67 | * once (it will error the second time). The caller is responsible for closing the file 68 | * descriptor. 69 | */ 70 | takeControllerFd(): c_int; 71 | /** 72 | * Closes the owned file descriptor for the PTY controller. The Nodejs side must call this 73 | * when it is done with the file descriptor to avoid leaking FDs. 74 | */ 75 | dropUserFd(): void; 76 | } 77 | -------------------------------------------------------------------------------- /syntheticEof.ts: -------------------------------------------------------------------------------- 1 | import { Transform } from 'node:stream'; 2 | import { getSyntheticEofSequence } from './index.js'; 3 | 4 | // keep in sync with lib.rs::SYNTHETIC_EOF 5 | export const SYNTHETIC_EOF = getSyntheticEofSequence(); 6 | export const EOF_EVENT = 'synthetic-eof'; 7 | 8 | // get the longest suffix of buffer that is a prefix of SYNTHETIC_EOF 9 | function getBufferEndPrefixLength(buffer: Buffer) { 10 | const maxLen = Math.min(buffer.length, SYNTHETIC_EOF.length); 11 | for (let len = maxLen; len > 0; len--) { 12 | let match = true; 13 | for (let i = 0; i < len; i++) { 14 | if (buffer[buffer.length - len + i] !== SYNTHETIC_EOF[i]) { 15 | match = false; 16 | break; 17 | } 18 | } 19 | 20 | if (match) { 21 | return len; 22 | } 23 | } 24 | 25 | return 0; 26 | } 27 | 28 | export class SyntheticEOFDetector extends Transform { 29 | buffer: Buffer; 30 | 31 | constructor(options = {}) { 32 | super(options); 33 | this.buffer = Buffer.alloc(0); 34 | } 35 | 36 | _transform(chunk: Buffer, _encoding: string, callback: () => void) { 37 | const searchData = Buffer.concat([this.buffer, chunk]); 38 | const eofIndex = searchData.indexOf(SYNTHETIC_EOF); 39 | 40 | if (eofIndex !== -1) { 41 | // found EOF - emit everything before it 42 | if (eofIndex > 0) { 43 | this.push(searchData.subarray(0, 
eofIndex)); 44 | } 45 | 46 | this.emit(EOF_EVENT); 47 | 48 | // emit everything after EOF (if any) and clear buffer 49 | const afterEOF = searchData.subarray(eofIndex + SYNTHETIC_EOF.length); 50 | if (afterEOF.length > 0) { 51 | this.push(afterEOF); 52 | } 53 | 54 | this.buffer = Buffer.alloc(0); 55 | } else { 56 | // no EOF - buffer potential partial match at end 57 | 58 | // get the longest suffix of buffer that is a prefix of SYNTHETIC_EOF 59 | // and emit everything before it 60 | // this is done for the case which the eof happened to be split across multiple chunks 61 | const commonPrefixLen = getBufferEndPrefixLength(searchData); 62 | 63 | if (commonPrefixLen > 0) { 64 | const emitSize = searchData.length - commonPrefixLen; 65 | if (emitSize > 0) { 66 | this.push(searchData.subarray(0, emitSize)); 67 | } 68 | this.buffer = searchData.subarray(emitSize); 69 | } else { 70 | this.push(searchData); 71 | this.buffer = Buffer.alloc(0); 72 | } 73 | } 74 | 75 | callback(); 76 | } 77 | 78 | _flush(callback: () => void) { 79 | if (this.buffer.length > 0) { 80 | this.push(this.buffer); 81 | } 82 | 83 | callback(); 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by https://www.toptal.com/developers/gitignore/api/node 2 | # Edit at https://www.toptal.com/developers/gitignore?templates=node 3 | 4 | ### Node ### 5 | # Logs 6 | logs 7 | *.log 8 | npm-debug.log* 9 | yarn-debug.log* 10 | yarn-error.log* 11 | lerna-debug.log* 12 | 13 | # Diagnostic reports (https://nodejs.org/api/report.html) 14 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 15 | 16 | # Runtime data 17 | pids 18 | *.pid 19 | *.seed 20 | *.pid.lock 21 | 22 | # Directory for instrumented libs generated by jscoverage/JSCover 23 | lib-cov 24 | 25 | # Coverage directory used by tools like istanbul 26 | coverage 27 | *.lcov 28 | 29 | # nyc test coverage 30 | 
.nyc_output 31 | 32 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 33 | .grunt 34 | 35 | # Bower dependency directory (https://bower.io/) 36 | bower_components 37 | 38 | # node-waf configuration 39 | .lock-wscript 40 | 41 | # Compiled binary addons (https://nodejs.org/api/addons.html) 42 | build/Release 43 | 44 | # Dependency directories 45 | node_modules/ 46 | jspm_packages/ 47 | 48 | # TypeScript v1 declaration files 49 | typings/ 50 | 51 | # TypeScript cache 52 | *.tsbuildinfo 53 | 54 | # Optional npm cache directory 55 | .npm 56 | 57 | # Optional eslint cache 58 | .eslintcache 59 | 60 | # Microbundle cache 61 | .rpt2_cache/ 62 | .rts2_cache_cjs/ 63 | .rts2_cache_es/ 64 | .rts2_cache_umd/ 65 | 66 | # Optional REPL history 67 | .node_repl_history 68 | 69 | # Output of 'npm pack' 70 | *.tgz 71 | 72 | # Yarn Integrity file 73 | .yarn-integrity 74 | 75 | # dotenv environment variables file 76 | .env 77 | .env.test 78 | 79 | # parcel-bundler cache (https://parceljs.org/) 80 | .cache 81 | 82 | # Next.js build output 83 | .next 84 | 85 | # Nuxt.js build / generate output 86 | .nuxt 87 | dist 88 | 89 | # Gatsby files 90 | .cache/ 91 | # Comment in the public line in if your project uses Gatsby and not Next.js 92 | # https://nextjs.org/blog/next-9-1#public-directory-support 93 | # public 94 | 95 | # vuepress build output 96 | .vuepress/dist 97 | 98 | # Serverless directories 99 | .serverless/ 100 | 101 | # FuseBox cache 102 | .fusebox/ 103 | 104 | # DynamoDB Local files 105 | .dynamodb/ 106 | 107 | # TernJS port file 108 | .tern-port 109 | 110 | # Stores VSCode versions used for testing VSCode extensions 111 | .vscode-test 112 | 113 | # End of https://www.toptal.com/developers/gitignore/api/node 114 | 115 | # Created by https://www.toptal.com/developers/gitignore/api/macos 116 | # Edit at https://www.toptal.com/developers/gitignore?templates=macos 117 | 118 | ### macOS ### 119 | # General 120 | .DS_Store 121 | .AppleDouble 122 | 
.LSOverride 123 | 124 | # Icon must end with two 125 | Icon 126 | 127 | 128 | # Thumbnails 129 | ._* 130 | 131 | # Files that might appear in the root of a volume 132 | .DocumentRevisions-V100 133 | .fseventsd 134 | .Spotlight-V100 135 | .TemporaryItems 136 | .Trashes 137 | .VolumeIcon.icns 138 | .com.apple.timemachine.donotpresent 139 | 140 | # Directories potentially created on remote AFP share 141 | .AppleDB 142 | .AppleDesktop 143 | Network Trash Folder 144 | Temporary Items 145 | .apdisk 146 | 147 | ### macOS Patch ### 148 | # iCloud generated files 149 | *.icloud 150 | 151 | # End of https://www.toptal.com/developers/gitignore/api/macos 152 | 153 | # Created by https://www.toptal.com/developers/gitignore/api/windows 154 | # Edit at https://www.toptal.com/developers/gitignore?templates=windows 155 | 156 | ### Windows ### 157 | # Windows thumbnail cache files 158 | Thumbs.db 159 | Thumbs.db:encryptable 160 | ehthumbs.db 161 | ehthumbs_vista.db 162 | 163 | # Dump file 164 | *.stackdump 165 | 166 | # Folder config file 167 | [Dd]esktop.ini 168 | 169 | # Recycle Bin used on file shares 170 | $RECYCLE.BIN/ 171 | 172 | # Windows Installer files 173 | *.cab 174 | *.msi 175 | *.msix 176 | *.msm 177 | *.msp 178 | 179 | # Windows shortcuts 180 | *.lnk 181 | 182 | # End of https://www.toptal.com/developers/gitignore/api/windows 183 | 184 | #Added by cargo 185 | 186 | /target 187 | Cargo.lock 188 | .cargo/ 189 | 190 | .pnp.* 191 | .yarn/* 192 | !.yarn/patches 193 | !.yarn/plugins 194 | !.yarn/releases 195 | !.yarn/sdks 196 | !.yarn/versions 197 | 198 | *.node 199 | /.direnv 200 | -------------------------------------------------------------------------------- /tests/syntheticEOF.test.ts: -------------------------------------------------------------------------------- 1 | import { describe, it, expect, vi, beforeEach } from 'vitest'; 2 | import { 3 | SyntheticEOFDetector, 4 | SYNTHETIC_EOF, 5 | EOF_EVENT, 6 | } from '../syntheticEof'; 7 | 8 | describe('sequence', () => { 9 
| it('should have correct EOF sequence', () => { 10 | expect(SYNTHETIC_EOF).toEqual( 11 | Buffer.from([0x1b, 0x5d, 0x37, 0x38, 0x37, 0x38, 0x1b, 0x5c]), 12 | ); 13 | expect(SYNTHETIC_EOF.length).toBe(8); 14 | }); 15 | }); 16 | 17 | describe('SyntheticEOFDetector', () => { 18 | let detector: SyntheticEOFDetector; 19 | let onData: (data: Buffer) => void; 20 | let onEOF: () => void; 21 | let output: Buffer; 22 | 23 | beforeEach(() => { 24 | detector = new SyntheticEOFDetector(); 25 | output = Buffer.alloc(0); 26 | onData = vi.fn((data: Buffer) => (output = Buffer.concat([output, data]))); 27 | onEOF = vi.fn(); 28 | 29 | detector.on('data', onData); 30 | detector.on(EOF_EVENT, onEOF); 31 | }); 32 | 33 | it('should handle EOF at the end of stream', async () => { 34 | detector.write('Before EOF'); 35 | detector.write(SYNTHETIC_EOF); 36 | detector.end(); 37 | 38 | expect(output.toString()).toBe('Before EOF'); 39 | expect(onEOF).toHaveBeenCalledTimes(1); 40 | }); 41 | 42 | it('should handle EOF split across chunks', async () => { 43 | detector.write('Data1'); 44 | detector.write('\x1B]78'); // Partial EOF 45 | detector.write('78\x1B\\'); // Complete EOF 46 | detector.write('Data2'); 47 | detector.end(); 48 | 49 | expect(output.toString()).toBe('Data1Data2'); 50 | expect(onEOF).toHaveBeenCalledTimes(1); 51 | }); 52 | 53 | it('should pass through data when no EOF is present', async () => { 54 | detector.write('Just normal data'); 55 | detector.write(' with no EOF'); 56 | detector.end(); 57 | 58 | expect(output.toString()).toBe('Just normal data with no EOF'); 59 | expect(onEOF).not.toHaveBeenCalled(); 60 | }); 61 | 62 | it('should not trigger on partial EOF at end', async () => { 63 | detector.write('Data'); 64 | detector.write('\x1B]78'); // Incomplete EOF 65 | detector.end(); 66 | 67 | expect(output.toString()).toBe('Data\x1B]78'); 68 | expect(onEOF).not.toHaveBeenCalled(); 69 | }); 70 | 71 | it('should handle EOF split after escape', async () => { 72 | 
detector.write('\x1B'); 73 | detector.write(']7878\x1B\\'); 74 | detector.write('data1'); 75 | detector.end(); 76 | 77 | expect(output.toString()).toBe('data1'); 78 | expect(onEOF).toHaveBeenCalledTimes(1); 79 | }); 80 | 81 | it('should handle EOF split in the middle', async () => { 82 | detector.write('\x1B]78'); 83 | detector.write('78\x1B\\'); 84 | detector.write('data2'); 85 | detector.end(); 86 | 87 | expect(output.toString()).toBe('data2'); 88 | expect(onEOF).toHaveBeenCalledTimes(1); 89 | }); 90 | 91 | it('should not hold up data that isnt a prefix of EOF', async () => { 92 | detector.write('Data that is definitely not an EOF prefix'); 93 | 94 | expect(output.toString()).toBe('Data that is definitely not an EOF prefix'); 95 | expect(onEOF).not.toHaveBeenCalled(); 96 | 97 | detector.end(); 98 | expect(onEOF).not.toHaveBeenCalled(); 99 | }); 100 | 101 | it('should emit events in correct order', async () => { 102 | const detector = new SyntheticEOFDetector(); 103 | const events: Array< 104 | | { 105 | type: 'eof'; 106 | } 107 | | { 108 | type: 'data'; 109 | data: string; 110 | } 111 | > = []; 112 | 113 | detector.on('data', (chunk) => { 114 | events.push({ type: 'data', data: chunk.toString() }); 115 | }); 116 | detector.on(EOF_EVENT, () => { 117 | events.push({ type: 'eof' }); 118 | }); 119 | 120 | const finished = new Promise((resolve) => { 121 | detector.on('end', resolve); 122 | }); 123 | 124 | detector.write('before'); 125 | detector.write(SYNTHETIC_EOF); 126 | detector.write('after'); 127 | detector.end(); 128 | 129 | await finished; 130 | 131 | expect(events).toEqual([ 132 | { type: 'data', data: 'before' }, 133 | { type: 'eof' }, 134 | { type: 'data', data: 'after' }, 135 | ]); 136 | }); 137 | }); 138 | -------------------------------------------------------------------------------- /.github/workflows/CI.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | env: 3 | DEBUG: napi:* 4 | APP_NAME: ruspty 5 | 
MACOSX_DEPLOYMENT_TARGET: '10.13' 6 | 7 | permissions: 8 | contents: write 9 | id-token: write 10 | 'on': 11 | push: 12 | branches: 13 | - main 14 | tags-ignore: 15 | - '**' 16 | paths-ignore: 17 | - LICENSE 18 | - '**/*.gitignore' 19 | - .editorconfig 20 | - docs/** 21 | pull_request: null 22 | 23 | jobs: 24 | build: 25 | strategy: 26 | fail-fast: false 27 | matrix: 28 | settings: 29 | - host: macos-14-large 30 | target: x86_64-apple-darwin 31 | description: "macOS 14" 32 | - host: macos-latest 33 | target: aarch64-apple-darwin 34 | description: "macOS latest" 35 | - host: ubuntu-22.04 36 | target: x86_64-unknown-linux-gnu 37 | description: "Ubuntu Container(22.04)" 38 | name: Build ${{ matrix.settings.target }} on (${{ matrix.settings.description }}) 39 | runs-on: ${{ matrix.settings.host }} 40 | steps: 41 | - uses: actions/checkout@v4 42 | - name: Install container dependencies 43 | if: matrix.settings.host == 'ubuntu-22.04' 44 | run: | 45 | sudo apt-get update 46 | sudo apt-get install -y curl build-essential 47 | - name: Setup node 48 | uses: actions/setup-node@v4 49 | with: 50 | node-version: 20 51 | - name: Install 52 | uses: dtolnay/rust-toolchain@stable 53 | with: 54 | toolchain: stable 55 | targets: ${{ matrix.settings.target }} 56 | - name: Cache cargo 57 | uses: actions/cache@v4 58 | with: 59 | path: | 60 | ~/.cargo/registry/index/ 61 | ~/.cargo/registry/cache/ 62 | ~/.cargo/git/db/ 63 | .cargo-cache 64 | target/ 65 | key: ${{ matrix.settings.target }}-cargo-${{ matrix.settings.host }} 66 | - name: Install dependencies 67 | run: npm ci 68 | - name: Build 69 | run: |- 70 | set -e && 71 | npm run build && 72 | strip -x *.node 73 | shell: bash 74 | - name: Dump GLIBC symbols 75 | if: matrix.settings.host == 'ubuntu-22.04' 76 | run: | 77 | objdump -T *.node | grep GLIBC | sed 's/.*GLIBC_\([.0-9]*\).*/\1/g' | sort -Vu > glibc_versions.txt 78 | 79 | if [ -s glibc_versions.txt ]; then 80 | MAX_VERSION=$(cat glibc_versions.txt | sort -V | tail -n 1) 81 | echo 
"Highest GLIBC version: $MAX_VERSION" 82 | 83 | if [ "$(echo "$MAX_VERSION 2.35" | awk '{if ($1 > $2) print "1"; else print "0"}')" -eq 1 ]; then 84 | echo "Error: GLIBC version $MAX_VERSION is larger than 2.35" 85 | exit 1 86 | fi 87 | fi 88 | shell: bash 89 | - name: Upload artifact 90 | uses: actions/upload-artifact@v4 91 | with: 92 | name: bindings-${{ matrix.settings.target }} 93 | path: ${{ env.APP_NAME }}.*.node 94 | if-no-files-found: error 95 | 96 | test: 97 | needs: build 98 | strategy: 99 | fail-fast: false 100 | matrix: 101 | settings: 102 | - host: macos-14-large 103 | target: x86_64-apple-darwin 104 | description: "macOS 14" 105 | - host: macos-latest 106 | target: aarch64-apple-darwin 107 | description: "macOS latest" 108 | - host: ubuntu-22.04 109 | target: x86_64-unknown-linux-gnu 110 | description: "Ubuntu Container(22.04)" 111 | name: Test on ${{ matrix.settings.target }} (${{ matrix.settings.description }}) 112 | runs-on: ${{ matrix.settings.host }} 113 | steps: 114 | - uses: actions/checkout@v4 115 | - name: Install container dependencies 116 | if: matrix.settings.host == 'ubuntu-22.04' 117 | run: | 118 | sudo apt-get update 119 | sudo apt-get install -y curl build-essential cgroup-tools coreutils 120 | - name: Setup node 121 | uses: actions/setup-node@v4 122 | with: 123 | node-version: 20 124 | - name: Install dependencies 125 | run: npm ci 126 | 127 | - name: Download build artifacts 128 | uses: actions/download-artifact@v4 129 | with: 130 | name: bindings-${{ matrix.settings.target }} 131 | 132 | - name: Test bindings 133 | run: npm run test:ci 134 | 135 | publish: 136 | name: Publish 137 | runs-on: ubuntu-22.04 138 | if: github.ref == 'refs/heads/main' 139 | needs: 140 | - test 141 | steps: 142 | - uses: actions/checkout@v4 143 | - name: Setup node 144 | uses: actions/setup-node@v4 145 | with: 146 | node-version: 20 147 | - name: Install dependencies 148 | run: npm ci 149 | - name: Download all artifacts 150 | uses: 
actions/download-artifact@v4 151 | with: 152 | path: artifacts 153 | - name: Move artifacts 154 | run: npm run artifacts 155 | - name: Build wrapper 156 | run: npm run build:wrapper 157 | - name: List packages 158 | run: ls -R ./npm 159 | shell: bash 160 | - name: Publish 161 | run: | 162 | npm config set provenance true 163 | echo "//registry.npmjs.org/:_authToken=$NPM_TOKEN" >> ~/.npmrc 164 | npm publish --access public 165 | env: 166 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 167 | NPM_TOKEN: ${{ secrets.NPM_TOKEN }} 168 | -------------------------------------------------------------------------------- /wrapper.ts: -------------------------------------------------------------------------------- 1 | import { type Readable, type Writable } from 'node:stream'; 2 | import { ReadStream } from 'node:tty'; 3 | import { 4 | Pty as RawPty, 5 | type Size, 6 | setCloseOnExec as rawSetCloseOnExec, 7 | getCloseOnExec as rawGetCloseOnExec, 8 | ptyResize, 9 | MAX_U16_VALUE, 10 | MIN_U16_VALUE, 11 | } from './index.js'; 12 | import { 13 | type PtyOptions, 14 | Operation, 15 | type SandboxRule, 16 | type SandboxOptions, 17 | } from './index.js'; 18 | import { EOF_EVENT, SyntheticEOFDetector } from './syntheticEof.js'; 19 | 20 | export { Operation, type SandboxRule, type SandboxOptions, type PtyOptions }; 21 | 22 | type ExitResult = { 23 | error: NodeJS.ErrnoException | null; 24 | code: number; 25 | }; 26 | 27 | /** 28 | * A very thin wrapper around PTYs and processes. 29 | * 30 | * @example 31 | * const { Pty } = require('@replit/ruspty'); 32 | * 33 | * const pty = new Pty({ 34 | * command: '/bin/sh', 35 | * args: [], 36 | * envs: ENV, 37 | * dir: CWD, 38 | * size: { rows: 24, cols: 80 }, 39 | * onExit: (...result) => { 40 | * // TODO: Handle process exit. 41 | * }, 42 | * }); 43 | * 44 | * const read = pty.read; 45 | * const write = pty.write; 46 | * 47 | * read.on('data', (chunk) => { 48 | * // TODO: Handle data. 
49 | * }); 50 | * write.write('echo hello\n'); 51 | */ 52 | export class Pty { 53 | #pty: RawPty; 54 | #fd: number; 55 | 56 | #handledClose: boolean = false; 57 | #socketClosed: boolean = false; 58 | #userFdDropped: boolean = false; 59 | #fdDropTimeout: ReturnType<typeof setTimeout> | null = null; 60 | 61 | #socket: ReadStream; 62 | read: Readable; 63 | write: Writable; 64 | 65 | constructor(options: PtyOptions) { 66 | const realExit = options.onExit; 67 | 68 | let markExited!: (value: ExitResult) => void; 69 | let exitResult: Promise<ExitResult> = new Promise((resolve) => { 70 | markExited = resolve; 71 | }); 72 | 73 | let markReadFinished!: () => void; 74 | let readFinished = new Promise<void>((resolve) => { 75 | markReadFinished = resolve; 76 | }); 77 | 78 | // when pty exits, we should wait until the fd actually ends (end OR error) 79 | // before closing the pty 80 | // we use a mocked exit function to capture the exit result 81 | // and then call the real exit function after the fd is fully read 82 | this.#pty = new RawPty({ 83 | ...options, 84 | onExit: (error, code) => { 85 | // give nodejs a max of 1s to read the fd before 86 | // dropping the fd to avoid leaking it 87 | this.#fdDropTimeout = setTimeout(() => { 88 | this.dropUserFd(); 89 | }, 1000); 90 | 91 | markExited({ error, code }); 92 | }, 93 | }); 94 | this.#fd = this.#pty.takeControllerFd(); 95 | this.#socket = new ReadStream(this.#fd); 96 | 97 | // catch end events 98 | const handleClose = async () => { 99 | if (this.#socketClosed) { 100 | return; 101 | } 102 | 103 | this.#socketClosed = true; 104 | 105 | // must wait for fd close and exit result before calling real exit 106 | await readFinished; 107 | const result = await exitResult; 108 | realExit(result.error, result.code); 109 | }; 110 | 111 | // PTYs signal their done-ness with an EIO error. we therefore need to filter them out (as well as 112 | // cleaning up other spurious errors) so that the user doesn't need to handle them and be in 113 | // blissful peace. 
114 | const handleError = (err: NodeJS.ErrnoException) => { 115 | if (err.code) { 116 | const code = err.code; 117 | if (code === 'EINTR' || code === 'EAGAIN') { 118 | // these two are expected. EINTR happens when the kernel restarts a `read(2)`/`write(2)` 119 | // syscall due to it being interrupted by another syscall, and EAGAIN happens when there 120 | // is no more data to be read by the fd. 121 | return; 122 | } else if (code.indexOf('EIO') !== -1) { 123 | // EIO only happens when the child dies. It is therefore our only true signal that there 124 | // is nothing left to read and we can start tearing things down. If we hadn't received an 125 | // error so far, we are considered to be in good standing. 126 | this.#socket.off('error', handleError); 127 | // emit 'end' to signal no more data 128 | // this will trigger our 'end' handler which marks readFinished 129 | this.#socket.emit('end'); 130 | return; 131 | } 132 | } 133 | 134 | // if we haven't handled the error by now, we should throw it 135 | throw err; 136 | }; 137 | 138 | // we need this synthetic eof detector as the pty stream has no way 139 | // of distinguishing the program exiting vs the data being fully read 140 | // this is injected on the rust side after the .wait on the child process 141 | // returns 142 | // more details: https://github.com/replit/ruspty/pull/93 143 | this.read = this.#socket.pipe(new SyntheticEOFDetector()); 144 | this.write = this.#socket; 145 | 146 | this.#socket.on('error', handleError); 147 | this.#socket.once('end', markReadFinished); 148 | this.#socket.once('close', handleClose); 149 | this.read.once(EOF_EVENT, async () => { 150 | // even if the program accidentally emits our synthetic eof 151 | // we dont yank the user fd away from them until the program actually exits 152 | // (and drops its copy of the user fd) 153 | await exitResult; 154 | this.dropUserFd(); 155 | }); 156 | } 157 | 158 | private dropUserFd() { 159 | if (this.#userFdDropped) { 160 | return; 161 | } 162 
| 163 | if (this.#fdDropTimeout) { 164 | clearTimeout(this.#fdDropTimeout); 165 | } 166 | 167 | this.#userFdDropped = true; 168 | this.#pty.dropUserFd(); 169 | } 170 | 171 | close() { 172 | this.#handledClose = true; 173 | 174 | // end instead of destroy so that the user can read the last bits of data 175 | // and allow graceful close event to mark the fd as ended 176 | this.#socket.end(); 177 | this.dropUserFd(); 178 | } 179 | 180 | resize(size: Size) { 181 | if (this.#handledClose || this.#socketClosed) { 182 | return; 183 | } 184 | 185 | if ( 186 | size.cols < MIN_U16_VALUE || 187 | size.cols > MAX_U16_VALUE || 188 | size.rows < MIN_U16_VALUE || 189 | size.rows > MAX_U16_VALUE 190 | ) { 191 | throw new RangeError( 192 | `Size (${size.rows}x${size.cols}) out of range: must be between ${MIN_U16_VALUE} and ${MAX_U16_VALUE}`, 193 | ); 194 | } 195 | 196 | try { 197 | ptyResize(this.#fd, size); 198 | } catch (e: unknown) { 199 | // napi-rs only throws strings so we must string match here 200 | // https://docs.rs/napi/latest/napi/struct.Error.html#method.new 201 | if ( 202 | e instanceof Error && 203 | (e.message.indexOf('os error 9') !== -1 || // EBADF 204 | e.message.indexOf('os error 25') !== -1) 205 | ) { 206 | // ENOTTY 207 | // error 9 is EBADF (bad file descriptor) 208 | // error 25 is ENOTTY (inappropriate ioctl for device) 209 | // These can happen if the PTY has already exited or wasn't a terminal device 210 | // In that case, we just ignore the error. 211 | return; 212 | } 213 | 214 | // otherwise, rethrow 215 | throw e; 216 | } 217 | } 218 | 219 | get pid() { 220 | return this.#pty.pid; 221 | } 222 | } 223 | 224 | /** 225 | * Set the close-on-exec flag on a file descriptor. This is `fcntl(fd, F_SETFD, FD_CLOEXEC)` under 226 | * the covers. 227 | */ 228 | export const setCloseOnExec = rawSetCloseOnExec; 229 | 230 | /** 231 | * Get the close-on-exec flag on a file descriptor. This is `fcntl(fd, F_GETFD) & FD_CLOEXEC == 232 | * FD_CLOEXEC` under the covers. 
233 | */ 234 | export const getCloseOnExec = rawGetCloseOnExec; 235 | -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | /* tslint:disable */ 2 | /* eslint-disable */ 3 | /* prettier-ignore */ 4 | 5 | /* auto-generated by NAPI-RS */ 6 | 7 | const { existsSync, readFileSync } = require('fs') 8 | const { join } = require('path'); 9 | 10 | const { platform, arch } = process; 11 | 12 | let nativeBinding = null; 13 | let localFileExisted = false; 14 | let loadError = null; 15 | 16 | function isMusl() { 17 | // For Node 10 18 | if (!process.report || typeof process.report.getReport !== 'function') { 19 | try { 20 | const lddPath = require('child_process') 21 | .execSync('which ldd') 22 | .toString() 23 | .trim(); 24 | return readFileSync(lddPath, 'utf8').includes('musl'); 25 | } catch (e) { 26 | return true; 27 | } 28 | } else { 29 | const { glibcVersionRuntime } = process.report.getReport().header; 30 | return !glibcVersionRuntime; 31 | } 32 | } 33 | 34 | switch (platform) { 35 | case 'android': 36 | switch (arch) { 37 | case 'arm64': 38 | localFileExisted = existsSync( 39 | join(__dirname, 'ruspty.android-arm64.node'), 40 | ); 41 | try { 42 | if (localFileExisted) { 43 | nativeBinding = require('./ruspty.android-arm64.node'); 44 | } else { 45 | nativeBinding = require('@replit/ruspty-android-arm64'); 46 | } 47 | } catch (e) { 48 | loadError = e; 49 | } 50 | break; 51 | case 'arm': 52 | localFileExisted = existsSync( 53 | join(__dirname, 'ruspty.android-arm-eabi.node'), 54 | ); 55 | try { 56 | if (localFileExisted) { 57 | nativeBinding = require('./ruspty.android-arm-eabi.node'); 58 | } else { 59 | nativeBinding = require('@replit/ruspty-android-arm-eabi'); 60 | } 61 | } catch (e) { 62 | loadError = e; 63 | } 64 | break; 65 | default: 66 | throw new Error(`Unsupported architecture on Android ${arch}`); 67 | } 68 | break; 69 | case 'win32': 70 | 
switch (arch) { 71 | case 'x64': 72 | localFileExisted = existsSync( 73 | join(__dirname, 'ruspty.win32-x64-msvc.node'), 74 | ); 75 | try { 76 | if (localFileExisted) { 77 | nativeBinding = require('./ruspty.win32-x64-msvc.node'); 78 | } else { 79 | nativeBinding = require('@replit/ruspty-win32-x64-msvc'); 80 | } 81 | } catch (e) { 82 | loadError = e; 83 | } 84 | break; 85 | case 'ia32': 86 | localFileExisted = existsSync( 87 | join(__dirname, 'ruspty.win32-ia32-msvc.node'), 88 | ); 89 | try { 90 | if (localFileExisted) { 91 | nativeBinding = require('./ruspty.win32-ia32-msvc.node'); 92 | } else { 93 | nativeBinding = require('@replit/ruspty-win32-ia32-msvc'); 94 | } 95 | } catch (e) { 96 | loadError = e; 97 | } 98 | break; 99 | case 'arm64': 100 | localFileExisted = existsSync( 101 | join(__dirname, 'ruspty.win32-arm64-msvc.node'), 102 | ); 103 | try { 104 | if (localFileExisted) { 105 | nativeBinding = require('./ruspty.win32-arm64-msvc.node'); 106 | } else { 107 | nativeBinding = require('@replit/ruspty-win32-arm64-msvc'); 108 | } 109 | } catch (e) { 110 | loadError = e; 111 | } 112 | break; 113 | default: 114 | throw new Error(`Unsupported architecture on Windows: ${arch}`); 115 | } 116 | break; 117 | case 'darwin': 118 | localFileExisted = existsSync( 119 | join(__dirname, 'ruspty.darwin-universal.node'), 120 | ); 121 | try { 122 | if (localFileExisted) { 123 | nativeBinding = require('./ruspty.darwin-universal.node'); 124 | } else { 125 | nativeBinding = require('@replit/ruspty-darwin-universal'); 126 | } 127 | break; 128 | } catch {} 129 | switch (arch) { 130 | case 'x64': 131 | localFileExisted = existsSync( 132 | join(__dirname, 'ruspty.darwin-x64.node'), 133 | ); 134 | try { 135 | if (localFileExisted) { 136 | nativeBinding = require('./ruspty.darwin-x64.node'); 137 | } else { 138 | nativeBinding = require('@replit/ruspty-darwin-x64'); 139 | } 140 | } catch (e) { 141 | loadError = e; 142 | } 143 | break; 144 | case 'arm64': 145 | localFileExisted = 
existsSync( 146 | join(__dirname, 'ruspty.darwin-arm64.node'), 147 | ); 148 | try { 149 | if (localFileExisted) { 150 | nativeBinding = require('./ruspty.darwin-arm64.node'); 151 | } else { 152 | nativeBinding = require('@replit/ruspty-darwin-arm64'); 153 | } 154 | } catch (e) { 155 | loadError = e; 156 | } 157 | break; 158 | default: 159 | throw new Error(`Unsupported architecture on macOS: ${arch}`); 160 | } 161 | break; 162 | case 'freebsd': 163 | if (arch !== 'x64') { 164 | throw new Error(`Unsupported architecture on FreeBSD: ${arch}`); 165 | } 166 | localFileExisted = existsSync(join(__dirname, 'ruspty.freebsd-x64.node')); 167 | try { 168 | if (localFileExisted) { 169 | nativeBinding = require('./ruspty.freebsd-x64.node'); 170 | } else { 171 | nativeBinding = require('@replit/ruspty-freebsd-x64'); 172 | } 173 | } catch (e) { 174 | loadError = e; 175 | } 176 | break; 177 | case 'linux': 178 | switch (arch) { 179 | case 'x64': 180 | if (isMusl()) { 181 | localFileExisted = existsSync( 182 | join(__dirname, 'ruspty.linux-x64-musl.node'), 183 | ); 184 | try { 185 | if (localFileExisted) { 186 | nativeBinding = require('./ruspty.linux-x64-musl.node'); 187 | } else { 188 | nativeBinding = require('@replit/ruspty-linux-x64-musl'); 189 | } 190 | } catch (e) { 191 | loadError = e; 192 | } 193 | } else { 194 | localFileExisted = existsSync( 195 | join(__dirname, 'ruspty.linux-x64-gnu.node'), 196 | ); 197 | try { 198 | if (localFileExisted) { 199 | nativeBinding = require('./ruspty.linux-x64-gnu.node'); 200 | } else { 201 | nativeBinding = require('@replit/ruspty-linux-x64-gnu'); 202 | } 203 | } catch (e) { 204 | loadError = e; 205 | } 206 | } 207 | break; 208 | case 'arm64': 209 | if (isMusl()) { 210 | localFileExisted = existsSync( 211 | join(__dirname, 'ruspty.linux-arm64-musl.node'), 212 | ); 213 | try { 214 | if (localFileExisted) { 215 | nativeBinding = require('./ruspty.linux-arm64-musl.node'); 216 | } else { 217 | nativeBinding = 
require('@replit/ruspty-linux-arm64-musl'); 218 | } 219 | } catch (e) { 220 | loadError = e; 221 | } 222 | } else { 223 | localFileExisted = existsSync( 224 | join(__dirname, 'ruspty.linux-arm64-gnu.node'), 225 | ); 226 | try { 227 | if (localFileExisted) { 228 | nativeBinding = require('./ruspty.linux-arm64-gnu.node'); 229 | } else { 230 | nativeBinding = require('@replit/ruspty-linux-arm64-gnu'); 231 | } 232 | } catch (e) { 233 | loadError = e; 234 | } 235 | } 236 | break; 237 | case 'arm': 238 | if (isMusl()) { 239 | localFileExisted = existsSync( 240 | join(__dirname, 'ruspty.linux-arm-musleabihf.node'), 241 | ); 242 | try { 243 | if (localFileExisted) { 244 | nativeBinding = require('./ruspty.linux-arm-musleabihf.node'); 245 | } else { 246 | nativeBinding = require('@replit/ruspty-linux-arm-musleabihf'); 247 | } 248 | } catch (e) { 249 | loadError = e; 250 | } 251 | } else { 252 | localFileExisted = existsSync( 253 | join(__dirname, 'ruspty.linux-arm-gnueabihf.node'), 254 | ); 255 | try { 256 | if (localFileExisted) { 257 | nativeBinding = require('./ruspty.linux-arm-gnueabihf.node'); 258 | } else { 259 | nativeBinding = require('@replit/ruspty-linux-arm-gnueabihf'); 260 | } 261 | } catch (e) { 262 | loadError = e; 263 | } 264 | } 265 | break; 266 | case 'riscv64': 267 | if (isMusl()) { 268 | localFileExisted = existsSync( 269 | join(__dirname, 'ruspty.linux-riscv64-musl.node'), 270 | ); 271 | try { 272 | if (localFileExisted) { 273 | nativeBinding = require('./ruspty.linux-riscv64-musl.node'); 274 | } else { 275 | nativeBinding = require('@replit/ruspty-linux-riscv64-musl'); 276 | } 277 | } catch (e) { 278 | loadError = e; 279 | } 280 | } else { 281 | localFileExisted = existsSync( 282 | join(__dirname, 'ruspty.linux-riscv64-gnu.node'), 283 | ); 284 | try { 285 | if (localFileExisted) { 286 | nativeBinding = require('./ruspty.linux-riscv64-gnu.node'); 287 | } else { 288 | nativeBinding = require('@replit/ruspty-linux-riscv64-gnu'); 289 | } 290 | } catch (e) { 
291 | loadError = e; 292 | } 293 | } 294 | break; 295 | case 's390x': 296 | localFileExisted = existsSync( 297 | join(__dirname, 'ruspty.linux-s390x-gnu.node'), 298 | ); 299 | try { 300 | if (localFileExisted) { 301 | nativeBinding = require('./ruspty.linux-s390x-gnu.node'); 302 | } else { 303 | nativeBinding = require('@replit/ruspty-linux-s390x-gnu'); 304 | } 305 | } catch (e) { 306 | loadError = e; 307 | } 308 | break; 309 | default: 310 | throw new Error(`Unsupported architecture on Linux: ${arch}`); 311 | } 312 | break; 313 | default: 314 | throw new Error(`Unsupported OS: ${platform}, architecture: ${arch}`); 315 | } 316 | 317 | if (!nativeBinding) { 318 | if (loadError) { 319 | throw loadError; 320 | } 321 | throw new Error(`Failed to load native binding`); 322 | } 323 | 324 | const { 325 | Pty, 326 | Operation, 327 | MAX_U16_VALUE, 328 | MIN_U16_VALUE, 329 | getSyntheticEofSequence, 330 | ptyResize, 331 | setCloseOnExec, 332 | getCloseOnExec, 333 | } = nativeBinding; 334 | 335 | module.exports.Pty = Pty; 336 | module.exports.Operation = Operation; 337 | module.exports.MAX_U16_VALUE = MAX_U16_VALUE; 338 | module.exports.MIN_U16_VALUE = MIN_U16_VALUE; 339 | module.exports.getSyntheticEofSequence = getSyntheticEofSequence; 340 | module.exports.ptyResize = ptyResize; 341 | module.exports.setCloseOnExec = setCloseOnExec; 342 | module.exports.getCloseOnExec = getCloseOnExec; 343 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::fs::{write, File}; 3 | use std::io::ErrorKind; 4 | use std::io::{Error, Write}; 5 | use std::os::fd::{AsRawFd, OwnedFd}; 6 | use std::os::fd::{FromRawFd, IntoRawFd, RawFd}; 7 | use std::os::unix::process::CommandExt; 8 | use std::process::{Command, Stdio}; 9 | use std::thread; 10 | 11 | use napi::bindgen_prelude::{Buffer, JsFunction}; 12 | use 
napi::threadsafe_function::{ErrorStrategy, ThreadsafeFunction, ThreadsafeFunctionCallMode}; 13 | use napi::Status::GenericFailure; 14 | use napi::{self, Env}; 15 | use nix::errno::Errno; 16 | use nix::fcntl::{fcntl, FcntlArg, FdFlag, OFlag}; 17 | use nix::libc::{self, c_int, TIOCSCTTY, TIOCSWINSZ}; 18 | use nix::pty::{openpty, Winsize}; 19 | use nix::sys::termios::{self, SetArg}; 20 | 21 | #[macro_use] 22 | extern crate napi_derive; 23 | 24 | #[cfg(target_os = "linux")] 25 | mod sandbox; 26 | 27 | #[napi] 28 | #[allow(dead_code)] 29 | struct Pty { 30 | controller_fd: Option<OwnedFd>, 31 | user_fd: Option<OwnedFd>, 32 | /// The pid of the forked process. 33 | pub pid: u32, 34 | } 35 | 36 | #[napi(string_enum)] 37 | pub enum Operation { 38 | Modify, 39 | Delete, 40 | } 41 | 42 | /// Sandboxing rules. Deleting / modifying a path with any of the prefixes is forbidden and will 43 | /// cause process termination. 44 | #[napi(object)] 45 | pub struct SandboxRule { 46 | /// The forbidden operation. 47 | pub operation: Operation, 48 | /// The list of prefixes that are matched by this rule. 49 | pub prefixes: Vec<String>, 50 | /// The list of prefixes that are excluded from this rule. 51 | pub exclude_prefixes: Option<Vec<String>>, 52 | /// The message to be shown if this rule triggers. 53 | pub message: String, 54 | } 55 | 56 | /// Options for the sandbox. 57 | #[napi(object)] 58 | pub struct SandboxOptions { 59 | pub rules: Vec<SandboxRule>, 60 | } 61 | 62 | /// The options that can be passed to the constructor of Pty. 63 | #[napi(object)] 64 | struct PtyOptions { 65 | pub command: String, 66 | pub args: Option<Vec<String>>, 67 | pub envs: Option<HashMap<String, String>>, 68 | pub dir: Option<String>, 69 | pub size: Option<Size>, 70 | pub cgroup_path: Option<String>, 71 | pub apparmor_profile: Option<String>, 72 | pub interactive: Option<bool>, 73 | pub sandbox: Option<SandboxOptions>, 74 | #[napi(ts_type = "(err: null | Error, exitCode: number) => void")] 75 | pub on_exit: JsFunction, 76 | } 77 | 78 | /// A size struct to pass to resize. 
79 | #[napi(object)] 80 | struct Size { 81 | pub cols: u16, 82 | pub rows: u16, 83 | } 84 | 85 | #[napi] 86 | pub const MAX_U16_VALUE: u16 = u16::MAX; 87 | #[napi] 88 | pub const MIN_U16_VALUE: u16 = u16::MIN; 89 | 90 | const SYNTHETIC_EOF: &[u8] = b"\x1B]7878\x1B\\"; 91 | 92 | #[napi] 93 | pub fn get_synthetic_eof_sequence() -> Buffer { 94 | SYNTHETIC_EOF.into() 95 | } 96 | 97 | fn cast_to_napi_error(err: Errno) -> napi::Error { 98 | napi::Error::new(GenericFailure, err) 99 | } 100 | 101 | #[napi] 102 | impl Pty { 103 | #[napi(constructor)] 104 | #[allow(dead_code)] 105 | pub fn new(_env: Env, opts: PtyOptions) -> Result { 106 | #[cfg(not(target_os = "linux"))] 107 | if opts.cgroup_path.is_some() { 108 | return Err(napi::Error::new( 109 | napi::Status::GenericFailure, 110 | "cgroup_path is only supported on Linux", 111 | )); 112 | } 113 | 114 | #[cfg(not(target_os = "linux"))] 115 | if opts.sandbox.is_some() { 116 | return Err(napi::Error::new( 117 | napi::Status::GenericFailure, 118 | "sandbox is only supported on Linux", 119 | )); 120 | } 121 | 122 | #[cfg(not(target_os = "linux"))] 123 | if opts.apparmor_profile.is_some() { 124 | return Err(napi::Error::new( 125 | napi::Status::GenericFailure, 126 | "apparmor is only supported on Linux", 127 | )); 128 | } 129 | 130 | #[cfg(target_os = "linux")] 131 | if opts.sandbox.is_some() && opts.cgroup_path.is_none() { 132 | return Err(napi::Error::new( 133 | napi::Status::GenericFailure, 134 | "cannot enable sandbox without cgroup", 135 | )); 136 | } 137 | 138 | let size = opts.size.unwrap_or(Size { cols: 80, rows: 24 }); 139 | let window_size = Winsize { 140 | ws_col: size.cols, 141 | ws_row: size.rows, 142 | ws_xpixel: 0, 143 | ws_ypixel: 0, 144 | }; 145 | 146 | let mut cmd = Command::new(opts.command); 147 | if let Some(args) = opts.args { 148 | cmd.args(args); 149 | } 150 | 151 | // open pty pair, and set close-on-exec to avoid unwanted copies of the FDs from finding their 152 | // way into subprocesses. 
Also set the nonblocking flag to avoid Node from consuming a full I/O 153 | // thread for this. 154 | let pty_res = openpty(&window_size, None).map_err(cast_to_napi_error)?; 155 | let controller_fd = pty_res.master; 156 | let user_fd = pty_res.slave; 157 | set_close_on_exec(controller_fd.as_raw_fd(), true)?; 158 | set_close_on_exec(user_fd.as_raw_fd(), true)?; 159 | set_nonblocking(controller_fd.as_raw_fd())?; 160 | 161 | // duplicate pty user_fd to be the child's stdin, stdout, and stderr 162 | if opts.interactive.unwrap_or(true) { 163 | cmd.stdin(Stdio::from(user_fd.try_clone()?)); 164 | } else { 165 | cmd.stdin(Stdio::null()); 166 | } 167 | cmd.stderr(Stdio::from(user_fd.try_clone()?)); 168 | cmd.stdout(Stdio::from(user_fd.try_clone()?)); 169 | 170 | // we want the env to be clean, we can always pass in `process.env` if we want to. 171 | cmd.env_clear(); 172 | if let Some(envs) = opts.envs { 173 | cmd.envs(envs); 174 | } 175 | 176 | // set working dir if applicable 177 | if let Some(dir) = opts.dir { 178 | cmd.current_dir(dir); 179 | } 180 | 181 | let raw_user_fd = user_fd.as_raw_fd(); 182 | let raw_controller_fd = controller_fd.as_raw_fd(); 183 | unsafe { 184 | // right before we spawn the child, we should do a bunch of setup 185 | // this is all run in the context of the child process 186 | cmd.pre_exec(move || { 187 | // set the cgroup if specified 188 | #[cfg(target_os = "linux")] 189 | if let Some(cgroup_path) = &opts.cgroup_path { 190 | let pid = libc::getpid(); 191 | let cgroup_path = format!("{}/cgroup.procs", cgroup_path); 192 | let mut cgroup_file = File::create(cgroup_path)?; 193 | cgroup_file.write_all(format!("{}", pid).as_bytes())?; 194 | 195 | // also set the sandbox if specified. It's important for it to be in a cgroup so that we don't 196 | // accidentally leak processes if something went wrong. 
197 | if let Some(sandbox_opts) = &opts.sandbox { 198 | if let Err(err) = sandbox::install_sandbox(sandbox::Options { 199 | rules: sandbox_opts 200 | .rules 201 | .iter() 202 | .map(|rule| sandbox::Rule { 203 | operation: match rule.operation { 204 | Operation::Modify => sandbox::Operation::Modify, 205 | Operation::Delete => sandbox::Operation::Delete, 206 | }, 207 | prefixes: rule.prefixes.clone(), 208 | exclude_prefixes: rule.exclude_prefixes.clone(), 209 | message: rule.message.clone(), 210 | }) 211 | .collect(), 212 | }) { 213 | return Err(Error::new( 214 | ErrorKind::Other, 215 | format!("install_sandbox: {:#?}", err), 216 | )); 217 | } 218 | } 219 | } 220 | 221 | // start a new session 222 | let err = libc::setsid(); 223 | if err == -1 { 224 | return Err(Error::new(ErrorKind::Other, "setsid")); 225 | } 226 | 227 | // become the controlling tty for the program. 228 | // Note that TIOCSCTTY is not the same size in all platforms. 229 | #[allow(clippy::useless_conversion)] 230 | let err = libc::ioctl(raw_user_fd, TIOCSCTTY.into(), 0); 231 | if err == -1 { 232 | return Err(Error::new(ErrorKind::Other, "ioctl-TIOCSCTTY")); 233 | } 234 | 235 | // we need to drop the controller fd, since we don't need it in the child 236 | // and it's not safe to keep it open 237 | libc::close(raw_controller_fd); 238 | 239 | // just to be safe, mark every single file descriptor as close-on-exec. 240 | // needs to use the raw syscall to avoid dependencies on newer versions of glibc. 241 | #[cfg(target_os = "linux")] 242 | libc::syscall( 243 | libc::SYS_close_range, 244 | 3, 245 | libc::c_uint::MAX, 246 | libc::CLOSE_RANGE_CLOEXEC as c_int, 247 | ); 248 | 249 | // Set the AppArmor profile. 250 | #[cfg(target_os = "linux")] 251 | if let Some(apparmor_profile) = &opts.apparmor_profile { 252 | // TODO: Make this fail once we're sure we're never going back. 
253 | let _ = write( 254 | "/proc/self/attr/apparmor/exec", 255 | format!("exec {apparmor_profile}"), 256 | ); 257 | } 258 | 259 | // set input modes 260 | let user_fd = OwnedFd::from_raw_fd(raw_user_fd); 261 | if let Ok(mut termios) = termios::tcgetattr(&user_fd) { 262 | termios.input_flags |= termios::InputFlags::IUTF8; 263 | termios::tcsetattr(&user_fd, SetArg::TCSANOW, &termios)?; 264 | } 265 | 266 | // reset signal handlers 267 | libc::signal(libc::SIGCHLD, libc::SIG_DFL); 268 | libc::signal(libc::SIGHUP, libc::SIG_DFL); 269 | libc::signal(libc::SIGINT, libc::SIG_DFL); 270 | libc::signal(libc::SIGQUIT, libc::SIG_DFL); 271 | libc::signal(libc::SIGTERM, libc::SIG_DFL); 272 | libc::signal(libc::SIGALRM, libc::SIG_DFL); 273 | 274 | Ok(()) 275 | }); 276 | } 277 | 278 | // actually spawn the child 279 | let mut child = cmd.spawn()?; 280 | let pid = child.id(); 281 | 282 | // We're creating a new thread for every child, this uses a bit more system resources compared 283 | // to alternatives (below), trading off simplicity of implementation. 284 | // 285 | // The alternatives: 286 | // - Mandate that every single `wait` goes through a central process-wide loop that knows 287 | // about all processes (this is what `pid1` does), but needs a bit of care and some static 288 | // analysis to ensure that every single call goes through the wrapper to avoid double `wait`'s 289 | // on a child. 290 | // - Have a single thread loop where other entities can register children (by sending the pid 291 | // over a channel) and this loop can use `poll` to listen for each child's `pidfd` for when 292 | // they are ready to be `wait`'ed. This has the inconvenience that it consumes one FD per child. 
293 | // 294 | // For discussion check out: https://github.com/replit/ruspty/pull/1#discussion_r1463672548 295 | let ts_on_exit: ThreadsafeFunction = opts 296 | .on_exit 297 | .create_threadsafe_function(0, |ctx| ctx.env.create_int32(ctx.value).map(|v| vec![v]))?; 298 | 299 | thread::spawn(move || { 300 | let wait_result = child.wait(); 301 | 302 | // by this point, child has closed its copy of the user_fd 303 | // lets inject our synthetic EOF OSC into the user_fd 304 | // its ok to ignore the result here as we have a timeout on the nodejs side to handle if this write fails 305 | let _ = write_syn_eof_to_fd(raw_user_fd); 306 | 307 | match wait_result { 308 | Ok(status) => { 309 | if status.success() { 310 | ts_on_exit.call(Ok(0), ThreadsafeFunctionCallMode::Blocking); 311 | } else { 312 | ts_on_exit.call( 313 | Ok(status.code().unwrap_or(-1)), 314 | ThreadsafeFunctionCallMode::Blocking, 315 | ); 316 | } 317 | } 318 | Err(err) => { 319 | ts_on_exit.call( 320 | Err(napi::Error::new( 321 | GenericFailure, 322 | format!( 323 | "OS error when waiting for child process to exit: {}", 324 | err.raw_os_error().unwrap_or(-1) 325 | ), 326 | )), 327 | ThreadsafeFunctionCallMode::Blocking, 328 | ); 329 | } 330 | } 331 | }); 332 | 333 | Ok(Pty { 334 | controller_fd: Some(controller_fd), 335 | user_fd: Some(user_fd), 336 | pid, 337 | }) 338 | } 339 | 340 | /// Transfers ownership of the file descriptor for the PTY controller. This can only be called 341 | /// once (it will error the second time). The caller is responsible for closing the file 342 | /// descriptor. 343 | #[napi] 344 | #[allow(dead_code)] 345 | pub fn take_controller_fd(&mut self) -> Result { 346 | if let Some(fd) = self.controller_fd.take() { 347 | Ok(fd.into_raw_fd()) 348 | } else { 349 | Err(napi::Error::new( 350 | napi::Status::GenericFailure, 351 | "fd failed: bad file descriptor (os error 9)", 352 | )) 353 | } 354 | } 355 | 356 | /// Closes the owned file descriptor for the PTY controller. 
The Nodejs side must call this 357 | /// when it is done with the file descriptor to avoid leaking FDs. 358 | #[napi] 359 | #[allow(dead_code)] 360 | pub fn drop_user_fd(&mut self) -> Result<(), napi::Error> { 361 | self.user_fd.take(); 362 | Ok(()) 363 | } 364 | } 365 | 366 | /// Resize the terminal. 367 | #[napi] 368 | #[allow(dead_code)] 369 | fn pty_resize(fd: i32, size: Size) -> Result<(), napi::Error> { 370 | let window_size = Winsize { 371 | ws_col: size.cols, 372 | ws_row: size.rows, 373 | ws_xpixel: 0, 374 | ws_ypixel: 0, 375 | }; 376 | 377 | let res = unsafe { libc::ioctl(fd, TIOCSWINSZ, &window_size as *const _) }; 378 | if res == -1 { 379 | return Err(napi::Error::new( 380 | napi::Status::GenericFailure, 381 | format!("ioctl TIOCSWINSZ failed: {}", Error::last_os_error()), 382 | )); 383 | } 384 | 385 | Ok(()) 386 | } 387 | 388 | /// Set the close-on-exec flag on a file descriptor. This is `fcntl(fd, F_SETFD, FD_CLOEXEC)` under 389 | /// the covers. 390 | #[napi] 391 | #[allow(dead_code)] 392 | fn set_close_on_exec(fd: i32, close_on_exec: bool) -> Result<(), napi::Error> { 393 | let old_flags = match fcntl(fd as RawFd, FcntlArg::F_GETFD) { 394 | Ok(flags) => FdFlag::from_bits_truncate(flags), 395 | Err(err) => { 396 | return Err(napi::Error::new( 397 | GenericFailure, 398 | format!("fcntl F_GETFD: {}", err,), 399 | )); 400 | } 401 | }; 402 | let mut new_flags = old_flags; 403 | new_flags.set(FdFlag::FD_CLOEXEC, close_on_exec); 404 | if old_flags == new_flags { 405 | // It's already in the correct state! 406 | return Ok(()); 407 | } 408 | 409 | if let Err(err) = fcntl(fd as RawFd, FcntlArg::F_SETFD(new_flags)) { 410 | return Err(napi::Error::new( 411 | GenericFailure, 412 | format!("fcntl F_SETFD: {}", err,), 413 | )); 414 | }; 415 | 416 | Ok(()) 417 | } 418 | 419 | /// Get the close-on-exec flag on a file descriptor. This is `fcntl(fd, F_GETFD) & FD_CLOEXEC == 420 | /// FD_CLOEXEC` under the covers. 
421 | #[napi] 422 | #[allow(dead_code)] 423 | fn get_close_on_exec(fd: i32) -> Result<bool, napi::Error> { 424 | match fcntl(fd as RawFd, FcntlArg::F_GETFD) { 425 | Ok(flags) => Ok(FdFlag::from_bits_truncate(flags).contains(FdFlag::FD_CLOEXEC)), 426 | Err(err) => Err(napi::Error::new( 427 | GenericFailure, 428 | format!("fcntl F_GETFD: {}", err,), 429 | )), 430 | } 431 | } 432 | 433 | /// Set the file descriptor to be non-blocking. 434 | #[allow(dead_code)] 435 | fn set_nonblocking(fd: i32) -> Result<(), napi::Error> { 436 | let old_flags = match fcntl(fd, FcntlArg::F_GETFL) { 437 | Ok(flags) => OFlag::from_bits_truncate(flags), 438 | Err(err) => { 439 | return Err(napi::Error::new( 440 | GenericFailure, 441 | format!("fcntl F_GETFL: {}", err), 442 | )); 443 | } 444 | }; 445 | 446 | let mut new_flags = old_flags; 447 | new_flags.set(OFlag::O_NONBLOCK, true); 448 | if old_flags != new_flags { 449 | if let Err(err) = fcntl(fd, FcntlArg::F_SETFL(new_flags)) { 450 | return Err(napi::Error::new( 451 | GenericFailure, 452 | format!("fcntl F_SETFL: {}", err), 453 | )); 454 | } 455 | } 456 | Ok(()) 457 | } 458 | 459 | fn write_syn_eof_to_fd(fd: libc::c_int) -> std::io::Result<()> { 460 | let mut remaining = SYNTHETIC_EOF; 461 | while !remaining.is_empty() { 462 | match unsafe { 463 | libc::write( 464 | fd, 465 | remaining.as_ptr() as *const libc::c_void, 466 | remaining.len(), 467 | ) 468 | } { 469 | -1 => { 470 | let err = std::io::Error::last_os_error(); 471 | if err.kind() == std::io::ErrorKind::Interrupted { 472 | continue; 473 | } 474 | 475 | return Err(err); 476 | } 477 | 0 => { 478 | return Err(std::io::Error::new( 479 | std::io::ErrorKind::WriteZero, 480 | "write returned 0", 481 | )); 482 | } 483 | n => { 484 | remaining = &remaining[n as usize..]; 485 | } 486 | } 487 | } 488 | Ok(()) 489 | } 490 | -------------------------------------------------------------------------------- /tests/index.test.ts: -------------------------------------------------------------------------------- 1 | 
import { Pty, getCloseOnExec, setCloseOnExec, Operation } from '../wrapper'; 2 | import { type Writable } from 'stream'; 3 | import { readdirSync, readlinkSync } from 'fs'; 4 | import { mkdir, rm, mkdtemp, writeFile } from 'node:fs/promises'; 5 | import { tmpdir } from 'node:os'; 6 | import { join } from 'node:path'; 7 | import { 8 | describe, 9 | test, 10 | expect, 11 | beforeEach, 12 | vi, 13 | type Mock, 14 | assert, 15 | } from 'vitest'; 16 | import { exec as execAsync } from 'child_process'; 17 | import { promisify } from 'util'; 18 | const exec = promisify(execAsync); 19 | 20 | const EOT = '\x04'; 21 | const procSelfFd = '/proc/self/fd/'; 22 | const IS_DARWIN = process.platform === 'darwin'; 23 | 24 | const testSkipOnDarwin = IS_DARWIN ? test.skip : test; 25 | const testOnlyOnDarwin = IS_DARWIN ? test : test.skip; 26 | 27 | type FdRecord = Record; 28 | function getOpenFds(): FdRecord { 29 | const fds: FdRecord = {}; 30 | if (process.platform !== 'linux') { 31 | return fds; 32 | } 33 | 34 | for (const filename of readdirSync(procSelfFd)) { 35 | try { 36 | const linkTarget = readlinkSync(procSelfFd + filename); 37 | if ( 38 | linkTarget.startsWith('anon_inode:[') || 39 | linkTarget.startsWith('socket:[') || 40 | // node likes to asynchronously read stuff mid-test. 
41 | linkTarget.includes('/ruspty/') || 42 | linkTarget === '/dev/null' 43 | ) { 44 | continue; 45 | } 46 | 47 | fds[filename] = linkTarget; 48 | } catch (err: any) { 49 | if (err.code === 'ENOENT') { 50 | continue; 51 | } 52 | throw err; 53 | } 54 | } 55 | 56 | return fds; 57 | } 58 | 59 | describe('PTY', { repeats: 500 }, () => { 60 | test('spawns and exits', async () => { 61 | const oldFds = getOpenFds(); 62 | const message = 'hello from a pty'; 63 | let buffer = ''; 64 | 65 | const onExit = vi.fn(); 66 | const pty = new Pty({ 67 | command: 'echo', 68 | args: [message], 69 | onExit, 70 | }); 71 | 72 | const readStream = pty.read; 73 | readStream.on('data', (chunk) => { 74 | buffer = chunk.toString(); 75 | }); 76 | 77 | await vi.waitFor(() => expect(onExit).toHaveBeenCalledTimes(1)); 78 | expect(onExit).toHaveBeenCalledWith(null, 0); 79 | expect(buffer.trim()).toBe(message); 80 | expect(pty.write.writable).toBe(false); 81 | expect(pty.read.readable).toBe(false); 82 | await vi.waitFor(() => expect(getOpenFds()).toStrictEqual(oldFds)); 83 | }); 84 | 85 | test('captures an exit code', async () => { 86 | const oldFds = getOpenFds(); 87 | const onExit = vi.fn(); 88 | const pty = new Pty({ 89 | command: 'sh', 90 | args: ['-c', 'exit 17'], 91 | onExit, 92 | }); 93 | 94 | // set a pty reader so it can flow 95 | pty.read.on('data', () => {}); 96 | 97 | await vi.waitFor(() => expect(onExit).toHaveBeenCalledTimes(1)); 98 | expect(onExit).toHaveBeenCalledWith(null, 17); 99 | expect(getOpenFds()).toStrictEqual(oldFds); 100 | }); 101 | 102 | test('can be written to', async () => { 103 | const oldFds = getOpenFds(); 104 | const message = 'hello cat\n'; 105 | let buffer = ''; 106 | const onExit = vi.fn(); 107 | 108 | const pty = new Pty({ 109 | command: 'cat', 110 | onExit, 111 | }); 112 | 113 | const writeStream = pty.write; 114 | const readStream = pty.read; 115 | 116 | readStream.on('data', (data) => { 117 | buffer += data.toString(); 118 | }); 119 | 120 | 
writeStream.write(message); 121 | writeStream.end(EOT); 122 | 123 | await vi.waitFor(() => expect(onExit).toHaveBeenCalledTimes(1)); 124 | expect(onExit).toHaveBeenCalledWith(null, 0); 125 | expect(pty.write.writable).toBe(false); 126 | 127 | let result = buffer.toString(); 128 | if (IS_DARWIN) { 129 | // Darwin adds the visible EOT to the stream. 130 | result = result.replace('^D\b\b', ''); 131 | } 132 | 133 | const expectedResult = 'hello cat\r\nhello cat\r\n'; 134 | expect(result.trim()).toStrictEqual(expectedResult.trim()); 135 | await vi.waitFor(() => expect(getOpenFds()).toStrictEqual(oldFds)); 136 | }); 137 | 138 | test('can be started in non-interactive fashion', async () => { 139 | const oldFds = getOpenFds(); 140 | let buffer = ''; 141 | const onExit = vi.fn(); 142 | 143 | const pty = new Pty({ 144 | command: 'cat', 145 | interactive: false, 146 | onExit, 147 | }); 148 | 149 | const readStream = pty.read; 150 | readStream.on('data', (data) => { 151 | buffer += data.toString(); 152 | }); 153 | 154 | await vi.waitFor(() => expect(onExit).toHaveBeenCalledTimes(1)); 155 | expect(onExit).toHaveBeenCalledWith(null, 0); 156 | 157 | let result = buffer.toString(); 158 | const expectedResult = '\r\n'; 159 | expect(result.trim()).toStrictEqual(expectedResult.trim()); 160 | await vi.waitFor(() => expect(getOpenFds()).toStrictEqual(oldFds)); 161 | }); 162 | 163 | test('can be resized', async () => { 164 | const oldFds = getOpenFds(); 165 | let buffer = ''; 166 | let state: 'expectPrompt' | 'expectDone1' | 'expectDone2' | 'done' = 167 | 'expectPrompt'; 168 | const onExit = vi.fn(); 169 | 170 | const pty = new Pty({ 171 | command: 'sh', 172 | size: { rows: 24, cols: 80 }, 173 | onExit, 174 | }); 175 | 176 | const writeStream = pty.write; 177 | const readStream = pty.read; 178 | 179 | const statePromise = new Promise((resolve) => { 180 | readStream.on('data', (data) => { 181 | buffer += data.toString(); 182 | // If the test is running in a container with privileged 
access, the prompt is # 183 | if ( 184 | state === 'expectPrompt' && 185 | (buffer.endsWith('$ ') || buffer.endsWith('# ')) 186 | ) { 187 | writeStream.write("stty size; echo 'done1'\n"); 188 | state = 'expectDone1'; 189 | return; 190 | } 191 | 192 | if (state === 'expectDone1' && buffer.includes('done1\r\n')) { 193 | state = 'expectDone2'; 194 | expect(buffer).toContain('24 80'); 195 | pty.resize({ rows: 60, cols: 100 }); 196 | 197 | writeStream.write("stty size; echo 'done2'\n"); 198 | return; 199 | } 200 | 201 | if (state === 'expectDone2' && buffer.includes('done2\r\n')) { 202 | expect(buffer).toContain('60 100'); 203 | state = 'done'; 204 | writeStream.write(EOT); 205 | resolve(); 206 | } 207 | }); 208 | }); 209 | 210 | await statePromise; 211 | await vi.waitFor(() => expect(onExit).toHaveBeenCalledTimes(1)); 212 | expect(onExit).toHaveBeenCalledWith(null, 0); 213 | expect(state).toBe('done'); 214 | await vi.waitFor(() => expect(getOpenFds()).toStrictEqual(oldFds)); 215 | }); 216 | 217 | test('respects working directory', async () => { 218 | const oldFds = getOpenFds(); 219 | const cwd = process.cwd(); 220 | let buffer = ''; 221 | const onExit = vi.fn(); 222 | 223 | const pty = new Pty({ 224 | command: 'pwd', 225 | dir: cwd, 226 | onExit, 227 | }); 228 | 229 | const readStream = pty.read; 230 | readStream.on('data', (data) => { 231 | buffer += data.toString(); 232 | }); 233 | 234 | await vi.waitFor(() => expect(onExit).toHaveBeenCalledTimes(1)); 235 | expect(onExit).toHaveBeenCalledWith(null, 0); 236 | expect(buffer.trim()).toBe(cwd); 237 | await vi.waitFor(() => expect(getOpenFds()).toStrictEqual(oldFds)); 238 | }); 239 | 240 | test('respects env', async () => { 241 | const oldFds = getOpenFds(); 242 | const message = 'hello from env'; 243 | let buffer = ''; 244 | const onExit = vi.fn(); 245 | 246 | const pty = new Pty({ 247 | command: 'sh', 248 | args: ['-c', 'echo $ENV_VARIABLE && exit'], 249 | envs: { 250 | ENV_VARIABLE: message, 251 | }, 252 | onExit, 253 
| }); 254 | 255 | const readStream = pty.read; 256 | readStream.on('data', (data) => { 257 | buffer += data.toString(); 258 | }); 259 | 260 | await vi.waitFor(() => expect(onExit).toHaveBeenCalledTimes(1)); 261 | expect(onExit).toHaveBeenCalledWith(null, 0); 262 | expect(buffer.trim()).toBe(message); 263 | await vi.waitFor(() => expect(getOpenFds()).toStrictEqual(oldFds)); 264 | }); 265 | 266 | test("resize after exit shouldn't throw", async () => { 267 | const onExit = vi.fn(); 268 | const pty = new Pty({ 269 | command: 'echo', 270 | args: ['hello'], 271 | onExit, 272 | }); 273 | 274 | pty.read.on('data', () => {}); 275 | 276 | await vi.waitFor(() => expect(onExit).toHaveBeenCalledTimes(1)); 277 | expect(onExit).toHaveBeenCalledWith(null, 0); 278 | expect(() => { 279 | pty.resize({ rows: 60, cols: 100 }); 280 | }).not.toThrow(); 281 | }); 282 | 283 | test("resize after close shouldn't throw", async () => { 284 | const onExit = vi.fn(); 285 | const pty = new Pty({ 286 | command: 'sh', 287 | onExit, 288 | }); 289 | 290 | pty.read.on('data', () => {}); 291 | 292 | pty.close(); 293 | expect(() => { 294 | pty.resize({ rows: 60, cols: 100 }); 295 | }).not.toThrow(); 296 | 297 | process.kill(pty.pid, 'SIGKILL'); 298 | await vi.waitFor(() => expect(onExit).toHaveBeenCalledTimes(1)); 299 | expect(onExit).toHaveBeenCalledWith(null, -1); 300 | expect(pty.write.writable).toBe(false); 301 | expect(pty.read.readable).toBe(false); 302 | }); 303 | 304 | test('ordering is correct', async () => { 305 | const oldFds = getOpenFds(); 306 | let buffer = Buffer.from(''); 307 | const n = 1024; 308 | const onExit = vi.fn(); 309 | 310 | const pty = new Pty({ 311 | command: 'sh', 312 | args: ['-c', `seq 0 ${n}`], 313 | onExit, 314 | }); 315 | 316 | const readStream = pty.read; 317 | readStream.on('data', (data) => { 318 | buffer = Buffer.concat([buffer, data]); 319 | }); 320 | 321 | await vi.waitFor(() => expect(onExit).toHaveBeenCalledTimes(1)); 322 | 
expect(onExit).toHaveBeenCalledWith(null, 0); 323 | 324 | const lines = buffer.toString().trim().split('\n'); 325 | expect(lines.length).toBe(n + 1); 326 | for (let i = 0; i < n + 1; i++) { 327 | expect( 328 | Number(lines[i]), 329 | `expected line ${i} to contain ${i} but got ${lines[i]}`, 330 | ).toBe(i); 331 | } 332 | 333 | await vi.waitFor(() => expect(getOpenFds()).toStrictEqual(oldFds)); 334 | }); 335 | 336 | test('doesnt miss large output from fast commands', async () => { 337 | const payload = `hello`.repeat(4096); 338 | let buffer = Buffer.from(''); 339 | const onExit = vi.fn(); 340 | 341 | const pty = new Pty({ 342 | command: 'echo', 343 | args: ['-n', payload], 344 | onExit, 345 | }); 346 | 347 | const readStream = pty.read; 348 | readStream.on('data', (data) => { 349 | buffer = Buffer.concat([buffer, data]); 350 | }); 351 | 352 | await vi.waitFor(() => expect(onExit).toHaveBeenCalledTimes(1)); 353 | expect(onExit).toHaveBeenCalledWith(null, 0); 354 | expect(buffer.toString().length).toBe(payload.length); 355 | }); 356 | 357 | test('doesnt miss lots of lines from bash', async () => { 358 | const payload = Array.from({ length: 5000 }, (_, i) => i).join('\n'); 359 | let buffer = Buffer.from(''); 360 | const onExit = vi.fn(); 361 | 362 | const pty = new Pty({ 363 | command: 'bash', 364 | args: ['-c', `echo -n "${payload}"`], 365 | onExit, 366 | }); 367 | 368 | const readStream = pty.read; 369 | readStream.on('data', (data) => { 370 | buffer = Buffer.concat([buffer, data]); 371 | }); 372 | 373 | await vi.waitFor(() => expect(onExit).toHaveBeenCalledTimes(1)); 374 | expect(onExit).toHaveBeenCalledWith(null, 0); 375 | expect(buffer.toString().trim().replace(/\r/g, '').length).toBe( 376 | payload.length, 377 | ); 378 | }); 379 | 380 | testSkipOnDarwin('does not leak files', async () => { 381 | const oldFds = getOpenFds(); 382 | const promises = []; 383 | 384 | for (let i = 0; i < 10; i++) { 385 | const onExit = vi.fn(); 386 | let buffer = Buffer.from(''); 387 | 
388 | const pty = new Pty({ 389 | command: 'sh', 390 | args: ['-c', 'sleep 0.1 ; ls /proc/$$/fd'], 391 | onExit, 392 | }); 393 | 394 | const readStream = pty.read; 395 | readStream.on('data', (data) => { 396 | buffer = Buffer.concat([buffer, data]); 397 | }); 398 | 399 | promises.push( 400 | vi 401 | .waitFor(() => expect(onExit).toHaveBeenCalledTimes(1)) 402 | .then(() => { 403 | expect(onExit).toHaveBeenCalledWith(null, 0); 404 | expect( 405 | buffer 406 | .toString() 407 | .trim() 408 | .split(/\s+/) 409 | .filter((fd) => { 410 | // Some shells dup stdio to fd 255 for reasons. 411 | return fd !== '255'; 412 | }) 413 | .toSorted(), 414 | ).toStrictEqual(['0', '1', '2']); 415 | }), 416 | ); 417 | } 418 | 419 | await Promise.all(promises); 420 | await vi.waitFor(() => expect(getOpenFds()).toStrictEqual(oldFds)); 421 | }); 422 | 423 | test('can run concurrent shells', async () => { 424 | const oldFds = getOpenFds(); 425 | const writeStreams: Array = []; 426 | const buffers: Array = []; 427 | const onExits: Array = []; 428 | const expectedResult = 'hello cat\r\nhello cat\r\n'; 429 | 430 | // Create 10 concurrent shells 431 | for (let i = 0; i < 10; i++) { 432 | const onExit = vi.fn(); 433 | onExits.push(onExit); 434 | buffers[i] = Buffer.from(''); 435 | 436 | const pty = new Pty({ 437 | command: 'cat', 438 | onExit, 439 | }); 440 | 441 | const readStream = pty.read; 442 | readStream.on('data', (data) => { 443 | buffers[i] = Buffer.concat([buffers[i], data]); 444 | }); 445 | 446 | writeStreams.push(pty.write); 447 | pty.write.write('hello cat\n'); 448 | } 449 | 450 | // Wait for initial output 451 | await vi.waitFor(() => 452 | buffers.every((buffer) => buffer.toString().includes('hello cat\r\n')), 453 | ); 454 | 455 | // Send EOT to all shells 456 | for (const writeStream of writeStreams) { 457 | writeStream.end(EOT); 458 | } 459 | 460 | // Wait for all shells to exit 461 | await Promise.all( 462 | onExits.map((onExit) => 463 | vi.waitFor(() => 
expect(onExit).toHaveBeenCalledTimes(1)), 464 | ), 465 | ); 466 | 467 | // Verify results 468 | for (let i = 0; i < 10; i++) { 469 | expect(onExits[i]).toHaveBeenCalledWith(null, 0); 470 | let result = buffers[i].toString(); 471 | if (IS_DARWIN) { 472 | result = result.replace('^D\b\b', ''); 473 | } 474 | expect(result).toStrictEqual(expectedResult); 475 | } 476 | 477 | await vi.waitFor(() => expect(getOpenFds()).toStrictEqual(oldFds)); 478 | }); 479 | 480 | test("doesn't break when executing non-existing binary", async () => { 481 | const oldFds = getOpenFds(); 482 | 483 | await expect(async () => { 484 | new Pty({ 485 | command: 'this-does-not-exist', 486 | onExit: () => {}, 487 | }); 488 | }).rejects.toThrow('No such file or directory'); 489 | 490 | await vi.waitFor(() => expect(getOpenFds()).toStrictEqual(oldFds)); 491 | }); 492 | 493 | test('cannot be written to after closing', async () => { 494 | const oldFds = getOpenFds(); 495 | const onExit = vi.fn(); 496 | const pty = new Pty({ 497 | command: 'echo', 498 | args: ['hello'], 499 | onExit, 500 | }); 501 | 502 | const readStream = pty.read; 503 | const writeStream = pty.write; 504 | 505 | readStream.on('data', () => {}); 506 | 507 | pty.close(); 508 | 509 | assert(!writeStream.writable); 510 | await vi.waitFor(() => expect(onExit).toHaveBeenCalledTimes(1)); 511 | let receivedError = false; 512 | writeStream.write('hello2', (error) => { 513 | if (error) { 514 | receivedError = true; 515 | } 516 | }); 517 | await vi.waitFor(() => receivedError); 518 | await vi.waitFor(() => expect(getOpenFds()).toStrictEqual(oldFds)); 519 | }); 520 | 521 | test('cannot resize when out of range', async () => { 522 | const oldFds = getOpenFds(); 523 | 524 | const onExit = vi.fn(); 525 | const pty = new Pty({ 526 | command: 'sh', 527 | size: { rows: 24, cols: 80 }, 528 | onExit, 529 | }); 530 | 531 | pty.read.on('data', () => {}); 532 | 533 | expect(() => pty.resize({ rows: 1, cols: -1 })).toThrow(RangeError); 534 | expect(() => 
pty.resize({ rows: 1, cols: -1 })).toThrow( 535 | /Size \(1x-1\) out of range/, 536 | ); 537 | 538 | expect(() => pty.resize({ rows: 1, cols: 99999 })).toThrow(RangeError); 539 | expect(() => pty.resize({ rows: 1, cols: 99999 })).toThrow( 540 | /Size \(1x99999\) out of range/, 541 | ); 542 | 543 | expect(() => pty.resize({ rows: -1, cols: 1 })).toThrow(RangeError); 544 | expect(() => pty.resize({ rows: -1, cols: 1 })).toThrow( 545 | /Size \(-1x1\) out of range/, 546 | ); 547 | 548 | expect(() => pty.resize({ rows: 99999, cols: 1 })).toThrow(RangeError); 549 | expect(() => pty.resize({ rows: 99999, cols: 1 })).toThrow( 550 | /Size \(99999x1\) out of range/, 551 | ); 552 | 553 | process.kill(pty.pid, 'SIGKILL'); 554 | 555 | await vi.waitFor(() => expect(onExit).toHaveBeenCalledTimes(1)); 556 | expect(onExit).toHaveBeenCalledWith(null, -1); 557 | await vi.waitFor(() => expect(getOpenFds()).toStrictEqual(oldFds)); 558 | }); 559 | }); 560 | 561 | type CgroupState = 562 | | { 563 | version: 'v1'; 564 | sliceDir: string; // Full path for v1 group using primary subsystem (e.g., "/sys/fs/cgroup/cpu/ruspty-xyz") 565 | originalCgroup?: string; // Path within each hierarchy (e.g., "/user.slice") 566 | sliceName: string; // Base name for the group 567 | moved: boolean; 568 | v1Subsystems: string[]; // List of subsystems to manage (e.g., ['cpu', 'memory']) 569 | } 570 | | { 571 | version: 'v2'; 572 | sliceDir: string; // Full path to the slice dir (e.g., "/sys/fs/cgroup/test.slice") 573 | originalCgroup?: string; // Full path to original cgroup dir 574 | sliceName: string; // The slice file name 575 | moved: boolean; 576 | }; 577 | 578 | async function detectCgroupVersion(): Promise<'v1' | 'v2'> { 579 | const cgroupRaw = (await exec('grep cgroup /proc/filesystems')).stdout.trim(); 580 | if (cgroupRaw.includes('cgroup2')) { 581 | return 'v2'; 582 | } 583 | return 'v1'; 584 | } 585 | 586 | async function getCgroupState(): Promise { 587 | const version = await 
detectCgroupVersion(); 588 | const CG_ROOT = '/sys/fs/cgroup'; 589 | const sliceBaseName = `ruspty-${Math.random().toString(36).substring(2, 15)}`; 590 | 591 | if (version === 'v2') { 592 | const sliceName = `${sliceBaseName}.scope`; 593 | const sliceDir = join(CG_ROOT, sliceName); 594 | // cgroup v2 raw format: 0::/test-cgroup/test-cgroup-nested 595 | const cgroupRaw = (await exec(`cat /proc/self/cgroup`)).stdout.trim(); 596 | const cgroupPath = cgroupRaw.split(':').pop() || ''; 597 | const originalCgroup = join(CG_ROOT, cgroupPath.replace(/^\//, '')); 598 | 599 | return { 600 | version, 601 | sliceDir, 602 | originalCgroup, 603 | sliceName, 604 | moved: false, 605 | }; 606 | } else { 607 | // Determine available subsystems to manage (common ones like cpu, memory) 608 | const availableSubsystems = readdirSync(CG_ROOT); 609 | const v1Subsystems = ['cpu', 'memory'].filter((sub) => 610 | availableSubsystems.includes(sub), 611 | ); 612 | let originalCgroup = '/'; 613 | // cgroup v1 raw format: 0:cpu:/user.slice/user-1000.slice/session-c1.scope 614 | const cgroupRaw = (await exec(`cat /proc/self/cgroup`)).stdout.trim(); 615 | const memoryLine = cgroupRaw 616 | .split('\n') 617 | .find((line) => line.includes(':memory:')); 618 | const cpuLine = cgroupRaw 619 | .split('\n') 620 | .find((line) => line.includes(':cpu:')); 621 | if (memoryLine) { 622 | originalCgroup = memoryLine.split(':').pop() || '/'; 623 | } else if (cpuLine) { 624 | originalCgroup = cpuLine.split(':').pop() || '/'; 625 | } 626 | 627 | return { 628 | version, 629 | sliceDir: `/sys/fs/cgroup/${v1Subsystems[0]}/${sliceBaseName}`, 630 | originalCgroup, // Path relative to subsystem root 631 | sliceName: sliceBaseName, // Base name 632 | moved: false, 633 | v1Subsystems, 634 | }; 635 | } 636 | } 637 | 638 | async function cgroupInit(cgroupState: CgroupState) { 639 | if (cgroupState.version === 'v2') { 640 | // create the slice - this is the cgroup that will be used for testing 641 | await exec(`sudo mkdir 
-p ${cgroupState.sliceDir}`); 642 | await exec(`sudo chown -R $(id -u):$(id -g) ${cgroupState.sliceDir}`); 643 | 644 | // add the current process to the slice 645 | // so the spawned pty inherits the slice 646 | await exec( 647 | `echo ${process.pid} | sudo tee ${cgroupState.sliceDir}/cgroup.procs`, 648 | ); 649 | cgroupState.moved = true; 650 | } else { 651 | // cgroup v1 logic 652 | if (!cgroupState.v1Subsystems || cgroupState.v1Subsystems.length === 0) { 653 | throw new Error('No cgroup v1 subsystems found or specified.'); 654 | } 655 | const subsystems = cgroupState.v1Subsystems.join(','); 656 | const groupPath = `/${cgroupState.sliceName}`; 657 | await exec(`sudo cgcreate -g ${subsystems}:${groupPath}`); 658 | // Move the current process into the new cgroup for all specified hierarchies 659 | // This ensures the child process inherits it. 660 | await exec(`sudo cgclassify -g ${subsystems}:${groupPath} ${process.pid}`); 661 | cgroupState.moved = true; 662 | } 663 | } 664 | 665 | async function cgroupCleanup(cgroupState: CgroupState) { 666 | if (cgroupState.version === 'v2') { 667 | // remove the current process from the test slice and return it to its original cgroup 668 | if (cgroupState.moved && cgroupState.originalCgroup) { 669 | await exec( 670 | `echo ${process.pid} | sudo tee ${cgroupState.originalCgroup}/cgroup.procs`, 671 | ); 672 | } 673 | await exec(`sudo rmdir ${cgroupState.sliceDir}`); 674 | } else { 675 | if (!cgroupState.v1Subsystems || cgroupState.v1Subsystems.length === 0) { 676 | // Nothing to clean up if no subsystems were managed 677 | return; 678 | } 679 | const subsystems = cgroupState.v1Subsystems.join(','); 680 | const groupPath = `/${cgroupState.sliceName}`; 681 | // Move the current process back to its original group before deleting the test group. 682 | // We determined originalCgroup path relative to subsystem root earlier. 
683 | if (cgroupState.moved && cgroupState.originalCgroup) { 684 | try { 685 | // cgclassify might fail if the original group was removed or perms changed. 686 | await exec( 687 | `sudo cgclassify -g ${subsystems}:${cgroupState.originalCgroup} ${process.pid}`, 688 | ); 689 | } catch (e) { 690 | console.warn( 691 | `Failed to move process ${process.pid} back to original cgroup v1 '${cgroupState.originalCgroup}'.`, 692 | e, 693 | ); 694 | // Attempt to move to root as a fallback, might fail too. 695 | try { 696 | await exec(`sudo cgclassify -g ${subsystems}:/ ${process.pid}`); 697 | } catch {} 698 | } 699 | } 700 | 701 | await exec(`sudo cgdelete -g ${subsystems}:${groupPath}`); 702 | } 703 | } 704 | 705 | describe('cgroup opts', async () => { 706 | let cgroupState: CgroupState | null = null; 707 | beforeEach(async () => { 708 | if (IS_DARWIN) { 709 | return; 710 | } 711 | 712 | cgroupState = await getCgroupState(); 713 | await cgroupInit(cgroupState); 714 | 715 | return async () => { 716 | if (cgroupState) { 717 | await cgroupCleanup(cgroupState); 718 | } 719 | }; 720 | }); 721 | 722 | testSkipOnDarwin('basic cgroup', async () => { 723 | if (cgroupState === null) { 724 | return; 725 | } 726 | 727 | const oldFds = getOpenFds(); 728 | let buffer = ''; 729 | const onExit = vi.fn(); 730 | 731 | const pty = new Pty({ 732 | command: 'cat', 733 | args: ['/proc/self/cgroup'], 734 | cgroupPath: cgroupState.sliceDir, 735 | onExit, 736 | }); 737 | 738 | const readStream = pty.read; 739 | readStream.on('data', (data) => { 740 | buffer = data.toString(); 741 | }); 742 | 743 | await vi.waitFor(() => expect(onExit).toHaveBeenCalledTimes(1)); 744 | expect(onExit).toHaveBeenCalledWith(null, 0); 745 | // Verify that the process was placed in the correct cgroup by 746 | // checking its output contains our unique slice name 747 | expect(buffer).toContain(cgroupState.sliceName); 748 | await vi.waitFor(() => expect(getOpenFds()).toStrictEqual(oldFds)); 749 | }); 750 | 751 | 
testOnlyOnDarwin('cgroup is not supported on darwin', async () => { 752 | expect(() => { 753 | new Pty({ 754 | command: 'cat', 755 | args: ['/proc/self/cgroup'], 756 | cgroupPath: '/sys/fs/cgroup/test.slice', 757 | onExit: vi.fn(), 758 | }); 759 | }).toThrowError(); 760 | }); 761 | }); 762 | 763 | describe('sandbox opts', { repeats: 10 }, async () => { 764 | let tempDirPath = '/inexistent/path/before'; 765 | let cgroupState: CgroupState | null; 766 | beforeEach(async () => { 767 | if (IS_DARWIN) { 768 | return; 769 | } 770 | 771 | cgroupState = await getCgroupState(); 772 | await cgroupInit(cgroupState); 773 | tempDirPath = await mkdtemp(join(tmpdir(), 'ruspty-')); 774 | 775 | return async () => { 776 | if (cgroupState) { 777 | await cgroupCleanup(cgroupState); 778 | } 779 | 780 | if (tempDirPath !== '/inexistent/path/before') { 781 | await rm(tempDirPath, { recursive: true }); 782 | tempDirPath = '/inexistent/path/after'; 783 | } 784 | }; 785 | }); 786 | 787 | testSkipOnDarwin('basic sandbox', async () => { 788 | if (cgroupState === null) { 789 | return; 790 | } 791 | 792 | const oldFds = getOpenFds(); 793 | let buffer = ''; 794 | const onExit = vi.fn(); 795 | 796 | const pty = new Pty({ 797 | command: 'sh', 798 | args: ['-c', 'echo hello'], 799 | cgroupPath: cgroupState.sliceDir, 800 | sandbox: { 801 | rules: [ 802 | { 803 | operation: Operation.Modify, 804 | prefixes: [tempDirPath], 805 | message: 'Tried to modify a forbidden path', 806 | }, 807 | { 808 | operation: Operation.Delete, 809 | prefixes: [tempDirPath], 810 | message: 'Tried to delete a forbidden path', 811 | }, 812 | ], 813 | }, 814 | onExit, 815 | }); 816 | 817 | const readStream = pty.read; 818 | readStream.on('data', (data) => { 819 | buffer = data.toString(); 820 | }); 821 | 822 | await vi.waitFor(() => expect(onExit).toHaveBeenCalledTimes(1)); 823 | expect(onExit).toHaveBeenCalledWith(null, 0); 824 | expect(buffer).toContain('hello'); 825 | await vi.waitFor(() => 
expect(getOpenFds()).toStrictEqual(oldFds)); 826 | }); 827 | 828 | testSkipOnDarwin('basic protection against git-yeetage', async () => { 829 | if (cgroupState === null) { 830 | return; 831 | } 832 | 833 | const oldFds = getOpenFds(); 834 | let buffer = ''; 835 | const onExit = vi.fn(); 836 | 837 | const gitPath = `${tempDirPath}/.git`; 838 | await mkdir(gitPath); 839 | const pty = new Pty({ 840 | command: 'sh', 841 | args: ['-c', `sh -c "rm -rf ${gitPath}"`], 842 | cgroupPath: cgroupState.sliceDir, 843 | sandbox: { 844 | rules: [ 845 | { 846 | operation: Operation.Delete, 847 | prefixes: [gitPath], 848 | message: 'Tried to delete a forbidden path', 849 | }, 850 | ], 851 | }, 852 | envs: process.env.PATH 853 | ? { 854 | PATH: process.env.PATH, 855 | } 856 | : {}, 857 | onExit, 858 | }); 859 | 860 | const readStream = pty.read; 861 | readStream.on('data', (data) => { 862 | buffer = data.toString(); 863 | }); 864 | 865 | await vi.waitFor(() => expect(onExit).toHaveBeenCalledTimes(1)); 866 | expect(onExit).toHaveBeenCalledWith(null, 254); 867 | expect(buffer.trimEnd()).toBe( 868 | `Tried to delete a forbidden path: ${gitPath}`, 869 | ); 870 | await vi.waitFor(() => expect(getOpenFds()).toStrictEqual(oldFds)); 871 | }); 872 | 873 | testSkipOnDarwin('can exclude prefixes', async () => { 874 | if (cgroupState === null) { 875 | return; 876 | } 877 | 878 | const oldFds = getOpenFds(); 879 | let buffer = ''; 880 | const onExit = vi.fn(); 881 | 882 | const gitPath = `${tempDirPath}/.git`; 883 | await mkdir(gitPath); 884 | const indexLockPath = `${gitPath}/index.lock`; 885 | await writeFile(indexLockPath, 'locked'); 886 | const pty = new Pty({ 887 | command: 'sh', 888 | args: ['-c', `sh -c "rm -f ${indexLockPath}"`], 889 | cgroupPath: cgroupState.sliceDir, 890 | sandbox: { 891 | rules: [ 892 | { 893 | operation: Operation.Delete, 894 | prefixes: [gitPath], 895 | excludePrefixes: [indexLockPath], 896 | message: 'Tried to delete a forbidden path', 897 | }, 898 | ], 899 | }, 900 
| envs: process.env.PATH 901 | ? { 902 | PATH: process.env.PATH, 903 | } 904 | : {}, 905 | onExit, 906 | }); 907 | 908 | const readStream = pty.read; 909 | readStream.on('data', (data) => { 910 | buffer = data.toString(); 911 | }); 912 | 913 | await vi.waitFor(() => expect(onExit).toHaveBeenCalledTimes(1)); 914 | expect(onExit).toHaveBeenCalledWith(null, 0); 915 | expect(buffer.trimEnd()).toBe(''); 916 | await vi.waitFor(() => expect(getOpenFds()).toStrictEqual(oldFds)); 917 | }); 918 | }); 919 | 920 | describe('setCloseOnExec', () => { 921 | test('setCloseOnExec', () => { 922 | // stdio typically never has the close-on-exec flag since it's always expected to be 923 | // inheritable. But just to be safe, we'll keep it as it was when started. 924 | const originalFlag = getCloseOnExec(0); 925 | 926 | for (const closeOnExec of [true, false]) { 927 | setCloseOnExec(0, closeOnExec); 928 | expect(getCloseOnExec(0)).toBe(closeOnExec); 929 | } 930 | 931 | setCloseOnExec(0, originalFlag); 932 | }); 933 | }); 934 | -------------------------------------------------------------------------------- /src/sandbox.rs: -------------------------------------------------------------------------------- 1 | /// A minimalistic ptrace-based sandbox. 2 | /// 3 | /// Modern (2025-era) sandboxes should use seccomp-bpf + user notification, but at Replit, there's 4 | /// already one such sandbox in use, so it cannot be used. Instead, an old (2000s-era) ptrace-based 5 | /// sandbox is used. It is not intended to be secure, just to prevent accidents. 6 | /// 7 | /// Note that it is important for this whole library to consistently use [nix::libc::_exit] instead 8 | /// of [std::process:exit], because the latter runs atexit handlers, which will cause the process 9 | /// to segfault. 
10 | use std::ffi::CStr; 11 | use std::fs::read_link; 12 | use std::panic::catch_unwind; 13 | use std::path::PathBuf; 14 | 15 | use anyhow::{Context, Result}; 16 | use log::{debug, error}; 17 | use nix::fcntl::OFlag; 18 | use nix::libc::{self, c_int}; 19 | use nix::sys::prctl::set_name; 20 | use nix::sys::ptrace; 21 | use nix::sys::signal::{kill, raise, signal, sigprocmask, SigSet, SigmaskHow, Signal}; 22 | use nix::sys::wait::{wait, waitpid, WaitStatus}; 23 | use nix::unistd::{fork, ForkResult, Pid}; 24 | use nix::Error; 25 | use syscalls::x86_64::Sysno; 26 | 27 | const AT_FDCWD: u64 = 0xffffff9c; 28 | const AT_FDCWD64: u64 = 0xffffffffffffff9c; 29 | 30 | static mut CHILD_PID: Pid = Pid::from_raw(-1); 31 | 32 | /// Read a path (a NUL-terminated string) from the tracee. 33 | fn read_path(pid: Pid, mut addr: u64) -> Result { 34 | // All reads must be word-aligned. 35 | const ALIGNMENT: u64 = 0x7; 36 | let mut buf = Vec::::with_capacity(1024); 37 | let mut offset = (addr & ALIGNMENT) as usize; 38 | addr &= !ALIGNMENT; 39 | // We should limit ourselves to MAX_PATH, but we'll add quite a bit of leeway. 
40 | while buf.len() < 8_192 { 41 | match ptrace::read(pid, addr as ptrace::AddressType) { 42 | Ok(ret) => { 43 | let bytes = ret.to_ne_bytes(); 44 | let (slice, last) = match bytes.as_slice()[offset..].iter().position(|x| *x == 0) { 45 | Some(end) => (&bytes.as_slice()[offset..offset + end], true), 46 | None => (&bytes.as_slice()[offset..], false), 47 | }; 48 | buf.extend_from_slice(slice); 49 | if last { 50 | return Ok(PathBuf::from( 51 | String::from_utf8(buf).context("decode string")?, 52 | )); 53 | } 54 | offset = 0; 55 | addr += 8; 56 | } 57 | Err(Error::ESRCH) => { 58 | return Err(anyhow::Error::new(Error::ESRCH)) 59 | .with_context(|| format!("process exited: {pid}")); 60 | } 61 | Err(err) => { 62 | return Err(anyhow::Error::new(err)) 63 | .with_context(|| format!("failed to read string: {pid} at 0x{addr:x}")); 64 | } 65 | } 66 | } 67 | anyhow::bail!("path exceeds MAX_PATH"); 68 | } 69 | 70 | /// Get the tracee's cwd. 71 | fn get_cwd(pid: Pid) -> Result { 72 | read_link(format!("/proc/{}/cwd", pid)).with_context(|| format!("get cwd: /proc/{pid}/cwd")) 73 | } 74 | 75 | /// Get the tracee's path for a file descriptor. 76 | fn get_fd_path(pid: Pid, fd: i32) -> Result { 77 | read_link(format!("/proc/{}/fd/{}", pid, fd)) 78 | .with_context(|| format!("get path: /proc/{pid}/fd/{fd}")) 79 | } 80 | 81 | struct SyscallTarget { 82 | operation: Operation, 83 | sysno: Sysno, 84 | path: PathBuf, 85 | } 86 | 87 | /// Get the tracee's target path for the syscall that is about to be executed by the kernel. 88 | fn get_syscall_targets(pid: Pid) -> Result> { 89 | let regs = match ptrace::getregs(pid) { 90 | Ok(regs) => regs, 91 | Err(Error::ESRCH) => return Ok(vec![]), // process gone, no targets 92 | Err(err) => return Err(err).context("ptrace::getregs"), 93 | }; 94 | if regs.rax != (-(Error::ENOSYS as i32)) as u64 { 95 | // This is a syscall-exit-stop, and we have already made the decision of allowing / denying the operation. 
96 | return Ok(vec![]); 97 | } 98 | match Sysno::new(regs.orig_rax as usize) { 99 | Some(sysno @ Sysno::open) => { 100 | let mut path = get_cwd(pid).context("open: get cwd")?; 101 | path.push(read_path(pid, regs.rdi as u64).context("open: read path")?); 102 | debug!(pid:? = pid, filename:?= path, sysno:?=sysno; "syscall"); 103 | let accmode = (regs.rsi & OFlag::O_ACCMODE.bits() as u64) as c_int; 104 | if accmode != OFlag::O_WRONLY.bits() && accmode != OFlag::O_RDWR.bits() { 105 | return Ok(vec![]); 106 | } 107 | Ok(vec![SyscallTarget { 108 | operation: Operation::Modify, 109 | sysno, 110 | path, 111 | }]) 112 | } 113 | Some(sysno @ Sysno::truncate) => { 114 | let mut path = get_cwd(pid).context("truncate: get cwd")?; 115 | path.push(read_path(pid, regs.rdi as u64).context("truncate: read path")?); 116 | debug!(pid:? = pid, filename:?= path, sysno:?=sysno; "syscall"); 117 | Ok(vec![SyscallTarget { 118 | operation: Operation::Modify, 119 | sysno, 120 | path, 121 | }]) 122 | } 123 | Some(sysno @ Sysno::rmdir) => { 124 | let mut path = get_cwd(pid).context("rmdir: get cwd")?; 125 | path.push(read_path(pid, regs.rdi as u64).context("rmdir: read path")?); 126 | debug!(pid:? = pid, filename:?= path, sysno:?=sysno; "syscall"); 127 | Ok(vec![SyscallTarget { 128 | operation: Operation::Delete, 129 | sysno, 130 | path, 131 | }]) 132 | } 133 | Some(sysno @ Sysno::rename) => { 134 | let cwd = get_cwd(pid).context("rename: get cwd")?; 135 | let oldname = cwd.join(read_path(pid, regs.rdi as u64).context("rename: read oldname")?); 136 | let newname = cwd.join(read_path(pid, regs.rsi as u64).context("rename: read newname")?); 137 | debug!(pid:? = pid, oldname:?= oldname, newname:? 
= newname, sysno:?=sysno; "syscall");
      // rename removes the old name and (over)writes the new one, so it
      // yields one Delete target and one Modify target.
      Ok(vec![
        SyscallTarget {
          operation: Operation::Delete,
          sysno,
          path: oldname,
        },
        SyscallTarget {
          operation: Operation::Modify,
          sysno,
          path: newname,
        },
      ])
    }
    Some(sysno @ Sysno::creat) => {
      let mut path = get_cwd(pid).context("creat: get cwd")?;
      path.push(read_path(pid, regs.rdi as u64).context("creat: read path")?);
      debug!(pid:? = pid, filename:?= path, sysno:?=sysno; "syscall");
      Ok(vec![SyscallTarget {
        operation: Operation::Modify,
        sysno,
        path,
      }])
    }
    Some(sysno @ Sysno::link) => {
      let cwd = get_cwd(pid).context("link: get cwd")?;
      let oldname = cwd.join(read_path(pid, regs.rdi as u64).context("link: read oldname")?);
      let newname = cwd.join(read_path(pid, regs.rsi as u64).context("link: read newname")?);
      debug!(pid:? = pid, oldname:?= oldname, newname:? = newname, sysno:?=sysno; "syscall");
      Ok(vec![SyscallTarget {
        operation: Operation::Modify,
        sysno,
        path: newname,
      }])
    }
    Some(sysno @ Sysno::unlink) => {
      let mut path = get_cwd(pid).context("unlink: get cwd")?;
      path.push(read_path(pid, regs.rdi as u64).context("unlink: read path")?);
      debug!(pid:? = pid, filename:?= path, sysno:?=sysno; "syscall");
      Ok(vec![SyscallTarget {
        operation: Operation::Delete,
        sysno,
        path,
      }])
    }
    Some(sysno @ Sysno::symlink) => {
      let cwd = get_cwd(pid).context("symlink: get cwd")?;
      let oldname = cwd.join(read_path(pid, regs.rdi as u64).context("symlink: read oldname")?);
      let newname = cwd.join(read_path(pid, regs.rsi as u64).context("symlink: read newname")?);
      debug!(pid:? = pid, oldname:?= oldname, newname:?
= newname, sysno:?=sysno; "syscall");
      Ok(vec![SyscallTarget {
        operation: Operation::Modify,
        sysno,
        path: newname,
      }])
    }
    Some(sysno @ Sysno::openat) => {
      let mut path = match regs.rdi {
        AT_FDCWD64 | AT_FDCWD => get_cwd(pid).context("openat: get cwd")?,
        dirfd => get_fd_path(pid, dirfd as i32)
          .with_context(|| format!("openat: get fd path {:x}", regs.rdi))?,
      };
      path.push(read_path(pid, regs.rsi as u64).context("openat: read path")?);
      debug!(pid:? = pid, filename:?= path, sysno:?=sysno; "syscall");
      let accmode = (regs.rdx & OFlag::O_ACCMODE.bits() as u64) as c_int;
      // BUGFIX: O_RDONLY | O_TRUNC still truncates the file on Linux, so the
      // access-mode check alone is bypassable; flag O_TRUNC as a modification.
      let truncates = regs.rdx & OFlag::O_TRUNC.bits() as u64 != 0;
      if accmode != OFlag::O_WRONLY.bits() && accmode != OFlag::O_RDWR.bits() && !truncates {
        return Ok(vec![]);
      }
      Ok(vec![SyscallTarget {
        operation: Operation::Modify,
        sysno,
        path,
      }])
    }
    Some(sysno @ Sysno::unlinkat) => {
      let mut path = match regs.rdi {
        AT_FDCWD64 | AT_FDCWD => get_cwd(pid).context("unlinkat: get cwd")?,
        dirfd => get_fd_path(pid, dirfd as i32)
          .with_context(|| format!("unlinkat: get fd path {:x}", regs.rdi))?,
      };
      path.push(read_path(pid, regs.rsi as u64).context("unlinkat: read path")?);
      debug!(pid:?
= pid, filename:?= path, sysno:?=sysno; "syscall");
      Ok(vec![SyscallTarget {
        operation: Operation::Delete,
        sysno,
        path,
      }])
    }
    Some(sysno @ Sysno::renameat) => {
      let mut oldname = match regs.rdi {
        AT_FDCWD64 | AT_FDCWD => get_cwd(pid).context("renameat: get old cwd")?,
        dirfd => get_fd_path(pid, dirfd as i32)
          .with_context(|| format!("renameat: get old fd path {:x}", regs.rdi))?,
      };
      oldname.push(read_path(pid, regs.rsi as u64).context("renameat: get old path")?);
      let mut newname = match regs.rdx {
        AT_FDCWD64 | AT_FDCWD => get_cwd(pid).context("renameat: get new cwd")?,
        // BUGFIX: this context used to format regs.rdi; the new dirfd is rdx.
        dirfd => get_fd_path(pid, dirfd as i32)
          .with_context(|| format!("renameat: get new fd path {:x}", regs.rdx))?,
      };
      newname.push(read_path(pid, regs.r10 as u64).context("renameat: get new path")?);
      debug!(pid:? = pid, oldname:?= oldname, newname:? = newname, sysno:?=sysno; "syscall");
      Ok(vec![
        SyscallTarget {
          operation: Operation::Delete,
          sysno,
          path: oldname,
        },
        SyscallTarget {
          operation: Operation::Modify,
          sysno,
          path: newname,
        },
      ])
    }
    Some(sysno @ Sysno::linkat) => {
      // linkat(olddirfd, oldpath, newdirfd, newpath, flags) arrives in
      // rdi, rsi, rdx, r10, r8 per the x86-64 syscall convention.
      let mut oldpath = match regs.rdi {
        AT_FDCWD64 | AT_FDCWD => get_cwd(pid).context("linkat: get old cwd")?,
        dirfd => get_fd_path(pid, dirfd as i32)
          .with_context(|| format!("linkat: get old fd path {:x}", regs.rdi))?,
      };
      oldpath.push(read_path(pid, regs.rsi as u64).context("linkat: get old path")?);
      let mut newpath = match regs.rdx {
        AT_FDCWD64 | AT_FDCWD => get_cwd(pid).context("linkat: get new cwd")?,
        // BUGFIX: this context used to format regs.rdi; the new dirfd is rdx.
        dirfd => get_fd_path(pid, dirfd as i32)
          .with_context(|| format!("linkat: get new fd path {:x}", regs.rdx))?,
      };
      // BUGFIX: newpath is the fourth argument (r10). The old code read rsi
      // here, i.e. the *old* path, so a hard link created inside a protected
      // prefix was checked against the wrong path and could slip through.
      newpath.push(read_path(pid, regs.r10 as u64).context("linkat: get new path")?);
      debug!(pid:? = pid, oldpath:?= oldpath, newpath:?
= newpath, sysno:?=sysno; "syscall");
      Ok(vec![SyscallTarget {
        operation: Operation::Modify,
        sysno,
        path: newpath,
      }])
    }
    Some(sysno @ Sysno::symlinkat) => {
      // symlinkat(target, newdirfd, linkpath): only the link being created
      // counts as a modification; the target is just logged.
      let cwd = get_cwd(pid).context("symlinkat: get cwd")?;
      let oldname = cwd.join(read_path(pid, regs.rdi as u64).context("symlinkat: read oldname")?);
      let mut newname = match regs.rsi {
        AT_FDCWD64 | AT_FDCWD => get_cwd(pid).context("symlinkat: get new cwd")?,
        dirfd => get_fd_path(pid, dirfd as i32)
          .with_context(|| format!("symlinkat: get newfd path {:x}", regs.rsi))?,
      };
      newname.push(read_path(pid, regs.rdx as u64).context("symlinkat: get newname")?);
      debug!(pid:?=pid, oldname:?=oldname, newname:?=newname, sysno:?=sysno; "syscall");
      Ok(vec![SyscallTarget {
        operation: Operation::Modify,
        sysno,
        path: newname,
      }])
    }
    Some(sysno @ Sysno::renameat2) => {
      let mut oldpath = match regs.rdi {
        AT_FDCWD64 | AT_FDCWD => get_cwd(pid).context("renameat2: get old cwd")?,
        dirfd => get_fd_path(pid, dirfd as i32)
          .with_context(|| format!("renameat2: get old fd path {:x}", regs.rdi))?,
      };
      oldpath.push(read_path(pid, regs.rsi as u64).context("renameat2: get old path")?);
      let mut newpath = match regs.rdx {
        AT_FDCWD64 | AT_FDCWD => get_cwd(pid).context("renameat2: get new cwd")?,
        // BUGFIX: this context used to format regs.rdi; the new dirfd is rdx.
        dirfd => get_fd_path(pid, dirfd as i32)
          .with_context(|| format!("renameat2: get new fd path {:x}", regs.rdx))?,
      };
      newpath.push(read_path(pid, regs.r10 as u64).context("renameat2: get new path")?);
      debug!(pid:? = pid, oldpath:?= oldpath, newpath:?
= newpath, sysno:?=sysno; "syscall");
      Ok(vec![
        SyscallTarget {
          operation: Operation::Delete,
          sysno,
          path: oldpath,
        },
        SyscallTarget {
          operation: Operation::Modify,
          sysno,
          path: newpath,
        },
      ])
    }
    Some(sysno @ Sysno::openat2) => {
      let mut path = match regs.rdi {
        AT_FDCWD64 | AT_FDCWD => get_cwd(pid).context("openat2: get cwd")?,
        dirfd => get_fd_path(pid, dirfd as i32)
          .with_context(|| format!("openat2: get fd path {:x}", regs.rdi))?,
      };
      path.push(read_path(pid, regs.rsi as u64).context("openat2: read path")?);
      // BUGFIX: openat2(2) passes its flags inside `struct open_how`; rdx
      // holds a *pointer* to that struct, not the flag word, so masking rdx
      // with O_ACCMODE was meaningless and could let writes through.
      // `open_how.flags` is the first u64 field, so one word read suffices.
      let flags = match ptrace::read(pid, regs.rdx as ptrace::AddressType) {
        Ok(word) => word as u64,
        Err(Error::ESRCH) => return Ok(vec![]), // process gone, no targets
        Err(err) => {
          return Err(err).with_context(|| format!("openat2: read open_how at 0x{:x}", regs.rdx));
        }
      };
      let accmode = (flags & OFlag::O_ACCMODE.bits() as u64) as c_int;
      // O_TRUNC truncates even with O_RDONLY on Linux; treat as modification.
      let truncates = flags & OFlag::O_TRUNC.bits() as u64 != 0;
      if accmode != OFlag::O_WRONLY.bits() && accmode != OFlag::O_RDWR.bits() && !truncates {
        return Ok(vec![]);
      }
      debug!(pid:? = pid, filename:?= path, sysno:?=sysno; "syscall");
      Ok(vec![SyscallTarget {
        operation: Operation::Modify,
        sysno,
        path,
      }])
    }
    Some(sysno) => {
      debug!(pid:? = pid, sysno:?=sysno.name(); "syscall");

      Ok(vec![])
    }
    None => {
      // We don't know what this is.
      Ok(vec![])
    }
  }
}

/// Error raised when a syscall target matches a sandbox rule.
#[derive(Debug, PartialEq, Eq)]
pub struct SandboxError {
  sysno: Sysno,
  message: String,
  path: PathBuf,
}

impl std::fmt::Display for SandboxError {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    write!(f, "{}: {}", self.message, self.path.display())
  }
}

impl std::error::Error for SandboxError {
  fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
    None
  }
}

/// Inspect the tracee's syscall that is about to be executed.
fn handle_syscall(pid: Pid, options: &Options) -> Result<()> {
  for target in get_syscall_targets(pid).context("get_target_path")?
{ 367 | let path_str = match target.path.as_path().to_str() { 368 | Some(path_str) => path_str, 369 | None => { 370 | continue; 371 | } 372 | }; 373 | for rule in &options.rules { 374 | if target.operation != rule.operation { 375 | continue; 376 | } 377 | 378 | // Check if path matches any prefix 379 | let matches_prefix = rule 380 | .prefixes 381 | .iter() 382 | .any(|prefix| path_str.starts_with(prefix)); 383 | if !matches_prefix { 384 | continue; 385 | } 386 | 387 | // Path matches operation and a prefix, now check excludes 388 | if let Some(exclude_prefixes) = &rule.exclude_prefixes { 389 | let matches_exclude = exclude_prefixes 390 | .iter() 391 | .any(|exclude| path_str.starts_with(exclude)); 392 | if matches_exclude { 393 | continue; // This rule doesn't apply due to exclude 394 | } 395 | } 396 | 397 | // Rule applies - return error 398 | return Err( 399 | SandboxError { 400 | sysno: target.sysno, 401 | message: rule.message.clone(), 402 | path: target.path, 403 | } 404 | .into(), 405 | ); 406 | } 407 | } 408 | 409 | Ok(()) 410 | } 411 | 412 | extern "C" fn forward_signal(signum: c_int) { 413 | debug!(signum; "received signal"); 414 | if let Ok(signal) = Signal::try_from(signum) { 415 | let err = unsafe { kill(CHILD_PID, signal) }; 416 | debug!(signum, err:? = err, pid:? = unsafe { CHILD_PID }; "sent signal"); 417 | } 418 | } 419 | 420 | /// Run the tracee under the sandbox. 421 | fn run_parent(main_pid: Pid, options: &Options) -> Result { 422 | set_name(CStr::from_bytes_with_nul(b"sandbox\0").context("create process name")?) 423 | .context("set process name")?; 424 | unsafe { 425 | CHILD_PID = main_pid; 426 | // Forward all signals to the child process. 
427 | for signum in Signal::iterator() { 428 | if signum == Signal::SIGKILL || signum == Signal::SIGCHLD || signum == Signal::SIGSTOP { 429 | continue; 430 | } 431 | match signal( 432 | signum, 433 | nix::sys::signal::SigHandler::Handler(forward_signal), 434 | ) { 435 | Ok(_) => {} 436 | Err(err) => { 437 | return Err(err).with_context(|| { 438 | format!( 439 | "failed to install signal handler for {:?}: {:?}", 440 | signum, err 441 | ) 442 | }); 443 | } 444 | } 445 | } 446 | 447 | // Close all open file descriptors, except stderr. 448 | let close_range_flags: c_int = 0; 449 | libc::syscall(libc::SYS_close_range, 0, 1, close_range_flags); 450 | libc::syscall( 451 | libc::SYS_close_range, 452 | 3, 453 | libc::c_uint::MAX, 454 | close_range_flags, 455 | ); 456 | } 457 | 458 | // The child process will send a SIGCHLD. 459 | match waitpid(main_pid, None).with_context(|| format!("waitpid {main_pid}"))? { 460 | WaitStatus::Exited(_, status_code) => { 461 | return Ok(status_code); 462 | } 463 | WaitStatus::Signaled(_, sig_num, _core_dump) => { 464 | return Ok(128 + sig_num as i32); 465 | } 466 | WaitStatus::Stopped(..) 467 | | WaitStatus::Continued(..) 468 | | WaitStatus::StillAlive 469 | | WaitStatus::PtraceEvent(..) 470 | | WaitStatus::PtraceSyscall(..) => {} 471 | } 472 | match ptrace::setoptions( 473 | main_pid, 474 | ptrace::Options::PTRACE_O_TRACESYSGOOD 475 | | ptrace::Options::PTRACE_O_TRACEFORK 476 | | ptrace::Options::PTRACE_O_TRACEVFORK 477 | | ptrace::Options::PTRACE_O_TRACECLONE 478 | | ptrace::Options::PTRACE_O_EXITKILL 479 | | ptrace::Options::PTRACE_O_TRACEEXIT, 480 | ) { 481 | Ok(_) => {} 482 | Err(Error::ESRCH) => { 483 | // The child process has already exited. 484 | return Ok(0); 485 | } 486 | Err(err) => { 487 | return Err(err).context("ptrace::setoptions"); 488 | } 489 | } 490 | match ptrace::syscall(main_pid, None) { 491 | Ok(_) => {} 492 | Err(Error::ESRCH) => { 493 | // The child process has already exited. 
494 | return Ok(0); 495 | } 496 | Err(err) => { 497 | return Err(err).context("failed to continue process"); 498 | } 499 | } 500 | 501 | loop { 502 | match wait() { 503 | Ok(WaitStatus::Stopped(pid, sig_num)) => match sig_num { 504 | signum @ Signal::SIGTRAP => { 505 | debug!(signal:?=signum, pid:? = pid; "signal"); 506 | match handle_syscall(pid, options) 507 | .with_context(|| format!("handle_sigtrap pid={pid}, signum={signum}")) 508 | { 509 | Ok(()) => match ptrace::syscall(pid, None) { 510 | Ok(_) => {} 511 | Err(Error::ESRCH) => {} 512 | Err(err) => { 513 | return Err(anyhow::Error::new(err).context("failed to continue process")); 514 | } 515 | }, 516 | Err(err) => { 517 | match ptrace::kill(pid) { 518 | Ok(_) => {} 519 | Err(Error::ESRCH) => {} 520 | Err(err) => { 521 | error!(pid:? = pid, error:?=err; "failed to kill process"); 522 | } 523 | } 524 | return Err(err); 525 | } 526 | } 527 | } 528 | signum @ Signal::SIGSTOP => { 529 | debug!(signal:?=signum, pid:? = pid; "signal"); 530 | match ptrace::setoptions( 531 | pid, 532 | ptrace::Options::PTRACE_O_TRACESYSGOOD 533 | | ptrace::Options::PTRACE_O_TRACEFORK 534 | | ptrace::Options::PTRACE_O_TRACEVFORK 535 | | ptrace::Options::PTRACE_O_TRACECLONE 536 | | ptrace::Options::PTRACE_O_EXITKILL 537 | | ptrace::Options::PTRACE_O_TRACEEXIT, 538 | ) { 539 | Ok(_) => {} 540 | Err(Error::ESRCH) => {} 541 | Err(err) => { 542 | return Err(anyhow::Error::new(err).context("setoptions")); 543 | } 544 | } 545 | match ptrace::syscall(pid, None) { 546 | Ok(_) => {} 547 | Err(Error::ESRCH) => {} 548 | Err(err) => { 549 | return Err(anyhow::Error::new(err).context("failed to continue process")); 550 | } 551 | } 552 | } 553 | signum => { 554 | debug!(signal:?=signum, pid:? 
= pid; "signal"); 555 | match ptrace::syscall(pid, Some(signum)) { 556 | Ok(_) => {} 557 | Err(Error::ESRCH) => {} 558 | Err(err) => { 559 | return Err(anyhow::Error::new(err).context("failed to continue process")); 560 | } 561 | } 562 | } 563 | }, 564 | 565 | Ok(WaitStatus::PtraceSyscall(pid)) => { 566 | match handle_syscall(pid, options).with_context(|| format!("handle_syscall pid={pid}")) { 567 | Ok(()) => match ptrace::syscall(pid, None) { 568 | Ok(_) => {} 569 | Err(Error::ESRCH) => {} 570 | Err(err) => { 571 | return Err(anyhow::Error::new(err).context("failed to continue process")); 572 | } 573 | }, 574 | Err(err) => { 575 | match ptrace::kill(pid) { 576 | Ok(_) => {} 577 | Err(Error::ESRCH) => {} 578 | Err(err) => { 579 | error!(pid:? = pid, error:?=err; "failed to kill process"); 580 | } 581 | } 582 | return Err(err); 583 | } 584 | } 585 | } 586 | 587 | Ok(WaitStatus::PtraceEvent(pid, _sig_num, _data)) => match ptrace::syscall(pid, None) { 588 | Ok(_) => {} 589 | Err(Error::ESRCH) => {} 590 | Err(err) => { 591 | return Err(anyhow::Error::new(err).context("failed to continue process")); 592 | } 593 | }, 594 | 595 | Ok(WaitStatus::Exited(pid, exit_status)) => { 596 | debug!(pid:? = pid, exit_status:? = exit_status; "exited"); 597 | if pid == main_pid { 598 | return Ok(exit_status); 599 | } 600 | } 601 | 602 | Ok(WaitStatus::Signaled(pid, sig_num, _core_dump)) => { 603 | debug!(pid:? = pid, signal:? = sig_num; "signaled"); 604 | if pid == main_pid { 605 | return Ok(128 + sig_num as i32); 606 | } 607 | match ptrace::syscall(pid, Some(sig_num)) { 608 | Ok(_) => {} 609 | Err(Error::ESRCH) => {} 610 | Err(err) => { 611 | return Err(anyhow::Error::new(err).context("failed to continue process")); 612 | } 613 | } 614 | } 615 | 616 | Ok(status) => { 617 | debug!(pid:? = main_pid, status:? 
= status; "wait"); 618 | match ptrace::syscall(main_pid, None) { 619 | Ok(_) => {} 620 | Err(Error::ESRCH) => {} 621 | Err(err) => { 622 | return Err(anyhow::Error::new(err).context("failed to continue process")); 623 | } 624 | } 625 | } 626 | 627 | Err(Error::ECHILD) => { 628 | // No more children! We're done. 629 | break; 630 | } 631 | 632 | Err(err) => { 633 | error!("Some kind of error - {:?}", err); 634 | break; 635 | } 636 | } 637 | } 638 | 639 | Ok(0) 640 | } 641 | 642 | #[derive(PartialEq, Eq, Clone)] 643 | pub enum Operation { 644 | Modify, 645 | Delete, 646 | } 647 | 648 | /// Sandboxing rules. Deleting / modifying a path with any of the prefixes is forbidden and will 649 | /// cause process termination. 650 | #[derive(Clone)] 651 | pub struct Rule { 652 | /// The forbidden operation. 653 | pub operation: Operation, 654 | /// The list of prefixes that are matched by this rule. 655 | pub prefixes: Vec, 656 | /// The list of prefixes that are excluded from this rule. 657 | pub exclude_prefixes: Option>, 658 | /// The message to be shown if this rule triggers. 659 | pub message: String, 660 | } 661 | 662 | /// Options for the sandbox. 663 | #[derive(Clone)] 664 | pub struct Options { 665 | pub rules: Vec, 666 | } 667 | 668 | /// Install a sandbox in "the current process". 669 | /// 670 | /// In reality this forks the process and the child process is the one that is run under the sandbox. 671 | /// The parent process is not accessible and is the one that actually runs the sandbox. 672 | /// This is intended to be used as a "pre-execve" hook. 673 | /// 674 | /// Modifying the forbidden paths / unlinking the forbidden prefixes will result in the sandboxed process being killed. 
pub fn install_sandbox(options: Options) -> Result<()> {
  // Reset signal handlers
  for signum in Signal::iterator() {
    if signum == Signal::SIGKILL || signum == Signal::SIGCHLD || signum == Signal::SIGSTOP {
      continue;
    }
    match unsafe { signal(signum, nix::sys::signal::SigHandler::SigDfl) } {
      Ok(_) => {}
      Err(err) => {
        return Err(err).with_context(|| {
          format!(
            "failed to install signal handler for {:?}: {:?}",
            signum, err
          )
        });
      }
    }
  }
  sigprocmask(SigmaskHow::SIG_SETMASK, Some(&SigSet::empty()), None).context("sigprocmask")?;

  match unsafe { fork() }.context("fork")? {
    ForkResult::Child => {
      // The child (the future sandboxed process) asks to be traced and stops
      // itself so the parent can attach before execve happens.
      ptrace::traceme().context("ptrace::traceme")?;
      raise(Signal::SIGSTOP).context("raise SIGSTOP")?;

      Ok(())
    }

    ForkResult::Parent { child } => {
      let err = catch_unwind(|| {
        let status_code = match run_parent(child, &options).context("run_parent") {
          Ok(result) => result,
          // BUGFIX (mangling repair): the turbofish type parameter had been
          // stripped; rule violations downcast to SandboxError and exit 254.
          Err(err) => match err.downcast_ref::<SandboxError>() {
            Some(err) => {
              eprintln!("{}", err);
              254
            }
            None => {
              eprintln!("run process: {:?}", err);
              1
            }
          },
        };

        unsafe { libc::_exit(status_code) };
      });
      if err.is_ok() {
        unsafe { libc::_exit(0) };
      } else {
        eprintln!("{:#?}", err);
        unsafe { libc::_exit(253) };
      }
    }
  }
}

#[cfg(test)]
mod tests {
  use super::*;

  use std::ffi::c_void;
  use std::fs::{read, read_dir, File};
  use std::os::fd::AsRawFd;
  use std::os::unix::process::CommandExt;
  use std::path::Path;
  use std::process::Command;

  use nix::sys::wait::waitpid;
  use nix::unistd::{dup2, getppid};
  use tempfile::TempDir;

  /// Fork, run `child` under the sandbox with the standard test rules, and
  /// capture (exit status, stdout, stderr).
  fn test_install_sandbox(child: fn() -> !, tempdir: &Path) -> Result<(i32, String, String)> {
    let stdout_path
= tempdir.join("stdout.txt");
    let stdout_file = File::create(&stdout_path).context("create stdout")?;
    let stderr_path = tempdir.join("stderr.txt");
    let stderr_file = File::create(&stderr_path).context("create stderr")?;

    // We do a double-fork so that the tracer exists in its own little process. That lets the PTRACE_O_EXITKILL magic kick in.
    match unsafe { fork() }.context("fork")? {
      ForkResult::Child => {
        let err = catch_unwind(|| {
          if let Err(err) = dup2(stdout_file.as_raw_fd(), 1) {
            eprintln!("failed to redirect stdout: {err}");
            unsafe { libc::_exit(2) };
          }
          drop(stdout_file);
          if let Err(err) = dup2(stderr_file.as_raw_fd(), 2) {
            eprintln!("failed to redirect stderr: {err}");
            unsafe { libc::_exit(3) };
          }
          drop(stderr_file);

          if let Err(err) = install_sandbox(Options {
            rules: vec![
              Rule {
                operation: Operation::Modify,
                prefixes: vec![
                  "/home/runner/workspace/.replit".to_string(),
                  "/home/runner/workspace/replit.nix".to_string(),
                  "/home/runner/workspace/.git/refs/replit/agent-ledger".to_string(),
                ],
                exclude_prefixes: None,
                message: "Tried to modify a forbidden path".to_string(),
              },
              Rule {
                operation: Operation::Delete,
                prefixes: vec!["/home/runner/workspace/.git/".to_string()],
                exclude_prefixes: Some(vec!["/home/runner/workspace/.git/index.lock".to_string()]),
                message: "Tried to delete a forbidden path".to_string(),
              },
            ],
          }) {
            eprintln!("failed to fork sandbox: {err}");
            unsafe { libc::_exit(4) };
          }
          child()
        });
        if err.is_ok() {
          unsafe { libc::_exit(0) };
        } else {
          eprintln!("{:#?}", err);
          unsafe { libc::_exit(253) };
        }
      }
      ForkResult::Parent { child } => {
        drop(stdout_file);
        drop(stderr_file);

        let wait_status = match waitpid(child, None) {
          Ok(WaitStatus::Exited(_pid, exit_status)) => exit_status,
          Ok(wait_status) => {
            panic!("unexpected wait status: {:#?}", wait_status);
          }
          Err(err) => {
            panic!("unexpected wait error: {:#?}", err);
          }
        };
        let stdout = String::from_utf8(
          read(&stdout_path).with_context(|| format!("read {:#?}", stdout_path))?,
        )
        .context("decode stdout")?;
        let stderr = String::from_utf8(
          read(&stderr_path).with_context(|| format!("read {:#?}", stderr_path))?,
        )
        .context("decode stderr")?;
        Ok((wait_status, stdout, stderr))
      }
    }
  }

  #[test]
  fn it_lets_safe_commands_proceed() {
    fn exec_hook() -> ! {
      let err = Command::new("bash").args(["-c", "echo hello"]).exec();
      eprintln!("failed to exec: {err:#?}");
      unsafe { libc::_exit(1) };
    }

    let tmp_dir =
      TempDir::with_prefix("pid2sandbox-").expect("Failed to create temporary directory");
    assert_eq!(
      test_install_sandbox(exec_hook, tmp_dir.path()).expect("test_install_sandbox"),
      (0, "hello\n".to_string(), "".to_string())
    );
  }

  #[test]
  fn it_doesnt_leak_fds() {
    fn exec_hook() -> ! {
      // Mangling repair: the `::<Result<Vec<_>, std::io::Error>>` turbofish
      // had been stripped from these collect() calls.
      let self_fds = read_dir("/proc/self/fd")
        .expect("read fds")
        .map(|res| res.map(|e| e.file_name()))
        .collect::<Result<Vec<_>, std::io::Error>>()
        .expect("get paths");
      let parent_fds = read_dir(format!("/proc/{}/fd", getppid()))
        .expect("read fds")
        .map(|res| res.map(|e| e.file_name()))
        .collect::<Result<Vec<_>, std::io::Error>>()
        .expect("get paths");
      let result = format!("parent={:?}\nself={:?}\n", parent_fds, self_fds);
      unsafe { libc::write(2, result.as_bytes().as_ptr() as *const c_void, result.len()) };
      unsafe { libc::_exit(0) };
    }

    let tmp_dir =
      TempDir::with_prefix("pid2sandbox-").expect("Failed to create temporary directory");
    // The parent should only contain stderr. The child should only contain the three stdio fds
    // plus a fourth fd: the one opening /proc/self/fd.
    assert_eq!(
      test_install_sandbox(exec_hook, tmp_dir.path()).expect("test_install_sandbox"),
      (
        0,
        "".to_string(),
        "parent=[\"2\"]\nself=[\"0\", \"1\", \"2\", \"3\"]\n".to_string()
      )
    );
  }

  #[test]
  fn it_prevents_modifying_dot_replit() {
    fn exec_hook() -> ! {
      std::fs::write("/home/runner/workspace/.replit", "yo").expect("write .replit");
      unsafe { libc::_exit(0) };
    }

    let tmp_dir =
      TempDir::with_prefix("pid2sandbox-").expect("Failed to create temporary directory");
    // Cargo captures the error message, but we only care about the exit code.
    let (exit_status, _, _) =
      test_install_sandbox(exec_hook, tmp_dir.path()).expect("test_install_sandbox");
    assert_eq!(exit_status, 254);
  }

  #[test]
  fn it_allows_modifying_dot_git_index_lock() {
    fn exec_hook() -> ! {
      std::fs::write("/home/runner/workspace/.git/index.lock", "yo")
        .expect("write .git/index.lock");
      unsafe { libc::_exit(0) };
    }

    let tmp_dir =
      TempDir::with_prefix("pid2sandbox-").expect("Failed to create temporary directory");
    // Cargo captures the error message, but we only care about the exit code.
    let (exit_status, _, _) =
      test_install_sandbox(exec_hook, tmp_dir.path()).expect("test_install_sandbox");
    assert_eq!(exit_status, 0);
  }
}
--------------------------------------------------------------------------------