├── .gitignore ├── rust-toolchain.toml ├── src ├── main.rs ├── prompt_for_init_command.md ├── lib.rs ├── local_spawner.rs ├── prompt_args.rs ├── codex_agent.rs └── conversation.rs ├── npm ├── template │ └── package.json ├── package.json ├── publish │ ├── update-base-package.sh │ └── create-platform-packages.sh ├── README.md ├── bin │ └── codex-acp.js └── testing │ ├── validate.sh │ └── test-platform-detection.js ├── .github └── workflows │ ├── quick-check.yml │ ├── ci.yml │ └── release.yml ├── README.md ├── Cargo.toml ├── script ├── sign-mac └── sign-windows.ps1 └── LICENSE /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | node_modules 3 | npm-packages 4 | .DS_Store 5 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "stable" 3 | components = ["clippy", "rustfmt", "rust-src"] 4 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use clap::Parser; 3 | use codex_arg0::arg0_dispatch_or_else; 4 | use codex_common::CliConfigOverrides; 5 | 6 | fn main() -> Result<()> { 7 | arg0_dispatch_or_else(|codex_linux_sandbox_exe| async move { 8 | let cli_config_overrides = CliConfigOverrides::parse(); 9 | codex_acp::run_main(codex_linux_sandbox_exe, cli_config_overrides).await?; 10 | Ok(()) 11 | }) 12 | } 13 | -------------------------------------------------------------------------------- /npm/template/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@zed-industries/${PACKAGE_NAME}", 3 | "version": "${VERSION}", 4 | "description": "An ACP-compatible coding agent powered by Codex - ${OS} ${ARCH} binary", 5 | "license": "Apache-2.0", 6 | 
"author": "Zed ", 7 | "homepage": "https://github.com/zed-industries/codex-acp", 8 | "repository": { 9 | "type": "git", 10 | "url": "git+https://github.com/zed-industries/codex-acp.git" 11 | }, 12 | "bin": { 13 | "${PACKAGE_NAME}": "bin/codex-acp" 14 | }, 15 | "os": [ 16 | "${OS}" 17 | ], 18 | "cpu": [ 19 | "${ARCH}" 20 | ], 21 | "files": [ 22 | "bin" 23 | ] 24 | } 25 | -------------------------------------------------------------------------------- /npm/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@zed-industries/codex-acp", 3 | "version": "0.7.3", 4 | "type": "module", 5 | "description": "An ACP-compatible coding agent powered by Codex", 6 | "license": "Apache-2.0", 7 | "author": "Zed ", 8 | "homepage": "https://github.com/zed-industries/codex-acp", 9 | "repository": { 10 | "type": "git", 11 | "url": "git+https://github.com/zed-industries/codex-acp.git" 12 | }, 13 | "bugs": { 14 | "url": "https://github.com/zed-industries/codex-acp/issues" 15 | }, 16 | "keywords": [ 17 | "codex", 18 | "acp", 19 | "agent", 20 | "coding", 21 | "ai", 22 | "assistant" 23 | ], 24 | "bin": { 25 | "codex-acp": "bin/codex-acp.js" 26 | }, 27 | "files": [ 28 | "bin" 29 | ], 30 | "optionalDependencies": { 31 | "@zed-industries/codex-acp-darwin-arm64": "0.7.3", 32 | "@zed-industries/codex-acp-darwin-x64": "0.7.3", 33 | "@zed-industries/codex-acp-linux-arm64": "0.7.3", 34 | "@zed-industries/codex-acp-linux-x64": "0.7.3", 35 | "@zed-industries/codex-acp-win32-arm64": "0.7.3", 36 | "@zed-industries/codex-acp-win32-x64": "0.7.3" 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /.github/workflows/quick-check.yml: -------------------------------------------------------------------------------- 1 | name: Quick Check 2 | 3 | on: 4 | push: 5 | branches: [main, master] 6 | pull_request: 7 | branches: [main, master] 8 | workflow_dispatch: 9 | 10 | env: 11 | CARGO_TERM_COLOR: always 12 
| 13 | jobs: 14 | quick-check: 15 | name: Format and Clippy Check 16 | runs-on: ubuntu-latest 17 | 18 | steps: 19 | - name: Checkout codex-acp 20 | uses: actions/checkout@v4 21 | 22 | - name: Install Rust 23 | uses: dtolnay/rust-toolchain@stable 24 | with: 25 | components: rustfmt, clippy 26 | 27 | - name: Cache cargo registry 28 | uses: actions/cache@v4 29 | with: 30 | path: ~/.cargo/registry 31 | key: ${{ runner.os }}-quick-cargo-registry-${{ hashFiles('**/Cargo.lock') }} 32 | restore-keys: | 33 | ${{ runner.os }}-quick-cargo-registry- 34 | 35 | - name: Cache cargo index 36 | uses: actions/cache@v4 37 | with: 38 | path: ~/.cargo/git 39 | key: ${{ runner.os }}-quick-cargo-git-${{ hashFiles('**/Cargo.lock') }} 40 | restore-keys: | 41 | ${{ runner.os }}-quick-cargo-git- 42 | 43 | - name: Check formatting 44 | run: cargo fmt --all -- --check 45 | 46 | - name: Run clippy on all targets 47 | run: cargo clippy --all-targets --all-features -- -D warnings 48 | 49 | - name: Run clippy on tests 50 | run: cargo clippy --tests --all-features -- -D warnings 51 | -------------------------------------------------------------------------------- /npm/publish/update-base-package.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | # Used in CI, extract here for readability 5 | 6 | # Script to update version in base package.json 7 | # Usage: update-base-package.sh 8 | 9 | VERSION="${1:?Missing version}" 10 | 11 | echo "Updating base package.json to version $VERSION..." 12 | 13 | # Find the package.json relative to this script 14 | SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 15 | PACKAGE_JSON="$SCRIPT_DIR/../package.json" 16 | 17 | if [[ ! 
-f "$PACKAGE_JSON" ]]; then 18 | echo "❌ Error: package.json not found at $PACKAGE_JSON" 19 | exit 1 20 | fi 21 | 22 | # Update version in base package.json 23 | sed -i.bak "s/\"version\": \".*\"/\"version\": \"$VERSION\"/" "$PACKAGE_JSON" 24 | 25 | # Update optionalDependencies versions 26 | sed -i.bak "s/\"codex-acp-darwin-arm64\": \".*\"/\"codex-acp-darwin-arm64\": \"$VERSION\"/" "$PACKAGE_JSON" 27 | sed -i.bak "s/\"codex-acp-darwin-x64\": \".*\"/\"codex-acp-darwin-x64\": \"$VERSION\"/" "$PACKAGE_JSON" 28 | sed -i.bak "s/\"codex-acp-linux-arm64\": \".*\"/\"codex-acp-linux-arm64\": \"$VERSION\"/" "$PACKAGE_JSON" 29 | sed -i.bak "s/\"codex-acp-linux-x64\": \".*\"/\"codex-acp-linux-x64\": \"$VERSION\"/" "$PACKAGE_JSON" 30 | sed -i.bak "s/\"codex-acp-win32-arm64\": \".*\"/\"codex-acp-win32-arm64\": \"$VERSION\"/" "$PACKAGE_JSON" 31 | sed -i.bak "s/\"codex-acp-win32-x64\": \".*\"/\"codex-acp-win32-x64\": \"$VERSION\"/" "$PACKAGE_JSON" 32 | 33 | # Remove backup file 34 | rm -f "$PACKAGE_JSON.bak" 35 | 36 | echo "✅ Updated package.json:" 37 | cat "$PACKAGE_JSON" 38 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ACP adapter for Codex 2 | 3 | Use [Codex](https://github.com/openai/codex) from [ACP-compatible](https://agentclientprotocol.com) clients such as [Zed](https://zed.dev)! 
4 | 5 | This tool implements an ACP adapter around the Codex CLI, supporting: 6 | 7 | - Context @-mentions 8 | - Images 9 | - Tool calls (with permission requests) 10 | - Following 11 | - Edit review 12 | - TODO lists 13 | - Slash commands: 14 | - /review (with optional instructions) 15 | - /review-branch 16 | - /review-commit 17 | - /init 18 | - /compact 19 | - /logout 20 | - Custom Prompts 21 | - Client MCP servers 22 | - Auth Methods: 23 | - ChatGPT subscription (requires paid subscription and doesn't work in remote projects) 24 | - CODEX_API_KEY 25 | - OPENAI_API_KEY 26 | 27 | Learn more about the [Agent Client Protocol](https://agentclientprotocol.com/). 28 | 29 | ## How to use 30 | 31 | ### Zed 32 | 33 | The latest version of Zed can already use this adapter out of the box. 34 | 35 | To use Codex, open the Agent Panel and click "New Codex Thread" from the `+` button menu in the top-right. 36 | 37 | Read the docs on [External Agent](https://zed.dev/docs/ai/external-agents) support. 38 | 39 | ### Other clients 40 | 41 | [Submit a PR](https://github.com/zed-industries/codex-acp/pulls) to add yours! 42 | 43 | #### Installation 44 | 45 | Install the adapter from the latest release for your architecture and OS: https://github.com/zed-industries/codex-acp/releases 46 | 47 | You can then use `codex-acp` as a regular ACP agent: 48 | 49 | ``` 50 | OPENAI_API_KEY=sk-... codex-acp 51 | ``` 52 | 53 | Or via npm: 54 | 55 | ``` 56 | npx @zed-industries/codex-acp 57 | ``` 58 | 59 | ## License 60 | 61 | Apache-2.0 62 | -------------------------------------------------------------------------------- /npm/README.md: -------------------------------------------------------------------------------- 1 | # ACP adapter for Codex 2 | 3 | Use [Codex](https://github.com/openai/codex) from [ACP-compatible](https://agentclientprotocol.com) clients such as [Zed](https://zed.dev)! 
4 | 5 | This tool implements an ACP adapter around the Codex CLI, supporting: 6 | 7 | - Context @-mentions 8 | - Images 9 | - Tool calls (with permission requests) 10 | - Following 11 | - Edit review 12 | - TODO lists 13 | - Slash commands: 14 | - /review (with optional instructions) 15 | - /review-branch 16 | - /review-commit 17 | - /init 18 | - /compact 19 | - /logout 20 | - Custom Prompts 21 | - Client MCP servers 22 | - Auth Methods: 23 | - ChatGPT subscription (requires paid subscription and doesn't work in remote projects) 24 | - CODEX_API_KEY 25 | - OPENAI_API_KEY 26 | 27 | Learn more about the [Agent Client Protocol](https://agentclientprotocol.com/). 28 | 29 | ## How to use 30 | 31 | ### Zed 32 | 33 | The latest version of Zed can already use this adapter out of the box. 34 | 35 | To use Codex, open the Agent Panel and click "New Codex Thread" from the `+` button menu in the top-right. 36 | 37 | Read the docs on [External Agent](https://zed.dev/docs/ai/external-agents) support. 38 | 39 | ### Other clients 40 | 41 | [Submit a PR](https://github.com/zed-industries/codex-acp/pulls) to add yours! 42 | 43 | #### Installation 44 | 45 | Install the adapter from the latest release for your architecture and OS: https://github.com/zed-industries/codex-acp/releases 46 | 47 | You can then use `codex-acp` as a regular ACP agent: 48 | 49 | ``` 50 | OPENAI_API_KEY=sk-... codex-acp 51 | ``` 52 | 53 | Or via npm: 54 | 55 | ``` 56 | npx @zed-industries/codex-acp 57 | ``` 58 | 59 | ## License 60 | 61 | Apache-2.0 62 | -------------------------------------------------------------------------------- /src/prompt_for_init_command.md: -------------------------------------------------------------------------------- 1 | Generate a file named AGENTS.md that serves as a contributor guide for this repository. 2 | Your goal is to produce a clear, concise, and well-structured document with descriptive headings and actionable explanations for each section. 
3 | Follow the outline below, but adapt as needed — add sections if relevant, and omit those that do not apply to this project. 4 | 5 | Document Requirements 6 | 7 | - Title the document "Repository Guidelines". 8 | - Use Markdown headings (#, ##, etc.) for structure. 9 | - Keep the document concise. 200-400 words is optimal. 10 | - Keep explanations short, direct, and specific to this repository. 11 | - Provide examples where helpful (commands, directory paths, naming patterns). 12 | - Maintain a professional, instructional tone. 13 | 14 | Recommended Sections 15 | 16 | Project Structure & Module Organization 17 | 18 | - Outline the project structure, including where the source code, tests, and assets are located. 19 | 20 | Build, Test, and Development Commands 21 | 22 | - List key commands for building, testing, and running locally (e.g., npm test, make build). 23 | - Briefly explain what each command does. 24 | 25 | Coding Style & Naming Conventions 26 | 27 | - Specify indentation rules, language-specific style preferences, and naming patterns. 28 | - Include any formatting or linting tools used. 29 | 30 | Testing Guidelines 31 | 32 | - Identify testing frameworks and coverage requirements. 33 | - State test naming conventions and how to run tests. 34 | 35 | Commit & Pull Request Guidelines 36 | 37 | - Summarize commit message conventions found in the project’s Git history. 38 | - Outline pull request requirements (descriptions, linked issues, screenshots, etc.). 39 | 40 | (Optional) Add other sections if relevant, such as Security & Configuration Tips, Architecture Overview, or Agent-Specific Instructions. 
41 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "codex-acp" 3 | version = "0.7.3" 4 | edition = "2024" 5 | authors = ["Zed "] 6 | license = "Apache-2.0" 7 | description = "An ACP-compatible coding agent powered by Codex" 8 | repository = "https://github.com/zed-industries/codex-acp" 9 | homepage = "https://github.com/zed-industries/codex-acp" 10 | 11 | [[bin]] 12 | name = "codex-acp" 13 | path = "src/main.rs" 14 | 15 | [lib] 16 | name = "codex_acp" 17 | path = "src/lib.rs" 18 | 19 | [dependencies] 20 | agent-client-protocol = { version = "=0.9.0", features = ["unstable"] } 21 | anyhow = "1" 22 | async-trait = "0.1" 23 | clap = "4" 24 | codex-apply-patch = { git = "https://github.com/zed-industries/codex", branch = "acp" } 25 | codex-arg0 = { git = "https://github.com/zed-industries/codex", branch = "acp" } 26 | codex-common = { git = "https://github.com/zed-industries/codex", branch = "acp", features = ["cli"] } 27 | codex-core = { git = "https://github.com/zed-industries/codex", branch = "acp" } 28 | codex-mcp-server = { git = "https://github.com/zed-industries/codex", branch = "acp" } 29 | codex-protocol = { git = "https://github.com/zed-industries/codex", branch = "acp" } 30 | codex-login = { git = "https://github.com/zed-industries/codex", branch = "acp" } 31 | itertools = "0.14.0" 32 | mcp-types = { git = "https://github.com/zed-industries/codex", branch = "acp" } 33 | regex-lite = "0.1" 34 | serde = { version = "1", features = ["derive"] } 35 | serde_json = "1" 36 | shlex = "1" 37 | tokio = { version = "1", features = ["rt-multi-thread", "macros", "io-std", "io-util"] } 38 | tokio-util = { version = "0.7", features = ["compat"] } 39 | tracing = { version = "0.1", features = ["log"] } 40 | tracing-subscriber = { version = "0.3", features = ["env-filter", "fmt"] } 41 | 42 | [dev-dependencies] 43 | 
codex-core = { git = "https://github.com/zed-industries/codex", branch = "acp", features = ["test-support"] } 44 | 45 | [lints.rust] 46 | let-underscore = "warn" 47 | rust-2018-idioms = "warn" 48 | unused = "warn" 49 | -------------------------------------------------------------------------------- /npm/bin/codex-acp.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | import { spawnSync } from "node:child_process"; 4 | import { existsSync } from "node:fs"; 5 | import { fileURLToPath } from "node:url"; 6 | 7 | // Map Node.js platform/arch to package names 8 | function getPlatformPackage() { 9 | const platform = process.platform; 10 | const arch = process.arch; 11 | 12 | const platformMap = { 13 | darwin: { 14 | arm64: "codex-acp-darwin-arm64", 15 | x64: "codex-acp-darwin-x64", 16 | }, 17 | linux: { 18 | arm64: "codex-acp-linux-arm64", 19 | x64: "codex-acp-linux-x64", 20 | }, 21 | win32: { 22 | arm64: "codex-acp-win32-arm64", 23 | x64: "codex-acp-win32-x64", 24 | }, 25 | }; 26 | 27 | const packages = platformMap[platform]; 28 | if (!packages) { 29 | console.error(`Unsupported platform: ${platform}`); 30 | process.exit(1); 31 | } 32 | 33 | const packageName = packages[arch]; 34 | if (!packageName) { 35 | console.error(`Unsupported architecture: ${arch} on ${platform}`); 36 | process.exit(1); 37 | } 38 | 39 | return `@zed-industries/${packageName}`; 40 | } 41 | 42 | // Locate the binary 43 | function getBinaryPath() { 44 | const packageName = getPlatformPackage(); 45 | const binaryName = 46 | process.platform === "win32" ? 
"codex-acp.exe" : "codex-acp"; 47 | 48 | try { 49 | // Try to resolve the platform-specific package 50 | const binaryPath = fileURLToPath( 51 | import.meta.resolve(`${packageName}/bin/${binaryName}`), 52 | ); 53 | 54 | if (existsSync(binaryPath)) { 55 | return binaryPath; 56 | } 57 | } catch (e) { 58 | console.error(`Error resolving package: ${e}`); 59 | // Package not found 60 | } 61 | 62 | console.error( 63 | `Failed to locate ${packageName} binary. This usually means the optional dependency was not installed.`, 64 | ); 65 | console.error(`Platform: ${process.platform}, Architecture: ${process.arch}`); 66 | process.exit(1); 67 | } 68 | 69 | // Execute the binary 70 | function run() { 71 | const binaryPath = getBinaryPath(); 72 | const result = spawnSync(binaryPath, process.argv.slice(2), { 73 | stdio: "inherit", 74 | windowsHide: true, 75 | }); 76 | 77 | if (result.error) { 78 | console.error(`Failed to execute ${binaryPath}:`, result.error); 79 | process.exit(1); 80 | } 81 | 82 | process.exit(result.status || 0); 83 | } 84 | 85 | run(); 86 | -------------------------------------------------------------------------------- /script/sign-mac: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | if [[ $# -lt 1 ]]; then 6 | echo "Usage: $0 " 7 | exit 1 8 | fi 9 | 10 | binary_path="$1" 11 | 12 | if [[ ! -f "$binary_path" ]]; then 13 | echo "Error: Binary not found at $binary_path" 14 | exit 1 15 | fi 16 | 17 | IDENTITY="Zed Industries, Inc." 
18 | 19 | echo "Code signing binary at $binary_path" 20 | 21 | if [[ -z "${MACOS_CERTIFICATE:-}" || -z "${MACOS_CERTIFICATE_PASSWORD:-}" || -z "${APPLE_NOTARIZATION_KEY:-}" || -z "${APPLE_NOTARIZATION_KEY_ID:-}" || -z "${APPLE_NOTARIZATION_ISSUER_ID:-}" ]]; then 22 | echo "Error: One or more required variables are missing:" 23 | echo " - MACOS_CERTIFICATE" 24 | echo " - MACOS_CERTIFICATE_PASSWORD" 25 | echo " - APPLE_NOTARIZATION_KEY" 26 | echo " - APPLE_NOTARIZATION_KEY_ID" 27 | echo " - APPLE_NOTARIZATION_ISSUER_ID" 28 | exit 1 29 | fi 30 | 31 | echo "Setting up keychain for code signing..." 32 | security create-keychain -p "$MACOS_CERTIFICATE_PASSWORD" codex-acp.keychain || echo "" 33 | security default-keychain -s codex-acp.keychain 34 | security unlock-keychain -p "$MACOS_CERTIFICATE_PASSWORD" codex-acp.keychain 35 | security set-keychain-settings codex-acp.keychain 36 | echo "$MACOS_CERTIFICATE" | base64 --decode > /tmp/codex-acp-certificate.p12 37 | security import /tmp/codex-acp-certificate.p12 -k codex-acp.keychain -P "$MACOS_CERTIFICATE_PASSWORD" -T /usr/bin/codesign 38 | rm /tmp/codex-acp-certificate.p12 39 | security set-key-partition-list -S apple-tool:,apple:,codesign: -s -k "$MACOS_CERTIFICATE_PASSWORD" codex-acp.keychain 40 | 41 | function cleanup() { 42 | echo "Cleaning up keychain" 43 | security default-keychain -s login.keychain 44 | security delete-keychain codex-acp.keychain 45 | } 46 | 47 | trap cleanup EXIT 48 | 49 | echo "Signing binary ${binary_path}" 50 | /usr/bin/codesign --deep --force --timestamp --options runtime --sign "$IDENTITY" "${binary_path}" -v 51 | echo "✓ Successfully signed ${binary_path}" 52 | 53 | echo "Notarizing binary with Apple" 54 | 55 | # Create temporary zip file for notarization 56 | temp_zip=$(mktemp).zip 57 | zip -q "$temp_zip" "${binary_path}" 58 | 59 | # Create temporary file for the API key 60 | notarization_key_file=$(mktemp) 61 | echo "$APPLE_NOTARIZATION_KEY" > "$notarization_key_file" 62 | 63 | # Submit for 
notarization and wait for completion 64 | xcode_bin_dir_path="$(xcode-select -p)/usr/bin" 65 | "${xcode_bin_dir_path}/notarytool" submit --wait \ 66 | --key "$notarization_key_file" \ 67 | --key-id "$APPLE_NOTARIZATION_KEY_ID" \ 68 | --issuer "$APPLE_NOTARIZATION_ISSUER_ID" \ 69 | "$temp_zip" 70 | 71 | rm "$notarization_key_file" 72 | rm "$temp_zip" 73 | 74 | echo "✓ Successfully notarized ${binary_path}" 75 | -------------------------------------------------------------------------------- /npm/publish/create-platform-packages.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | # Used in CI, extract here for readability 5 | 6 | # Script to create platform-specific npm packages from release artifacts 7 | # Usage: create-platform-packages.sh 8 | 9 | ARTIFACTS_DIR="${1:?Missing artifacts directory}" 10 | OUTPUT_DIR="${2:?Missing output directory}" 11 | VERSION="${3:?Missing version}" 12 | 13 | echo "Creating platform-specific npm packages..." 
14 | echo "Artifacts: $ARTIFACTS_DIR" 15 | echo "Output: $OUTPUT_DIR" 16 | echo "Version: $VERSION" 17 | echo 18 | 19 | mkdir -p "$OUTPUT_DIR" 20 | 21 | # Define platform mappings: target -> (npm-os, npm-arch, binary-extension) 22 | # Note: We only package gnu variants for Linux 23 | declare -A platforms=( 24 | ["aarch64-apple-darwin"]="darwin arm64 " 25 | ["x86_64-apple-darwin"]="darwin x64 " 26 | ["x86_64-unknown-linux-gnu"]="linux x64 " 27 | ["aarch64-unknown-linux-gnu"]="linux arm64 " 28 | ["x86_64-pc-windows-msvc"]="win32 x64 .exe" 29 | ["aarch64-pc-windows-msvc"]="win32 arm64 .exe" 30 | ) 31 | 32 | for target in "${!platforms[@]}"; do 33 | read os arch ext <<< "${platforms[$target]}" 34 | 35 | # Determine archive extension 36 | if [[ "$os" == "win32" ]]; then 37 | archive_ext="zip" 38 | else 39 | archive_ext="tar.gz" 40 | fi 41 | 42 | # Find and extract the archive 43 | archive_path=$(find "$ARTIFACTS_DIR" -name "*-${target}.${archive_ext}" | head -n 1) 44 | 45 | if [[ -z "$archive_path" ]]; then 46 | echo "⚠️ Warning: No archive found for target $target" 47 | continue 48 | fi 49 | 50 | echo "📦 Processing $target from $(basename "$archive_path")" 51 | 52 | # Create package name 53 | pkg_name="codex-acp-${os}-${arch}" 54 | pkg_dir="$OUTPUT_DIR/${pkg_name}" 55 | mkdir -p "${pkg_dir}/bin" 56 | 57 | # Extract binary 58 | if [[ "$archive_ext" == "zip" ]]; then 59 | unzip -q -j "$archive_path" "codex-acp${ext}" -d "${pkg_dir}/bin/" 60 | else 61 | tar xzf "$archive_path" -C "${pkg_dir}/bin/" "codex-acp${ext}" 62 | fi 63 | 64 | # Make binary executable (important for Unix-like systems) 65 | chmod +x "${pkg_dir}/bin/codex-acp${ext}" 2>/dev/null || echo "Failed to make binary executable" 66 | 67 | # Create package.json from template 68 | export PACKAGE_NAME="$pkg_name" 69 | export VERSION="$VERSION" 70 | export OS="$os" 71 | export ARCH="$arch" 72 | 73 | # Find the template relative to this script 74 | SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 75 | 
TEMPLATE_PATH="$SCRIPT_DIR/../template/package.json" 76 | 77 | envsubst < "$TEMPLATE_PATH" > "${pkg_dir}/package.json" 78 | 79 | # Update bin field for Windows to include .exe extension 80 | if [[ "$os" == "win32" ]]; then 81 | # Use sed to update the bin path in package.json 82 | sed -i.bak 's|"bin/codex-acp"|"bin/codex-acp.exe"|' "${pkg_dir}/package.json" 83 | rm "${pkg_dir}/package.json.bak" 84 | fi 85 | 86 | echo " ✓ Created package: ${pkg_name}" 87 | done 88 | 89 | echo 90 | echo "✅ Platform packages created in: $OUTPUT_DIR" 91 | ls -1 "$OUTPUT_DIR" 92 | -------------------------------------------------------------------------------- /npm/testing/validate.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | # Used in CI, extract here for readability 5 | 6 | RED='\033[0;31m' 7 | GREEN='\033[0;32m' 8 | YELLOW='\033[1;33m' 9 | NC='\033[0m' # No Color 10 | 11 | echo "NPM Package Setup Validation" 12 | echo "=============================" 13 | echo 14 | 15 | check_command() { 16 | if ! command -v "$1" &> /dev/null; then 17 | echo -e "${RED}✗ Required command not found: $1${NC}" 18 | exit 1 19 | fi 20 | } 21 | 22 | check_command node 23 | check_command grep 24 | 25 | # 1. Validate wrapper script syntax 26 | echo "1. Validating wrapper script syntax..." 27 | if node -c npm/bin/codex-acp.js 2>/dev/null; then 28 | echo -e "${GREEN}✓ Wrapper script syntax is valid${NC}" 29 | else 30 | echo -e "${RED}✗ Wrapper script has syntax errors${NC}" 31 | exit 1 32 | fi 33 | echo 34 | 35 | # 2. Validate package.json files 36 | echo "2. Validating package.json files..." 37 | if node -e "JSON.parse(require('fs').readFileSync('npm/package.json', 'utf8'))" 2>/dev/null; then 38 | echo -e "${GREEN}✓ Base package.json is valid${NC}" 39 | else 40 | echo -e "${RED}✗ Base package.json is invalid${NC}" 41 | exit 1 42 | fi 43 | 44 | # 3. Check template has required placeholders 45 | echo "3. 
Validating template placeholders..." 46 | missing_placeholders=0 47 | for placeholder in PACKAGE_NAME VERSION OS ARCH; do 48 | if ! grep -q "\${${placeholder}}" npm/template/package.json; then 49 | echo -e "${RED}✗ Template missing ${placeholder} placeholder${NC}" 50 | missing_placeholders=1 51 | fi 52 | done 53 | 54 | if [ $missing_placeholders -eq 0 ]; then 55 | echo -e "${GREEN}✓ Template has all required placeholders${NC}" 56 | else 57 | exit 1 58 | fi 59 | echo 60 | 61 | # 4. Check version consistency 62 | echo "4. Checking version consistency..." 63 | CARGO_VERSION=$(grep -m1 "^version" Cargo.toml | sed 's/.*"\(.*\)".*/\1/') 64 | NPM_VERSION=$(node -e "console.log(require('./npm/package.json').version)") 65 | 66 | echo " Cargo.toml version: $CARGO_VERSION" 67 | echo " npm package.json version: $NPM_VERSION" 68 | 69 | if [ "$CARGO_VERSION" != "$NPM_VERSION" ]; then 70 | echo -e "${RED}✗ Version mismatch${NC}" 71 | exit 1 72 | fi 73 | echo -e "${GREEN}✓ Versions are in sync${NC}" 74 | echo 75 | 76 | # 5. Verify optional dependencies list 77 | echo "5. Verifying platform packages..." 78 | EXPECTED_PACKAGES=( 79 | "@zed-industries/codex-acp-darwin-arm64" 80 | "@zed-industries/codex-acp-darwin-x64" 81 | "@zed-industries/codex-acp-linux-arm64" 82 | "@zed-industries/codex-acp-linux-x64" 83 | "@zed-industries/codex-acp-win32-arm64" 84 | "@zed-industries/codex-acp-win32-x64" 85 | ) 86 | 87 | missing_packages=0 88 | for pkg in "${EXPECTED_PACKAGES[@]}"; do 89 | if ! grep -q "\"$pkg\":" npm/package.json; then 90 | echo -e "${RED}✗ Missing package: $pkg${NC}" 91 | missing_packages=1 92 | fi 93 | done 94 | 95 | if [ $missing_packages -eq 0 ]; then 96 | echo -e "${GREEN}✓ All platform packages listed in optionalDependencies${NC}" 97 | else 98 | exit 1 99 | fi 100 | echo 101 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! 
Codex ACP - An Agent Client Protocol implementation for Codex. 2 | #![deny(clippy::print_stdout, clippy::print_stderr)] 3 | 4 | use agent_client_protocol::AgentSideConnection; 5 | use codex_common::CliConfigOverrides; 6 | use codex_core::config::{Config, ConfigOverrides}; 7 | use std::path::PathBuf; 8 | use std::sync::{Arc, OnceLock}; 9 | use std::{io::Result as IoResult, rc::Rc}; 10 | use tokio::task::LocalSet; 11 | use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt}; 12 | use tracing_subscriber::EnvFilter; 13 | 14 | mod codex_agent; 15 | mod conversation; 16 | mod local_spawner; 17 | mod prompt_args; 18 | 19 | pub static ACP_CLIENT: OnceLock> = OnceLock::new(); 20 | 21 | /// Run the Codex ACP agent. 22 | /// 23 | /// This sets up an ACP agent that communicates over stdio, bridging 24 | /// the ACP protocol with the existing codex-rs infrastructure. 25 | /// 26 | /// # Errors 27 | /// 28 | /// If unable to parse the config or start the program. 29 | pub async fn run_main( 30 | codex_linux_sandbox_exe: Option, 31 | cli_config_overrides: CliConfigOverrides, 32 | ) -> IoResult<()> { 33 | // Install a simple subscriber so `tracing` output is visible. 34 | // Users can control the log level with `RUST_LOG`. 
35 | tracing_subscriber::fmt() 36 | .with_writer(std::io::stderr) 37 | .with_env_filter(EnvFilter::from_default_env()) 38 | .init(); 39 | 40 | // Parse CLI overrides and load configuration 41 | let cli_kv_overrides = cli_config_overrides.parse_overrides().map_err(|e| { 42 | std::io::Error::new( 43 | std::io::ErrorKind::InvalidInput, 44 | format!("error parsing -c overrides: {e}"), 45 | ) 46 | })?; 47 | 48 | let config_overrides = ConfigOverrides { 49 | codex_linux_sandbox_exe, 50 | ..ConfigOverrides::default() 51 | }; 52 | 53 | let config = Config::load_with_cli_overrides(cli_kv_overrides, config_overrides) 54 | .await 55 | .map_err(|e| { 56 | std::io::Error::new( 57 | std::io::ErrorKind::InvalidData, 58 | format!("error loading config: {e}"), 59 | ) 60 | })?; 61 | 62 | // Create our Agent implementation with notification channel 63 | let agent = Rc::new(codex_agent::CodexAgent::new(config)); 64 | 65 | let stdin = tokio::io::stdin().compat(); 66 | let stdout = tokio::io::stdout().compat_write(); 67 | 68 | // Run the I/O task to handle the actual communication 69 | LocalSet::new() 70 | .run_until(async move { 71 | // Create the ACP connection 72 | let (client, io_task) = AgentSideConnection::new(agent.clone(), stdout, stdin, |fut| { 73 | tokio::task::spawn_local(fut); 74 | }); 75 | 76 | if ACP_CLIENT.set(Arc::new(client)).is_err() { 77 | return Err(std::io::Error::other("ACP client already set")); 78 | } 79 | 80 | io_task 81 | .await 82 | .map_err(|e| std::io::Error::other(format!("ACP I/O error: {e}"))) 83 | }) 84 | .await?; 85 | 86 | Ok(()) 87 | } 88 | 89 | // Re-export the MCP server types for compatibility 90 | pub use codex_mcp_server::{ 91 | CodexToolCallParam, CodexToolCallReplyParam, ExecApprovalElicitRequestParams, 92 | ExecApprovalResponse, PatchApprovalElicitRequestParams, PatchApprovalResponse, 93 | }; 94 | -------------------------------------------------------------------------------- /npm/testing/test-platform-detection.js: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | /** 4 | * Test the platform detection logic from the wrapper script 5 | */ 6 | 7 | function getPlatformPackage() { 8 | const platform = process.platform; 9 | const arch = process.arch; 10 | 11 | const platformMap = { 12 | darwin: { 13 | arm64: "codex-acp-darwin-arm64", 14 | x64: "codex-acp-darwin-x64", 15 | }, 16 | linux: { 17 | arm64: "codex-acp-linux-arm64", 18 | x64: "codex-acp-linux-x64", 19 | }, 20 | win32: { 21 | arm64: "codex-acp-win32-arm64", 22 | x64: "codex-acp-win32-x64", 23 | }, 24 | }; 25 | 26 | const packages = platformMap[platform]; 27 | if (!packages) { 28 | console.error(`Unsupported platform: ${platform}`); 29 | process.exit(1); 30 | } 31 | 32 | const packageName = packages[arch]; 33 | if (!packageName) { 34 | console.error(`Unsupported architecture: ${arch} on ${platform}`); 35 | process.exit(1); 36 | } 37 | 38 | return packageName; 39 | } 40 | 41 | // Test all known platform/arch combinations 42 | function testAllPlatforms() { 43 | const testCases = [ 44 | { platform: "darwin", arch: "arm64", expected: "codex-acp-darwin-arm64" }, 45 | { platform: "darwin", arch: "x64", expected: "codex-acp-darwin-x64" }, 46 | { platform: "linux", arch: "arm64", expected: "codex-acp-linux-arm64" }, 47 | { platform: "linux", arch: "x64", expected: "codex-acp-linux-x64" }, 48 | { platform: "win32", arch: "arm64", expected: "codex-acp-win32-arm64" }, 49 | { platform: "win32", arch: "x64", expected: "codex-acp-win32-x64" }, 50 | ]; 51 | 52 | console.log("Testing platform detection logic...\n"); 53 | 54 | let allPassed = true; 55 | 56 | for (const testCase of testCases) { 57 | // Mock the platform and arch 58 | const originalPlatform = process.platform; 59 | const originalArch = process.arch; 60 | 61 | Object.defineProperty(process, "platform", { 62 | value: testCase.platform, 63 | configurable: true, 64 | }); 65 | Object.defineProperty(process, "arch", { 
66 | value: testCase.arch, 67 | configurable: true, 68 | }); 69 | 70 | try { 71 | const result = getPlatformPackage(); 72 | if (result === testCase.expected) { 73 | console.log(`✓ ${testCase.platform}-${testCase.arch} -> ${result}`); 74 | } else { 75 | console.error( 76 | `✗ ${testCase.platform}-${testCase.arch} -> Expected: ${testCase.expected}, Got: ${result}`, 77 | ); 78 | allPassed = false; 79 | } 80 | } catch (e) { 81 | console.error( 82 | `✗ ${testCase.platform}-${testCase.arch} -> Error: ${e.message}`, 83 | ); 84 | allPassed = false; 85 | } finally { 86 | // Restore original values 87 | Object.defineProperty(process, "platform", { 88 | value: originalPlatform, 89 | configurable: true, 90 | }); 91 | Object.defineProperty(process, "arch", { 92 | value: originalArch, 93 | configurable: true, 94 | }); 95 | } 96 | } 97 | 98 | console.log(); 99 | if (allPassed) { 100 | console.log("✓ All platform detection tests passed!"); 101 | return 0; 102 | } else { 103 | console.error("✗ Some platform detection tests failed"); 104 | return 1; 105 | } 106 | } 107 | 108 | // Run tests 109 | const exitCode = testAllPlatforms(); 110 | 111 | // Show current platform info 112 | console.log("\nCurrent platform:"); 113 | console.log(` Platform: ${process.platform}`); 114 | console.log(` Arch: ${process.arch}`); 115 | console.log(` Package: ${getPlatformPackage()}`); 116 | 117 | process.exit(exitCode); 118 | -------------------------------------------------------------------------------- /script/sign-windows.ps1: -------------------------------------------------------------------------------- 1 | [CmdletBinding()] 2 | param ( 3 | [Parameter(Mandatory = $true, Position = 0)] 4 | [string]$BinaryPath 5 | ) 6 | 7 | $ErrorActionPreference = 'Stop' 8 | 9 | Write-Host "Windows code signing script for codex-acp" 10 | Write-Host "Binary path: $BinaryPath" 11 | 12 | if (-not (Test-Path $BinaryPath)) { 13 | Write-Error "Error: Binary not found at $BinaryPath" 14 | exit 1 15 | } 16 | 17 | 
$BinaryPath = Convert-Path $BinaryPath 18 | Write-Host "Binary path (absolute): $BinaryPath" 19 | 20 | # Verify the binary exists 21 | if (-not (Test-Path $BinaryPath)) { 22 | Write-Error "Error: Binary not found at $BinaryPath" 23 | exit 1 24 | } 25 | 26 | $canCodeSign = $false 27 | 28 | # Check if all required environment variables are present 29 | $requiredVars = @( 30 | 'AZURE_TENANT_ID', 31 | 'AZURE_CLIENT_ID', 32 | 'AZURE_CLIENT_SECRET', 33 | 'ACCOUNT_NAME', 34 | 'CERT_PROFILE_NAME', 35 | 'ENDPOINT', 36 | 'FILE_DIGEST', 37 | 'TIMESTAMP_DIGEST', 38 | 'TIMESTAMP_SERVER' 39 | ) 40 | 41 | $missingVars = @() 42 | foreach ($var in $requiredVars) { 43 | if (-not (Test-Path "env:$var") -or [string]::IsNullOrWhiteSpace((Get-Item "env:$var").Value)) { 44 | $missingVars += $var 45 | } 46 | } 47 | 48 | if ($missingVars.Count -eq 0) { 49 | $canCodeSign = $true 50 | Write-Host "All required environment variables found." 51 | } else { 52 | Write-Host "Missing environment variables: $($missingVars -join ', ')" 53 | } 54 | 55 | if ($canCodeSign) { 56 | Write-Host "Signing binary with Azure Trusted Signing..." 57 | 58 | try { 59 | # Check if Az.CodeSigning module is available 60 | if (-not (Get-Module -ListAvailable -Name Az.CodeSigning)) { 61 | Write-Host "Installing Az.CodeSigning module..." 62 | Install-Module -Name Az.CodeSigning -Repository PSGallery -Force -Scope CurrentUser 63 | } 64 | Import-Module Az.CodeSigning -ErrorAction Stop 65 | 66 | # Check if TrustedSigning module is available 67 | if (-not (Get-Module -ListAvailable -Name TrustedSigning)) { 68 | Write-Host "Installing TrustedSigning module..." 
69 | Install-Module -Name TrustedSigning -Repository PSGallery -Force -Scope CurrentUser 70 | } 71 | Import-Module TrustedSigning -ErrorAction Stop 72 | 73 | # Authenticate with Azure using service principal 74 | $securePassword = ConvertTo-SecureString $env:AZURE_CLIENT_SECRET -AsPlainText -Force 75 | $credential = New-Object System.Management.Automation.PSCredential($env:AZURE_CLIENT_ID, $securePassword) 76 | 77 | Connect-AzAccount -ServicePrincipal -Tenant $env:AZURE_TENANT_ID -Credential $credential -ErrorAction Stop | Out-Null 78 | 79 | Write-Host "Connected to Azure successfully." 80 | 81 | # Prepare signing parameters 82 | $params = @{ 83 | Endpoint = $env:ENDPOINT 84 | CodeSigningAccountName = $env:ACCOUNT_NAME 85 | CertificateProfileName = $env:CERT_PROFILE_NAME 86 | FileDigest = $env:FILE_DIGEST 87 | TimestampDigest = $env:TIMESTAMP_DIGEST 88 | TimestampRfc3161 = $env:TIMESTAMP_SERVER 89 | Files = $BinaryPath 90 | } 91 | 92 | # Enable trace if requested 93 | if ($env:TRACE -and [System.Convert]::ToBoolean($env:TRACE)) { 94 | Set-PSDebug -Trace 2 95 | } 96 | 97 | # Invoke signing 98 | Write-Host "Invoking Trusted Signing..." 
99 | Invoke-TrustedSigning @params 100 | 101 | Write-Host "✓ Successfully signed $BinaryPath" 102 | exit 0 103 | } 104 | catch { 105 | Write-Error "Failed to sign binary: $_" 106 | Write-Host "Error details: $($_.Exception.Message)" 107 | exit 1 108 | } 109 | finally { 110 | # Clean up 111 | if (Get-Command Disconnect-AzAccount -ErrorAction SilentlyContinue) { 112 | Disconnect-AzAccount -ErrorAction SilentlyContinue | Out-Null 113 | } 114 | Set-PSDebug -Off 115 | } 116 | } else { 117 | exit 1 118 | } 119 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: [main, master] 6 | pull_request: 7 | branches: [main, master] 8 | workflow_dispatch: 9 | 10 | env: 11 | CARGO_TERM_COLOR: always 12 | 13 | jobs: 14 | test: 15 | name: Test - ${{ matrix.os }} ${{ matrix.target }} 16 | strategy: 17 | fail-fast: false 18 | matrix: 19 | include: 20 | - os: macos-14 21 | target: aarch64-apple-darwin 22 | - os: macos-14 23 | target: x86_64-apple-darwin 24 | - os: ubuntu-22.04 25 | target: x86_64-unknown-linux-gnu 26 | - os: ubuntu-22.04 27 | target: x86_64-unknown-linux-musl 28 | - os: ubuntu-22.04-arm 29 | target: aarch64-unknown-linux-gnu 30 | - os: ubuntu-22.04-arm 31 | target: aarch64-unknown-linux-musl 32 | - os: windows-latest 33 | target: x86_64-pc-windows-msvc 34 | - os: windows-11-arm 35 | target: aarch64-pc-windows-msvc 36 | 37 | runs-on: ${{ matrix.os }} 38 | 39 | steps: 40 | - uses: actions/checkout@v4 41 | 42 | - name: Install Rust 43 | uses: dtolnay/rust-toolchain@stable 44 | with: 45 | targets: ${{ matrix.target }} 46 | 47 | - name: Cache cargo registry 48 | uses: actions/cache@v4 49 | with: 50 | path: ~/.cargo/registry 51 | key: ${{ matrix.os }}-${{ matrix.target }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }} 52 | restore-keys: | 53 | ${{ matrix.os }}-${{ matrix.target 
}}-cargo-registry- 54 | 55 | - name: Cache cargo index 56 | uses: actions/cache@v4 57 | with: 58 | path: ~/.cargo/git 59 | key: ${{ matrix.os }}-${{ matrix.target }}-cargo-git-${{ hashFiles('**/Cargo.lock') }} 60 | restore-keys: | 61 | ${{ matrix.os }}-${{ matrix.target }}-cargo-git- 62 | 63 | - name: Cache target directory 64 | uses: actions/cache@v4 65 | with: 66 | path: target 67 | key: ${{ matrix.os }}-${{ matrix.target }}-cargo-target-${{ hashFiles('**/Cargo.lock') }} 68 | restore-keys: | 69 | ${{ matrix.os }}-${{ matrix.target }}-cargo-target- 70 | 71 | - if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}} 72 | name: Install musl build tools 73 | run: | 74 | sudo apt-get update 75 | sudo apt-get install -y musl-tools pkg-config 76 | 77 | - name: Run tests 78 | run: cargo test --release --target ${{ matrix.target }} 79 | 80 | lint: 81 | name: Lint 82 | runs-on: ubuntu-latest 83 | 84 | steps: 85 | - uses: actions/checkout@v4 86 | 87 | - name: Install Rust 88 | uses: dtolnay/rust-toolchain@stable 89 | with: 90 | components: rustfmt, clippy 91 | 92 | - name: Cache cargo registry 93 | uses: actions/cache@v4 94 | with: 95 | path: ~/.cargo/registry 96 | key: ${{ runner.os }}-lint-cargo-registry-${{ hashFiles('**/Cargo.lock') }} 97 | restore-keys: | 98 | ${{ runner.os }}-lint-cargo-registry- 99 | 100 | - name: Cache cargo index 101 | uses: actions/cache@v4 102 | with: 103 | path: ~/.cargo/git 104 | key: ${{ runner.os }}-lint-cargo-git-${{ hashFiles('**/Cargo.lock') }} 105 | restore-keys: | 106 | ${{ runner.os }}-lint-cargo-git- 107 | 108 | - name: Check formatting 109 | run: cargo fmt --all -- --check 110 | 111 | - name: Run clippy 112 | run: cargo clippy --all-targets --all-features -- -D warnings 113 | 114 | npm-validation: 115 | name: NPM Package Validation 116 | runs-on: ubuntu-latest 117 | 118 | steps: 119 | - uses: actions/checkout@v4 120 | 121 | - name: Setup Node.js 122 | uses: actions/setup-node@v4 123 | 
with: 124 | node-version: "lts/*" 125 | 126 | - name: Run validation script 127 | run: bash npm/testing/validate.sh 128 | 129 | - name: Run platform detection tests 130 | run: node npm/testing/test-platform-detection.js 131 | -------------------------------------------------------------------------------- /src/local_spawner.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | io::Cursor, 3 | path::PathBuf, 4 | sync::{Arc, Mutex}, 5 | }; 6 | 7 | use agent_client_protocol::{ 8 | AgentSideConnection, Client, ClientCapabilities, ReadTextFileRequest, SessionId, 9 | WriteTextFileRequest, 10 | }; 11 | use codex_apply_patch::StdFs; 12 | use tokio::sync::mpsc; 13 | 14 | use crate::ACP_CLIENT; 15 | 16 | #[derive(Debug)] 17 | pub enum FsTask { 18 | ReadFile { 19 | session_id: SessionId, 20 | path: PathBuf, 21 | tx: std::sync::mpsc::Sender>, 22 | }, 23 | ReadFileLimit { 24 | session_id: SessionId, 25 | path: PathBuf, 26 | limit: usize, 27 | tx: tokio::sync::oneshot::Sender>, 28 | }, 29 | WriteFile { 30 | session_id: SessionId, 31 | path: PathBuf, 32 | content: String, 33 | tx: std::sync::mpsc::Sender>, 34 | }, 35 | } 36 | 37 | impl FsTask { 38 | async fn run(self) { 39 | match self { 40 | FsTask::ReadFile { 41 | session_id, 42 | path, 43 | tx, 44 | } => { 45 | let read_text_file = 46 | Self::client().read_text_file(ReadTextFileRequest::new(session_id, path)); 47 | let response = read_text_file 48 | .await 49 | .map(|response| response.content) 50 | .map_err(|e| std::io::Error::other(e.to_string())); 51 | tx.send(response).ok(); 52 | } 53 | FsTask::ReadFileLimit { 54 | session_id, 55 | path, 56 | limit, 57 | tx, 58 | } => { 59 | let read_text_file = Self::client().read_text_file( 60 | ReadTextFileRequest::new(session_id, path) 61 | .limit(limit.try_into().unwrap_or(u32::MAX)), 62 | ); 63 | let response = read_text_file 64 | .await 65 | .map(|response| response.content) 66 | .map_err(|e| std::io::Error::other(e.to_string())); 67 
| tx.send(response).ok(); 68 | } 69 | FsTask::WriteFile { 70 | session_id, 71 | path, 72 | content, 73 | tx, 74 | } => { 75 | let response = Self::client() 76 | .write_text_file(WriteTextFileRequest::new(session_id, path, content)) 77 | .await 78 | .map(|_| ()) 79 | .map_err(|e| std::io::Error::other(e.to_string())); 80 | tx.send(response).ok(); 81 | } 82 | } 83 | } 84 | 85 | fn client() -> &'static AgentSideConnection { 86 | ACP_CLIENT.get().expect("Missing ACP client") 87 | } 88 | } 89 | 90 | pub struct AcpFs { 91 | client_capabilities: Arc>, 92 | local_spawner: LocalSpawner, 93 | session_id: SessionId, 94 | } 95 | 96 | impl AcpFs { 97 | pub fn new( 98 | session_id: SessionId, 99 | client_capabilities: Arc>, 100 | local_spawner: LocalSpawner, 101 | ) -> Self { 102 | Self { 103 | client_capabilities, 104 | local_spawner, 105 | session_id, 106 | } 107 | } 108 | } 109 | 110 | impl codex_apply_patch::Fs for AcpFs { 111 | fn read_to_string(&self, path: &std::path::Path) -> std::io::Result { 112 | if !self.client_capabilities.lock().unwrap().fs.read_text_file { 113 | return StdFs.read_to_string(path); 114 | } 115 | let (tx, rx) = std::sync::mpsc::channel(); 116 | self.local_spawner.spawn(FsTask::ReadFile { 117 | session_id: self.session_id.clone(), 118 | path: std::path::absolute(path)?, 119 | tx, 120 | }); 121 | rx.recv() 122 | .map_err(|e| std::io::Error::other(e.to_string())) 123 | .flatten() 124 | } 125 | 126 | fn write(&self, path: &std::path::Path, contents: &[u8]) -> std::io::Result<()> { 127 | if !self.client_capabilities.lock().unwrap().fs.write_text_file { 128 | return StdFs.write(path, contents); 129 | } 130 | let (tx, rx) = std::sync::mpsc::channel(); 131 | self.local_spawner.spawn(FsTask::WriteFile { 132 | session_id: self.session_id.clone(), 133 | path: std::path::absolute(path)?, 134 | content: String::from_utf8(contents.to_vec()) 135 | .map_err(|e| std::io::Error::other(e.to_string()))?, 136 | tx, 137 | }); 138 | rx.recv() 139 | .map_err(|e| 
std::io::Error::other(e.to_string())) 140 | .flatten() 141 | } 142 | } 143 | 144 | impl codex_core::codex::Fs for AcpFs { 145 | fn file_buffer( 146 | &self, 147 | path: &std::path::Path, 148 | limit: usize, 149 | ) -> std::pin::Pin< 150 | Box< 151 | dyn Future>> 152 | + Send, 153 | >, 154 | > { 155 | if !self.client_capabilities.lock().unwrap().fs.read_text_file { 156 | return StdFs.file_buffer(path, limit); 157 | } 158 | let (tx, rx) = tokio::sync::oneshot::channel(); 159 | let path = match std::path::absolute(path) { 160 | Ok(path) => path, 161 | Err(e) => return Box::pin(async move { Err(std::io::Error::other(e.to_string())) }), 162 | }; 163 | self.local_spawner.spawn(FsTask::ReadFileLimit { 164 | session_id: self.session_id.clone(), 165 | path, 166 | limit, 167 | tx, 168 | }); 169 | Box::pin(async move { 170 | let file = rx 171 | .await 172 | .map_err(|e| std::io::Error::other(e.to_string())) 173 | .flatten()?; 174 | 175 | Ok(Box::new(tokio::io::BufReader::new(Cursor::new(file.into_bytes()))) as _) 176 | }) 177 | } 178 | } 179 | 180 | #[derive(Clone)] 181 | pub struct LocalSpawner { 182 | send: mpsc::UnboundedSender, 183 | } 184 | 185 | impl LocalSpawner { 186 | pub fn new() -> Self { 187 | let (send, mut recv) = mpsc::unbounded_channel::(); 188 | 189 | let rt = tokio::runtime::Builder::new_current_thread() 190 | .enable_all() 191 | .build() 192 | .unwrap(); 193 | 194 | std::thread::spawn(move || { 195 | let local = tokio::task::LocalSet::new(); 196 | 197 | local.spawn_local(async move { 198 | while let Some(new_task) = recv.recv().await { 199 | tokio::task::spawn_local(new_task.run()); 200 | } 201 | // If the while loop returns, then all the LocalSpawner 202 | // objects have been dropped. 203 | }); 204 | 205 | // This will return once all senders are dropped and all 206 | // spawned tasks have returned. 
207 | rt.block_on(local); 208 | }); 209 | 210 | Self { send } 211 | } 212 | 213 | pub fn spawn(&self, task: FsTask) { 214 | self.send 215 | .send(task) 216 | .expect("Thread with LocalSet has shut down."); 217 | } 218 | } 219 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | tag_name: 7 | description: "Tag name for release (optional - uses version from Cargo.toml if not specified)" 8 | required: false 9 | type: string 10 | 11 | permissions: 12 | contents: write 13 | 14 | jobs: 15 | get-version: 16 | name: Get Version 17 | runs-on: ubuntu-latest 18 | outputs: 19 | version: ${{ steps.get_version.outputs.version }} 20 | tag_name: ${{ steps.get_version.outputs.tag_name }} 21 | steps: 22 | - name: Checkout codex-acp 23 | uses: actions/checkout@v4 24 | 25 | - name: Get version from Cargo.toml 26 | id: get_version 27 | run: | 28 | VERSION=$(grep -m1 "^version" Cargo.toml | sed 's/.*"\(.*\)".*/\1/') 29 | echo "version=$VERSION" >> $GITHUB_OUTPUT 30 | if [ -n "${{ github.event.inputs.tag_name }}" ]; then 31 | echo "tag_name=${{ github.event.inputs.tag_name }}" >> $GITHUB_OUTPUT 32 | else 33 | echo "tag_name=v$VERSION" >> $GITHUB_OUTPUT 34 | fi 35 | 36 | build: 37 | name: Build - ${{ matrix.os }} 38 | needs: get-version 39 | strategy: 40 | fail-fast: false 41 | matrix: 42 | include: 43 | - os: macos-14 44 | target: aarch64-apple-darwin 45 | binary_extension: "" 46 | - os: macos-14 47 | target: x86_64-apple-darwin 48 | binary_extension: "" 49 | - os: ubuntu-22.04 50 | target: x86_64-unknown-linux-gnu 51 | binary_extension: "" 52 | - os: ubuntu-22.04 53 | target: x86_64-unknown-linux-musl 54 | binary_extension: "" 55 | - os: ubuntu-22.04-arm 56 | target: aarch64-unknown-linux-gnu 57 | - os: ubuntu-22.04-arm 58 | target: aarch64-unknown-linux-musl 59 | 
binary_extension: "" 60 | - os: windows-latest 61 | target: x86_64-pc-windows-msvc 62 | binary_extension: ".exe" 63 | - os: windows-11-arm 64 | target: aarch64-pc-windows-msvc 65 | binary_extension: ".exe" 66 | 67 | runs-on: ${{ matrix.os }} 68 | 69 | steps: 70 | - uses: actions/checkout@v4 71 | 72 | - name: Install Rust 73 | uses: dtolnay/rust-toolchain@stable 74 | with: 75 | targets: ${{ matrix.target }} 76 | 77 | - name: Cache cargo registry 78 | uses: actions/cache@v4 79 | with: 80 | path: ~/.cargo/registry 81 | key: ${{ matrix.os }}-${{ matrix.target }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }} 82 | restore-keys: | 83 | ${{ matrix.os }}-${{ matrix.target }}-cargo-registry- 84 | 85 | - name: Cache cargo index 86 | uses: actions/cache@v4 87 | with: 88 | path: ~/.cargo/git 89 | key: ${{ matrix.os }}-${{ matrix.target }}-cargo-git-${{ hashFiles('**/Cargo.lock') }} 90 | restore-keys: | 91 | ${{ matrix.os }}-${{ matrix.target }}-cargo-git- 92 | 93 | - if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}} 94 | name: Install musl build tools 95 | run: | 96 | sudo apt-get update 97 | sudo apt-get install -y musl-tools pkg-config 98 | 99 | - name: Build release binary 100 | run: cargo build --release --target ${{ matrix.target }} 101 | 102 | - name: Code sign macOS binary 103 | if: startsWith(matrix.os, 'macos') 104 | env: 105 | MACOS_CERTIFICATE: ${{ secrets.MACOS_CERTIFICATE }} 106 | MACOS_CERTIFICATE_PASSWORD: ${{ secrets.MACOS_CERTIFICATE_PASSWORD }} 107 | APPLE_NOTARIZATION_KEY: ${{ secrets.APPLE_NOTARIZATION_KEY }} 108 | APPLE_NOTARIZATION_KEY_ID: ${{ secrets.APPLE_NOTARIZATION_KEY_ID }} 109 | APPLE_NOTARIZATION_ISSUER_ID: ${{ secrets.APPLE_NOTARIZATION_ISSUER_ID }} 110 | run: | 111 | ./script/sign-mac target/${{ matrix.target }}/release/codex-acp 112 | 113 | - name: Code sign Windows binary 114 | if: startsWith(matrix.os, 'windows') 115 | env: 116 | AZURE_TENANT_ID: ${{ secrets.AZURE_SIGNING_TENANT_ID }} 
117 | AZURE_CLIENT_ID: ${{ secrets.AZURE_SIGNING_CLIENT_ID }} 118 | AZURE_CLIENT_SECRET: ${{ secrets.AZURE_SIGNING_CLIENT_SECRET }} 119 | ACCOUNT_NAME: ${{ vars.AZURE_SIGNING_ACCOUNT_NAME }} 120 | CERT_PROFILE_NAME: ${{ vars.AZURE_SIGNING_CERT_PROFILE_NAME }} 121 | ENDPOINT: ${{ vars.AZURE_SIGNING_ENDPOINT }} 122 | FILE_DIGEST: SHA256 123 | TIMESTAMP_DIGEST: SHA256 124 | TIMESTAMP_SERVER: http://timestamp.acs.microsoft.com 125 | run: | 126 | .\script\sign-windows.ps1 target\${{ matrix.target }}\release\codex-acp.exe 127 | 128 | - name: Create archive 129 | id: create_archive 130 | shell: bash 131 | run: | 132 | BINARY_NAME="codex-acp${{ matrix.binary_extension }}" 133 | ARCHIVE_NAME="codex-acp-${{ needs.get-version.outputs.version }}-${{ matrix.target }}" 134 | 135 | cd target/${{ matrix.target }}/release 136 | 137 | if [ "${{ matrix.os }}" = "windows-latest" ] || [ "${{ matrix.os }}" = "windows-11-arm" ]; then 138 | 7z a -tzip "${ARCHIVE_NAME}.zip" "${BINARY_NAME}" 139 | echo "archive_path=${ARCHIVE_NAME}.zip" >> $GITHUB_OUTPUT 140 | echo "archive_name=${ARCHIVE_NAME}.zip" >> $GITHUB_OUTPUT 141 | mv "${ARCHIVE_NAME}.zip" ../../../ 142 | else 143 | tar czf "${ARCHIVE_NAME}.tar.gz" "${BINARY_NAME}" 144 | echo "archive_path=${ARCHIVE_NAME}.tar.gz" >> $GITHUB_OUTPUT 145 | echo "archive_name=${ARCHIVE_NAME}.tar.gz" >> $GITHUB_OUTPUT 146 | mv "${ARCHIVE_NAME}.tar.gz" ../../../ 147 | fi 148 | 149 | - name: Upload artifact 150 | uses: actions/upload-artifact@v4 151 | with: 152 | name: ${{ steps.create_archive.outputs.archive_name }} 153 | path: ${{ steps.create_archive.outputs.archive_path }} 154 | retention-days: 1 155 | 156 | npm-packages: 157 | name: Create NPM Packages 158 | needs: [get-version, build] 159 | runs-on: ubuntu-latest 160 | outputs: 161 | packages: ${{ steps.create_packages.outputs.packages }} 162 | 163 | steps: 164 | - uses: actions/checkout@v4 165 | 166 | - name: Setup Node.js 167 | uses: actions/setup-node@v6 168 | with: 169 | node-version: "lts/*" 170 
| registry-url: "https://registry.npmjs.org" 171 | 172 | - name: Download all artifacts 173 | uses: actions/download-artifact@v5 174 | with: 175 | path: artifacts 176 | merge-multiple: true 177 | 178 | - name: Display structure of downloaded files 179 | run: ls -la artifacts/ 180 | 181 | - name: Create platform-specific packages 182 | run: bash npm/publish/create-platform-packages.sh ./artifacts ./npm-packages ${{ needs.get-version.outputs.version }} 183 | 184 | - name: Update base package version 185 | run: bash npm/publish/update-base-package.sh ${{ needs.get-version.outputs.version }} 186 | 187 | - name: Upload NPM packages 188 | uses: actions/upload-artifact@v4 189 | with: 190 | name: npm-packages 191 | path: npm-packages/ 192 | retention-days: 1 193 | 194 | - name: Upload base package 195 | uses: actions/upload-artifact@v4 196 | with: 197 | name: npm-base-package 198 | path: npm/ 199 | retention-days: 1 200 | 201 | publish-npm-platform: 202 | name: Publish NPM Platform Packages 203 | needs: [get-version, npm-packages] 204 | runs-on: ubuntu-latest 205 | if: ${{ !github.event.repository.fork }} 206 | environment: release # Optional: for enhanced security 207 | permissions: 208 | contents: read 209 | id-token: write 210 | 211 | steps: 212 | - name: Setup Node.js 213 | uses: actions/setup-node@v6 214 | with: 215 | node-version: "lts/*" 216 | registry-url: "https://registry.npmjs.org" 217 | - name: Update npm 218 | run: npm install -g npm@latest 219 | 220 | - name: Download NPM packages 221 | uses: actions/download-artifact@v5 222 | with: 223 | name: npm-packages 224 | path: npm-packages 225 | 226 | - name: Publish platform packages 227 | run: | 228 | for pkg in npm-packages/*; do 229 | if [ -d "$pkg" ]; then 230 | echo "Publishing $(basename $pkg)..." 231 | cd "$pkg" 232 | npm publish 233 | cd ../.. 
234 | fi 235 | done 236 | 237 | publish-npm-base: 238 | name: Publish NPM Base Package 239 | needs: [get-version, npm-packages, publish-npm-platform] 240 | runs-on: ubuntu-latest 241 | if: ${{ !github.event.repository.fork }} 242 | environment: release # Optional: for enhanced security 243 | permissions: 244 | contents: read 245 | id-token: write 246 | 247 | steps: 248 | - name: Setup Node.js 249 | uses: actions/setup-node@v6 250 | with: 251 | node-version: "lts/*" 252 | registry-url: "https://registry.npmjs.org" 253 | - name: Update npm 254 | run: npm install -g npm@latest 255 | 256 | - name: Download base package 257 | uses: actions/download-artifact@v5 258 | with: 259 | name: npm-base-package 260 | path: npm 261 | 262 | - name: Wait for platform packages to be available 263 | run: | 264 | echo "Waiting 30 seconds for platform packages to be available on npm..." 265 | sleep 30 266 | 267 | - name: Publish base package 268 | run: | 269 | cd npm 270 | npm publish 271 | 272 | create-release: 273 | name: Create Release 274 | needs: [get-version, build, publish-npm-base] 275 | runs-on: ubuntu-latest 276 | 277 | steps: 278 | - uses: actions/checkout@v4 279 | 280 | - name: Download all artifacts 281 | uses: actions/download-artifact@v5 282 | with: 283 | path: artifacts 284 | 285 | - name: Display structure of downloaded files 286 | run: ls -la artifacts/ 287 | 288 | - name: Create Release 289 | id: create_release 290 | uses: softprops/action-gh-release@v1 291 | with: 292 | tag_name: ${{ needs.get-version.outputs.tag_name }} 293 | name: Release ${{ needs.get-version.outputs.version }} 294 | draft: false 295 | prerelease: false 296 | generate_release_notes: true 297 | files: | 298 | artifacts/**/*.tar.gz 299 | artifacts/**/*.zip 300 | env: 301 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 302 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 
Copyright 2022 - 2025 Zed Industries, Inc. 2 | 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | 12 | Unless required by applicable law or agreed to in writing, software 13 | distributed under the License is distributed on an "AS IS" BASIS, 14 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | See the License for the specific language governing permissions and 16 | limitations under the License. 17 | 18 | 19 | 20 | 21 | Apache License 22 | Version 2.0, January 2004 23 | http://www.apache.org/licenses/ 24 | 25 | 26 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 27 | 28 | 29 | 1. Definitions. 30 | 31 | 32 | "License" shall mean the terms and conditions for use, reproduction, 33 | and distribution as defined by Sections 1 through 9 of this document. 34 | 35 | 36 | "Licensor" shall mean the copyright owner or entity authorized by 37 | the copyright owner that is granting the License. 38 | 39 | 40 | "Legal Entity" shall mean the union of the acting entity and all 41 | other entities that control, are controlled by, or are under common 42 | control with that entity. For the purposes of this definition, 43 | "control" means (i) the power, direct or indirect, to cause the 44 | direction or management of such entity, whether by contract or 45 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 46 | outstanding shares, or (iii) beneficial ownership of such entity. 47 | 48 | 49 | "You" (or "Your") shall mean an individual or Legal Entity 50 | exercising permissions granted by this License. 51 | 52 | 53 | "Source" form shall mean the preferred form for making modifications, 54 | including but not limited to software source code, documentation 55 | source, and configuration files. 
56 | 57 | 58 | "Object" form shall mean any form resulting from mechanical 59 | transformation or translation of a Source form, including but 60 | not limited to compiled object code, generated documentation, 61 | and conversions to other media types. 62 | 63 | 64 | "Work" shall mean the work of authorship, whether in Source or 65 | Object form, made available under the License, as indicated by a 66 | copyright notice that is included in or attached to the work 67 | (an example is provided in the Appendix below). 68 | 69 | 70 | "Derivative Works" shall mean any work, whether in Source or Object 71 | form, that is based on (or derived from) the Work and for which the 72 | editorial revisions, annotations, elaborations, or other modifications 73 | represent, as a whole, an original work of authorship. For the purposes 74 | of this License, Derivative Works shall not include works that remain 75 | separable from, or merely link (or bind by name) to the interfaces of, 76 | the Work and Derivative Works thereof. 77 | 78 | 79 | "Contribution" shall mean any work of authorship, including 80 | the original version of the Work and any modifications or additions 81 | to that Work or Derivative Works thereof, that is intentionally 82 | submitted to Licensor for inclusion in the Work by the copyright owner 83 | or by an individual or Legal Entity authorized to submit on behalf of 84 | the copyright owner. For the purposes of this definition, "submitted" 85 | means any form of electronic, verbal, or written communication sent 86 | to the Licensor or its representatives, including but not limited to 87 | communication on electronic mailing lists, source code control systems, 88 | and issue tracking systems that are managed by, or on behalf of, the 89 | Licensor for the purpose of discussing and improving the Work, but 90 | excluding communication that is conspicuously marked or otherwise 91 | designated in writing by the copyright owner as "Not a Contribution." 
92 | 93 | 94 | "Contributor" shall mean Licensor and any individual or Legal Entity 95 | on behalf of whom a Contribution has been received by Licensor and 96 | subsequently incorporated within the Work. 97 | 98 | 99 | 2. Grant of Copyright License. Subject to the terms and conditions of 100 | this License, each Contributor hereby grants to You a perpetual, 101 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 102 | copyright license to reproduce, prepare Derivative Works of, 103 | publicly display, publicly perform, sublicense, and distribute the 104 | Work and such Derivative Works in Source or Object form. 105 | 106 | 107 | 3. Grant of Patent License. Subject to the terms and conditions of 108 | this License, each Contributor hereby grants to You a perpetual, 109 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 110 | (except as stated in this section) patent license to make, have made, 111 | use, offer to sell, sell, import, and otherwise transfer the Work, 112 | where such license applies only to those patent claims licensable 113 | by such Contributor that are necessarily infringed by their 114 | Contribution(s) alone or by combination of their Contribution(s) 115 | with the Work to which such Contribution(s) was submitted. If You 116 | institute patent litigation against any entity (including a 117 | cross-claim or counterclaim in a lawsuit) alleging that the Work 118 | or a Contribution incorporated within the Work constitutes direct 119 | or contributory patent infringement, then any patent licenses 120 | granted to You under this License for that Work shall terminate 121 | as of the date such litigation is filed. 122 | 123 | 124 | 4. Redistribution. 
You may reproduce and distribute copies of the 125 | Work or Derivative Works thereof in any medium, with or without 126 | modifications, and in Source or Object form, provided that You 127 | meet the following conditions: 128 | 129 | 130 | (a) You must give any other recipients of the Work or 131 | Derivative Works a copy of this License; and 132 | 133 | 134 | (b) You must cause any modified files to carry prominent notices 135 | stating that You changed the files; and 136 | 137 | 138 | (c) You must retain, in the Source form of any Derivative Works 139 | that You distribute, all copyright, patent, trademark, and 140 | attribution notices from the Source form of the Work, 141 | excluding those notices that do not pertain to any part of 142 | the Derivative Works; and 143 | 144 | 145 | (d) If the Work includes a "NOTICE" text file as part of its 146 | distribution, then any Derivative Works that You distribute must 147 | include a readable copy of the attribution notices contained 148 | within such NOTICE file, excluding those notices that do not 149 | pertain to any part of the Derivative Works, in at least one 150 | of the following places: within a NOTICE text file distributed 151 | as part of the Derivative Works; within the Source form or 152 | documentation, if provided along with the Derivative Works; or, 153 | within a display generated by the Derivative Works, if and 154 | wherever such third-party notices normally appear. The contents 155 | of the NOTICE file are for informational purposes only and 156 | do not modify the License. You may add Your own attribution 157 | notices within Derivative Works that You distribute, alongside 158 | or as an addendum to the NOTICE text from the Work, provided 159 | that such additional attribution notices cannot be construed 160 | as modifying the License. 
161 | 162 | 163 | You may add Your own copyright statement to Your modifications and 164 | may provide additional or different license terms and conditions 165 | for use, reproduction, or distribution of Your modifications, or 166 | for any such Derivative Works as a whole, provided Your use, 167 | reproduction, and distribution of the Work otherwise complies with 168 | the conditions stated in this License. 169 | 170 | 171 | 5. Submission of Contributions. Unless You explicitly state otherwise, 172 | any Contribution intentionally submitted for inclusion in the Work 173 | by You to the Licensor shall be under the terms and conditions of 174 | this License, without any additional terms or conditions. 175 | Notwithstanding the above, nothing herein shall supersede or modify 176 | the terms of any separate license agreement you may have executed 177 | with Licensor regarding such Contributions. 178 | 179 | 180 | 6. Trademarks. This License does not grant permission to use the trade 181 | names, trademarks, service marks, or product names of the Licensor, 182 | except as required for reasonable and customary use in describing the 183 | origin of the Work and reproducing the content of the NOTICE file. 184 | 185 | 186 | 7. Disclaimer of Warranty. Unless required by applicable law or 187 | agreed to in writing, Licensor provides the Work (and each 188 | Contributor provides its Contributions) on an "AS IS" BASIS, 189 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 190 | implied, including, without limitation, any warranties or conditions 191 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 192 | PARTICULAR PURPOSE. You are solely responsible for determining the 193 | appropriateness of using or redistributing the Work and assume any 194 | risks associated with Your exercise of permissions under this License. 195 | 196 | 197 | 8. Limitation of Liability. 
In no event and under no legal theory, 198 | whether in tort (including negligence), contract, or otherwise, 199 | unless required by applicable law (such as deliberate and grossly 200 | negligent acts) or agreed to in writing, shall any Contributor be 201 | liable to You for damages, including any direct, indirect, special, 202 | incidental, or consequential damages of any character arising as a 203 | result of this License or out of the use or inability to use the 204 | Work (including but not limited to damages for loss of goodwill, 205 | work stoppage, computer failure or malfunction, or any and all 206 | other commercial damages or losses), even if such Contributor 207 | has been advised of the possibility of such damages. 208 | 209 | 210 | 9. Accepting Warranty or Additional Liability. While redistributing 211 | the Work or Derivative Works thereof, You may choose to offer, 212 | and charge a fee for, acceptance of support, warranty, indemnity, 213 | or other liability obligations and/or rights consistent with this 214 | License. However, in accepting such obligations, You may act only 215 | on Your own behalf and on Your sole responsibility, not on behalf 216 | of any other Contributor, and only if You agree to indemnify, 217 | defend, and hold each Contributor harmless for any liability 218 | incurred by, or claims asserted against, such Contributor by reason 219 | of your accepting any such warranty or additional liability. 
220 | 221 | 222 | END OF TERMS AND CONDITIONS 223 | -------------------------------------------------------------------------------- /src/prompt_args.rs: -------------------------------------------------------------------------------- 1 | /// Mostly copied from `codex_tui::bottom_pane::prompt_args`: 2 | use codex_protocol::custom_prompts::CustomPrompt; 3 | use regex_lite::Regex; 4 | use shlex::Shlex; 5 | use std::collections::HashMap; 6 | use std::collections::HashSet; 7 | use std::sync::LazyLock; 8 | 9 | static PROMPT_ARG_REGEX: LazyLock = 10 | LazyLock::new(|| Regex::new(r"\$[A-Z][A-Z0-9_]*").unwrap_or_else(|_| std::process::abort())); 11 | 12 | #[derive(Debug)] 13 | pub enum PromptArgsError { 14 | MissingAssignment { token: String }, 15 | MissingKey { token: String }, 16 | } 17 | 18 | impl PromptArgsError { 19 | fn describe(&self, command: &str) -> String { 20 | match self { 21 | PromptArgsError::MissingAssignment { token } => format!( 22 | "Could not parse {command}: expected key=value but found '{token}'. Wrap values in double quotes if they contain spaces." 23 | ), 24 | PromptArgsError::MissingKey { token } => { 25 | format!("Could not parse {command}: expected a name before '=' in '{token}'.") 26 | } 27 | } 28 | } 29 | } 30 | 31 | #[derive(Debug)] 32 | pub enum PromptExpansionError { 33 | Args { 34 | command: String, 35 | error: PromptArgsError, 36 | }, 37 | MissingArgs { 38 | command: String, 39 | missing: Vec, 40 | }, 41 | } 42 | 43 | impl PromptExpansionError { 44 | pub fn user_message(&self) -> String { 45 | match self { 46 | PromptExpansionError::Args { command, error } => error.describe(command), 47 | PromptExpansionError::MissingArgs { command, missing } => { 48 | let list = missing.join(", "); 49 | format!( 50 | "Missing required args for {command}: {list}. Provide as key=value (quote values with spaces)." 51 | ) 52 | } 53 | } 54 | } 55 | } 56 | 57 | /// Parse a first-line slash command of the form `/name `. 
/// Returns `(name, rest_after_name)` when the line starts with `/` followed
/// by a non-empty command name; otherwise returns `None`. The returned rest
/// has its leading whitespace removed.
pub fn parse_slash_name(line: &str) -> Option<(&str, &str)> {
    let body = line.strip_prefix('/')?;
    // The name runs up to the first whitespace character (or end of line).
    let name_end = body
        .char_indices()
        .find(|&(_, ch)| ch.is_whitespace())
        .map_or(body.len(), |(idx, _)| idx);
    let name = &body[..name_end];
    if name.is_empty() {
        None
    } else {
        Some((name, body[name_end..].trim_start()))
    }
}

/// Collects the unique named placeholders (`$NAME`, i.e. tokens of the shape
/// `\$[A-Z][A-Z0-9_]*`) appearing in `content`, in order of first appearance
/// and without the leading `$`.
///
/// Occurrences escaped with a second dollar sign (`$$NAME`) are skipped, as
/// is the special positional aggregate token `$ARGUMENTS`.
pub fn prompt_argument_names(content: &str) -> Vec<String> {
    let bytes = content.as_bytes();
    let mut seen = HashSet::new();
    let mut names = Vec::new();
    let mut pos = 0;
    while pos < bytes.len() {
        // A placeholder starts with `$` immediately followed by A-Z.
        if bytes[pos] != b'$' || pos + 1 >= bytes.len() || !bytes[pos + 1].is_ascii_uppercase() {
            pos += 1;
            continue;
        }
        // Extend the match over the remaining [A-Z0-9_] name characters.
        let mut end = pos + 2;
        while end < bytes.len()
            && (bytes[end].is_ascii_uppercase()
                || bytes[end].is_ascii_digit()
                || bytes[end] == b'_')
        {
            end += 1;
        }
        // `$$NAME` is an escape: the token is kept literal, not collected.
        let escaped = pos > 0 && bytes[pos - 1] == b'$';
        if !escaped {
            // The matched span is pure ASCII, so byte indices are valid
            // char boundaries for slicing.
            let name = &content[pos + 1..end];
            // `$ARGUMENTS` belongs to positional expansion, not named args.
            if name != "ARGUMENTS" && seen.insert(name.to_string()) {
                names.push(name.to_string());
            }
        }
        // Resume scanning after the matched token, like non-overlapping
        // regex iteration would.
        pos = end;
    }
    names
}

// `parse_prompt_inputs` (below) splits the `key=value` pairs that follow a
// custom prompt name. Tokens are split with shlex rules, so quoted values
// such as `USER="Alice Smith"` are supported; it fails when a token lacks
// `=` or when the key before `=` is empty.
pub fn parse_prompt_inputs(rest: &str) -> Result<HashMap<String, String>, PromptArgsError> {
    let mut map = HashMap::new();
    // An empty argument string is valid: the prompt simply takes no inputs.
    if rest.trim().is_empty() {
        return Ok(map);
    }

    // Shlex honors shell-style quoting, so `KEY="two words"` is one token.
    for token in Shlex::new(rest) {
        let Some((key, value)) = token.split_once('=') else {
            return Err(PromptArgsError::MissingAssignment { token });
        };
        if key.is_empty() {
            return Err(PromptArgsError::MissingKey { token });
        }
        // Later duplicates of a key silently overwrite earlier ones.
        map.insert(key.to_string(), value.to_string());
    }
    Ok(map)
}

/// Expands the saved prompt named `name` using the argument text in `rest`.
///
/// Returns `Ok(None)` when no prompt with that name exists in
/// `custom_prompts`. If the prompt's template contains named placeholders
/// (`$NAME`), `rest` must supply a `key=value` pair for each of them;
/// otherwise `rest` is split into positional values for `$1..$9` /
/// `$ARGUMENTS`. On success returns `Ok(Some(expanded))`; otherwise a
/// descriptive error.
pub fn expand_custom_prompt(
    name: &str,
    rest: &str,
    custom_prompts: &[CustomPrompt],
) -> Result<Option<String>, PromptExpansionError> {
    let Some(prompt) = custom_prompts.iter().find(|p| p.name == name) else {
        return Ok(None);
    };
    // If there are named placeholders, expect key=value inputs.
    let required = prompt_argument_names(&prompt.content);
    if !required.is_empty() {
        let inputs = parse_prompt_inputs(rest).map_err(|error| PromptExpansionError::Args {
            command: format!("/{name}"),
            error,
        })?;
        // Every named placeholder must be supplied; report all gaps at once.
        let missing: Vec<String> = required
            .into_iter()
            .filter(|k| !inputs.contains_key(k))
            .collect();
        if !missing.is_empty() {
            return Err(PromptExpansionError::MissingArgs {
                command: format!("/{name}"),
                missing,
            });
        }
        let content = &prompt.content;
        let replaced = PROMPT_ARG_REGEX.replace_all(content, |caps: &regex_lite::Captures<'_>| {
            // `$$NAME` escapes are kept verbatim: the match is preceded by `$`.
            if let Some(matched) = caps.get(0)
                && matched.start() > 0
                && content.as_bytes()[matched.start() - 1] == b'$'
            {
                return matched.as_str().to_string();
            }
            let whole = &caps[0];
            let key = &whole[1..];
            // Tokens with no supplied value (e.g. `$ARGUMENTS` inside a
            // named-args template) are left in place unchanged.
            inputs
                .get(key)
                .cloned()
                .unwrap_or_else(|| whole.to_string())
        });
        return Ok(Some(replaced.into_owned()));
    }

    // Otherwise, treat it as numeric/positional placeholder prompt (or none).
    let pos_args: Vec<String> = Shlex::new(rest).collect();
    let expanded = expand_numeric_placeholders(&prompt.content, &pos_args);
    Ok(Some(expanded))
}

/// Expand `$1..$9` and `$ARGUMENTS` in `content` with values from `args`.
pub fn expand_numeric_placeholders(content: &str, args: &[String]) -> String {
    let mut result = String::with_capacity(content.len());
    let mut cursor = 0;
    // `$ARGUMENTS` expands to every positional value joined by single spaces;
    // the join is computed at most once however often the token appears.
    let mut joined: Option<String> = None;

    while let Some(rel) = content[cursor..].find('$') {
        let dollar = cursor + rel;
        // Copy everything before the `$` through unchanged.
        result.push_str(&content[cursor..dollar]);
        let tail = content[dollar..].as_bytes();

        if tail.len() >= 2 && tail[1] == b'$' {
            // `$$` is an escape and stays literal.
            result.push_str("$$");
            cursor = dollar + 2;
        } else if tail.len() >= 2 && (b'1'..=b'9').contains(&tail[1]) {
            // `$1`..`$9`: substitute the positional value, or drop the token
            // silently when that position was not provided.
            if let Some(value) = args.get(usize::from(tail[1] - b'1')) {
                result.push_str(value);
            }
            cursor = dollar + 2;
        } else if content[dollar + 1..].starts_with("ARGUMENTS") {
            if !args.is_empty() {
                let all = joined.get_or_insert_with(|| args.join(" "));
                result.push_str(all);
            }
            cursor = dollar + 1 + "ARGUMENTS".len();
        } else {
            // A `$` that introduces no recognized token is kept as-is.
            result.push('$');
            cursor = dollar + 1;
        }
    }
    result.push_str(&content[cursor..]);
    result
}

#[cfg(test)]
mod tests {
    use super::*;

    /// Builds a one-element prompt list whose single prompt is named
    /// `my-prompt` and has the given template body.
    fn prompts_with(content: &str) -> Vec<CustomPrompt> {
        vec![CustomPrompt {
            name: "my-prompt".to_string(),
            path: "/tmp/my-prompt.md".to_string().into(),
            content: content.to_string(),
            description: None,
            argument_hint: None,
        }]
    }

    #[test]
    fn expand_arguments_basic() {
        let prompts = prompts_with("Review $USER changes on $BRANCH");
        let expanded =
            expand_custom_prompt("my-prompt", "USER=Alice BRANCH=main", &prompts).unwrap();
        assert_eq!(expanded, Some("Review Alice changes on main".to_string()));
    }

    #[test]
    fn quoted_values_ok() {
        let prompts = prompts_with("Pair $USER with $BRANCH");
        let expanded = expand_custom_prompt(
            "my-prompt",
            "USER=\"Alice Smith\" BRANCH=dev-main",
            &prompts,
        )
        .unwrap();
        assert_eq!(expanded, Some("Pair Alice Smith with dev-main".to_string()));
    }

    #[test]
    fn invalid_arg_token_reports_error() {
        let prompts = prompts_with("Review $USER changes");
        let message = expand_custom_prompt("my-prompt", "USER=Alice stray", &prompts)
            .unwrap_err()
            .user_message();
        assert!(message.contains("expected key=value"));
    }

    #[test]
    fn missing_required_args_reports_error() {
        let prompts = prompts_with("Review $USER changes on $BRANCH");
        let message = expand_custom_prompt("my-prompt", "USER=Alice", &prompts)
            .unwrap_err()
            .user_message();
        assert!(message.to_lowercase().contains("missing required args"));
        assert!(message.contains("BRANCH"));
    }

    #[test]
    fn escaped_placeholder_is_ignored() {
        assert_eq!(
            prompt_argument_names("literal $$USER"),
            Vec::<String>::new()
        );
        assert_eq!(
            prompt_argument_names("literal $$USER and $REAL"),
            vec!["REAL".to_string()]
        );
    }

    #[test]
    fn escaped_placeholder_remains_literal() {
        let prompts = prompts_with("literal $$USER");
        let expanded = expand_custom_prompt("my-prompt", "", &prompts).unwrap();
        assert_eq!(expanded, Some("literal $$USER".to_string()));
    }
}
| -------------------------------------------------------------------------------- /src/codex_agent.rs: -------------------------------------------------------------------------------- 1 | use agent_client_protocol::{ 2 | Agent, AgentCapabilities, AuthMethod, AuthMethodId, AuthenticateRequest, AuthenticateResponse, 3 | CancelNotification, ClientCapabilities, Error, Implementation, InitializeRequest, 4 | InitializeResponse, LoadSessionRequest, LoadSessionResponse, McpCapabilities, McpServer, 5 | McpServerHttp, McpServerStdio, NewSessionRequest, NewSessionResponse, PromptCapabilities, 6 | PromptRequest, PromptResponse, ProtocolVersion, SessionId, SetSessionModeRequest, 7 | SetSessionModeResponse, SetSessionModelRequest, SetSessionModelResponse, 8 | }; 9 | use codex_core::{ 10 | ConversationManager, NewConversation, 11 | auth::{AuthManager, read_codex_api_key_from_env, read_openai_api_key_from_env}, 12 | config::{ 13 | Config, 14 | types::{McpServerConfig, McpServerTransportConfig}, 15 | }, 16 | protocol::SessionSource, 17 | }; 18 | use codex_login::{AuthMode, CODEX_API_KEY_ENV_VAR, OPENAI_API_KEY_ENV_VAR}; 19 | use codex_protocol::ConversationId; 20 | use std::{ 21 | cell::RefCell, 22 | collections::HashMap, 23 | rc::Rc, 24 | sync::{Arc, Mutex}, 25 | }; 26 | use tracing::{debug, info}; 27 | 28 | use crate::{ 29 | conversation::Conversation, 30 | local_spawner::{AcpFs, LocalSpawner}, 31 | }; 32 | 33 | /// The Codex implementation of the ACP Agent trait. 34 | /// 35 | /// This bridges the ACP protocol with the existing codex-rs infrastructure, 36 | /// allowing codex to be used as an ACP agent. 
37 | pub struct CodexAgent { 38 | /// Handle to the current authentication 39 | auth_manager: Arc, 40 | /// Capabilities of the connected client 41 | client_capabilities: Arc>, 42 | /// The underlying codex configuration 43 | config: Config, 44 | /// Conversation manager for handling sessions 45 | conversation_manager: ConversationManager, 46 | /// Active sessions mapped by `SessionId` 47 | sessions: Rc>>>, 48 | } 49 | 50 | impl CodexAgent { 51 | /// Create a new `CodexAgent` with the given configuration 52 | pub fn new(config: Config) -> Self { 53 | let auth_manager = AuthManager::shared( 54 | config.codex_home.clone(), 55 | false, 56 | config.cli_auth_credentials_store_mode, 57 | ); 58 | 59 | let client_capabilities: Arc> = Arc::default(); 60 | 61 | let local_spawner = LocalSpawner::new(); 62 | let capabilities_clone = client_capabilities.clone(); 63 | let conversation_manager = 64 | ConversationManager::new(auth_manager.clone(), SessionSource::Unknown).with_fs( 65 | Box::new(move |conversation_id| { 66 | Arc::new(AcpFs::new( 67 | Self::session_id_from_conversation_id(conversation_id), 68 | capabilities_clone.clone(), 69 | local_spawner.clone(), 70 | )) 71 | }), 72 | ); 73 | Self { 74 | auth_manager, 75 | client_capabilities, 76 | config, 77 | conversation_manager, 78 | sessions: Rc::default(), 79 | } 80 | } 81 | 82 | fn session_id_from_conversation_id(conversation_id: ConversationId) -> SessionId { 83 | SessionId::new(conversation_id.to_string()) 84 | } 85 | 86 | fn get_conversation(&self, session_id: &SessionId) -> Result, Error> { 87 | Ok(self 88 | .sessions 89 | .borrow() 90 | .get(session_id) 91 | .ok_or_else(|| Error::resource_not_found(None))? 
92 | .clone()) 93 | } 94 | 95 | fn check_auth(&self) -> Result<(), Error> { 96 | if self.config.model_provider_id == "openai" && self.auth_manager.auth().is_none() { 97 | return Err(Error::auth_required()); 98 | } 99 | Ok(()) 100 | } 101 | } 102 | 103 | #[async_trait::async_trait(?Send)] 104 | impl Agent for CodexAgent { 105 | async fn initialize(&self, request: InitializeRequest) -> Result { 106 | let InitializeRequest { 107 | protocol_version, 108 | client_capabilities, 109 | client_info: _, // TODO: save and pass into Codex somehow 110 | .. 111 | } = request; 112 | debug!("Received initialize request with protocol version {protocol_version:?}",); 113 | let protocol_version = ProtocolVersion::V1; 114 | 115 | *self.client_capabilities.lock().unwrap() = client_capabilities; 116 | 117 | let agent_capabilities = AgentCapabilities::new() 118 | .prompt_capabilities(PromptCapabilities::new().embedded_context(true).image(true)) 119 | .mcp_capabilities(McpCapabilities::new().http(true)); 120 | 121 | let mut auth_methods = vec![ 122 | CodexAuthMethod::ChatGpt.into(), 123 | CodexAuthMethod::CodexApiKey.into(), 124 | CodexAuthMethod::OpenAiApiKey.into(), 125 | ]; 126 | // Until codex device code auth works, we can't use this in remote ssh projects 127 | if std::env::var("NO_BROWSER").is_ok() { 128 | auth_methods.remove(0); 129 | } 130 | 131 | Ok(InitializeResponse::new(protocol_version) 132 | .agent_capabilities(agent_capabilities) 133 | .agent_info(Implementation::new("codex-acp", env!("CARGO_PKG_VERSION")).title("Codex")) 134 | .auth_methods(auth_methods)) 135 | } 136 | 137 | async fn authenticate( 138 | &self, 139 | request: AuthenticateRequest, 140 | ) -> Result { 141 | let auth_method = CodexAuthMethod::try_from(request.method_id)?; 142 | 143 | // Check before starting login flow if already authenticated with the same method 144 | if let Some(auth) = self.auth_manager.auth() { 145 | match (auth.mode, auth_method) { 146 | ( 147 | AuthMode::ApiKey, 148 | 
CodexAuthMethod::CodexApiKey | CodexAuthMethod::OpenAiApiKey, 149 | ) 150 | | (AuthMode::ChatGPT, CodexAuthMethod::ChatGpt) => { 151 | return Ok(AuthenticateResponse::new()); 152 | } 153 | _ => {} 154 | } 155 | } 156 | 157 | match auth_method { 158 | CodexAuthMethod::ChatGpt => { 159 | // Perform browser/device login via codex-rs, then report success/failure to the client. 160 | let opts = codex_login::ServerOptions::new( 161 | self.config.codex_home.clone(), 162 | codex_core::auth::CLIENT_ID.to_string(), 163 | None, 164 | self.config.cli_auth_credentials_store_mode, 165 | ); 166 | 167 | let server = 168 | codex_login::run_login_server(opts).map_err(Error::into_internal_error)?; 169 | 170 | server 171 | .block_until_done() 172 | .await 173 | .map_err(Error::into_internal_error)?; 174 | 175 | self.auth_manager.reload(); 176 | } 177 | CodexAuthMethod::CodexApiKey => { 178 | let api_key = read_codex_api_key_from_env().ok_or_else(|| { 179 | Error::internal_error().data(format!("{CODEX_API_KEY_ENV_VAR} is not set")) 180 | })?; 181 | codex_login::login_with_api_key( 182 | &self.config.codex_home, 183 | &api_key, 184 | self.config.cli_auth_credentials_store_mode, 185 | ) 186 | .map_err(Error::into_internal_error)?; 187 | } 188 | CodexAuthMethod::OpenAiApiKey => { 189 | let api_key = read_openai_api_key_from_env().ok_or_else(|| { 190 | Error::internal_error().data(format!("{OPENAI_API_KEY_ENV_VAR} is not set")) 191 | })?; 192 | codex_login::login_with_api_key( 193 | &self.config.codex_home, 194 | &api_key, 195 | self.config.cli_auth_credentials_store_mode, 196 | ) 197 | .map_err(Error::into_internal_error)?; 198 | } 199 | } 200 | 201 | self.auth_manager.reload(); 202 | 203 | Ok(AuthenticateResponse::new()) 204 | } 205 | 206 | async fn new_session(&self, request: NewSessionRequest) -> Result { 207 | // Check before sending if authentication was successful or not 208 | self.check_auth()?; 209 | 210 | let NewSessionRequest { 211 | cwd, mcp_servers, .. 
212 | } = request; 213 | info!("Creating new session with cwd: {}", cwd.display()); 214 | 215 | let mut config = self.config.clone(); 216 | // Allows us to support HTTP MCP servers 217 | config.use_experimental_use_rmcp_client = true; 218 | // Make sure we are going through the `apply_patch` code path 219 | config.include_apply_patch_tool = true; 220 | config.cwd.clone_from(&cwd); 221 | 222 | // Propagate any client-provided MCP servers that codex-rs supports. 223 | for mcp_server in mcp_servers { 224 | match mcp_server { 225 | // Not supported in codex 226 | McpServer::Sse(..) => {} 227 | McpServer::Http(McpServerHttp { 228 | name, url, headers, .. 229 | }) => { 230 | config.mcp_servers.insert( 231 | name, 232 | McpServerConfig { 233 | transport: McpServerTransportConfig::StreamableHttp { 234 | url, 235 | bearer_token_env_var: None, 236 | http_headers: if headers.is_empty() { 237 | None 238 | } else { 239 | Some(headers.into_iter().map(|h| (h.name, h.value)).collect()) 240 | }, 241 | env_http_headers: None, 242 | }, 243 | enabled: true, 244 | startup_timeout_sec: None, 245 | tool_timeout_sec: None, 246 | disabled_tools: None, 247 | enabled_tools: None, 248 | }, 249 | ); 250 | } 251 | McpServer::Stdio(McpServerStdio { 252 | name, 253 | command, 254 | args, 255 | env, 256 | .. 
257 | }) => { 258 | config.mcp_servers.insert( 259 | name, 260 | McpServerConfig { 261 | transport: McpServerTransportConfig::Stdio { 262 | command: command.display().to_string(), 263 | args, 264 | env: if env.is_empty() { 265 | None 266 | } else { 267 | Some(env.into_iter().map(|env| (env.name, env.value)).collect()) 268 | }, 269 | env_vars: vec![], 270 | cwd: Some(cwd.clone()), 271 | }, 272 | enabled: true, 273 | startup_timeout_sec: None, 274 | tool_timeout_sec: None, 275 | disabled_tools: None, 276 | enabled_tools: None, 277 | }, 278 | ); 279 | } 280 | _ => {} 281 | } 282 | } 283 | 284 | let num_mcp_servers = config.mcp_servers.len(); 285 | 286 | let NewConversation { 287 | conversation_id, 288 | conversation, 289 | session_configured: _, 290 | } = Box::pin(self.conversation_manager.new_conversation(config.clone())) 291 | .await 292 | .map_err(|_e| Error::internal_error())?; 293 | 294 | let session_id = Self::session_id_from_conversation_id(conversation_id); 295 | let conversation = Rc::new(Conversation::new( 296 | session_id.clone(), 297 | conversation, 298 | self.auth_manager.clone(), 299 | self.conversation_manager.get_models_manager(), 300 | self.client_capabilities.clone(), 301 | config.clone(), 302 | )); 303 | let load = conversation.load().await?; 304 | 305 | self.sessions 306 | .borrow_mut() 307 | .insert(session_id.clone(), conversation); 308 | 309 | debug!("Created new session with {} MCP servers", num_mcp_servers); 310 | 311 | Ok(NewSessionResponse::new(session_id) 312 | .modes(load.modes) 313 | .models(load.models)) 314 | } 315 | 316 | async fn load_session( 317 | &self, 318 | request: LoadSessionRequest, 319 | ) -> Result { 320 | info!("Loading session: {}", request.session_id); 321 | // Check before sending if authentication was successful or not 322 | self.check_auth()?; 323 | 324 | // Check if we have this session already 325 | let Some(conversation) = self.sessions.borrow().get(&request.session_id).cloned() else { 326 | // For now, we can't 
actually load sessions from disk 327 | // The conversation manager doesn't have a direct load method 328 | // We would need to use resume_conversation_from_rollout with a rollout path 329 | return Err(Error::resource_not_found(None)); 330 | }; 331 | 332 | Ok(conversation.load().await?) 333 | } 334 | 335 | async fn prompt(&self, request: PromptRequest) -> Result { 336 | info!("Processing prompt for session: {}", request.session_id); 337 | // Check before sending if authentication was successful or not 338 | self.check_auth()?; 339 | 340 | // Get the session state 341 | let conversation = self.get_conversation(&request.session_id)?; 342 | let stop_reason = conversation.prompt(request).await?; 343 | 344 | Ok(PromptResponse::new(stop_reason)) 345 | } 346 | 347 | async fn cancel(&self, args: CancelNotification) -> Result<(), Error> { 348 | info!("Cancelling operations for session: {}", args.session_id); 349 | self.get_conversation(&args.session_id)?.cancel().await?; 350 | Ok(()) 351 | } 352 | 353 | async fn set_session_mode( 354 | &self, 355 | args: SetSessionModeRequest, 356 | ) -> Result { 357 | info!("Setting session mode for session: {}", args.session_id); 358 | self.get_conversation(&args.session_id)? 359 | .set_mode(args.mode_id) 360 | .await?; 361 | Ok(SetSessionModeResponse::default()) 362 | } 363 | 364 | async fn set_session_model( 365 | &self, 366 | args: SetSessionModelRequest, 367 | ) -> Result { 368 | info!("Setting session model for session: {}", args.session_id); 369 | 370 | self.get_conversation(&args.session_id)? 
371 | .set_model(args.model_id) 372 | .await?; 373 | 374 | Ok(SetSessionModelResponse::default()) 375 | } 376 | } 377 | 378 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 379 | enum CodexAuthMethod { 380 | ChatGpt, 381 | CodexApiKey, 382 | OpenAiApiKey, 383 | } 384 | 385 | impl From for AuthMethodId { 386 | fn from(method: CodexAuthMethod) -> Self { 387 | Self::new(match method { 388 | CodexAuthMethod::ChatGpt => "chatgpt", 389 | CodexAuthMethod::CodexApiKey => "codex-api-key", 390 | CodexAuthMethod::OpenAiApiKey => "openai-api-key", 391 | }) 392 | } 393 | } 394 | 395 | impl From for AuthMethod { 396 | fn from(method: CodexAuthMethod) -> Self { 397 | match method { 398 | CodexAuthMethod::ChatGpt => Self::new(method, "Login with ChatGPT").description( 399 | "Use your ChatGPT login with Codex CLI (requires a paid ChatGPT subscription)", 400 | ), 401 | CodexAuthMethod::CodexApiKey => { 402 | Self::new(method, format!("Use {CODEX_API_KEY_ENV_VAR}")).description(format!( 403 | "Requires setting the `{CODEX_API_KEY_ENV_VAR}` environment variable." 404 | )) 405 | } 406 | CodexAuthMethod::OpenAiApiKey => { 407 | Self::new(method, format!("Use {OPENAI_API_KEY_ENV_VAR}")).description(format!( 408 | "Requires setting the `{OPENAI_API_KEY_ENV_VAR}` environment variable." 
409 | )) 410 | } 411 | } 412 | } 413 | } 414 | 415 | impl TryFrom for CodexAuthMethod { 416 | type Error = Error; 417 | 418 | fn try_from(value: AuthMethodId) -> Result { 419 | match value.0.as_ref() { 420 | "chatgpt" => Ok(CodexAuthMethod::ChatGpt), 421 | "codex-api-key" => Ok(CodexAuthMethod::CodexApiKey), 422 | "openai-api-key" => Ok(CodexAuthMethod::OpenAiApiKey), 423 | _ => Err(Error::invalid_params().data("unsupported authentication method")), 424 | } 425 | } 426 | } 427 | -------------------------------------------------------------------------------- /src/conversation.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | cell::RefCell, 3 | collections::HashMap, 4 | ops::DerefMut, 5 | path::{Path, PathBuf}, 6 | rc::Rc, 7 | sync::{Arc, LazyLock, Mutex}, 8 | }; 9 | 10 | use agent_client_protocol::{ 11 | Annotations, AudioContent, AvailableCommand, AvailableCommandInput, AvailableCommandsUpdate, 12 | BlobResourceContents, Client, ClientCapabilities, Content, ContentBlock, ContentChunk, Diff, 13 | EmbeddedResource, EmbeddedResourceResource, Error, ImageContent, LoadSessionResponse, Meta, 14 | ModelId, ModelInfo, PermissionOption, PermissionOptionKind, Plan, PlanEntry, PlanEntryPriority, 15 | PlanEntryStatus, PromptRequest, RequestPermissionOutcome, RequestPermissionRequest, 16 | RequestPermissionResponse, ResourceLink, SelectedPermissionOutcome, SessionId, SessionMode, 17 | SessionModeId, SessionModeState, SessionModelState, SessionNotification, SessionUpdate, 18 | StopReason, Terminal, TextContent, TextResourceContents, ToolCall, ToolCallContent, ToolCallId, 19 | ToolCallLocation, ToolCallStatus, ToolCallUpdate, ToolCallUpdateFields, ToolKind, 20 | UnstructuredCommandInput, 21 | }; 22 | use codex_common::approval_presets::{ApprovalPreset, builtin_approval_presets}; 23 | use codex_core::{ 24 | AuthManager, CodexConversation, 25 | config::{Config, set_project_trust_level}, 26 | error::CodexErr, 27 | 
openai_models::models_manager::ModelsManager, 28 | protocol::{ 29 | AgentMessageContentDeltaEvent, AgentMessageEvent, AgentReasoningEvent, 30 | AgentReasoningSectionBreakEvent, ApplyPatchApprovalRequestEvent, ElicitationAction, 31 | ErrorEvent, Event, EventMsg, ExecApprovalRequestEvent, ExecCommandBeginEvent, 32 | ExecCommandEndEvent, ExecCommandOutputDeltaEvent, ExitedReviewModeEvent, FileChange, 33 | ItemCompletedEvent, ItemStartedEvent, ListCustomPromptsResponseEvent, McpInvocation, 34 | McpStartupCompleteEvent, McpStartupUpdateEvent, McpToolCallBeginEvent, McpToolCallEndEvent, 35 | Op, PatchApplyBeginEvent, PatchApplyEndEvent, ReasoningContentDeltaEvent, 36 | ReasoningRawContentDeltaEvent, ReviewDecision, ReviewOutputEvent, ReviewRequest, 37 | ReviewTarget, SandboxPolicy, StreamErrorEvent, TaskCompleteEvent, TaskStartedEvent, 38 | TerminalInteractionEvent, TurnAbortedEvent, UserMessageEvent, ViewImageToolCallEvent, 39 | WarningEvent, WebSearchBeginEvent, WebSearchEndEvent, 40 | }, 41 | review_format::format_review_findings_block, 42 | review_prompts::user_facing_hint, 43 | }; 44 | use codex_protocol::{ 45 | approvals::ElicitationRequestEvent, 46 | config_types::TrustLevel, 47 | custom_prompts::CustomPrompt, 48 | openai_models::{ModelPreset, ReasoningEffort}, 49 | parse_command::ParsedCommand, 50 | plan_tool::{PlanItemArg, StepStatus, UpdatePlanArgs}, 51 | user_input::UserInput, 52 | }; 53 | use itertools::Itertools; 54 | use mcp_types::{CallToolResult, RequestId}; 55 | use serde_json::json; 56 | use tokio::sync::{mpsc, oneshot}; 57 | use tracing::{error, info, warn}; 58 | 59 | use crate::{ 60 | ACP_CLIENT, 61 | prompt_args::{expand_custom_prompt, parse_slash_name}, 62 | }; 63 | 64 | static APPROVAL_PRESETS: LazyLock> = LazyLock::new(builtin_approval_presets); 65 | const INIT_COMMAND_PROMPT: &str = include_str!("./prompt_for_init_command.md"); 66 | 67 | /// Trait for abstracting over the `CodexConversation` to make testing easier. 
68 | #[async_trait::async_trait] 69 | pub trait CodexConversationImpl { 70 | async fn submit(&self, op: Op) -> Result; 71 | async fn next_event(&self) -> Result; 72 | } 73 | 74 | #[async_trait::async_trait] 75 | impl CodexConversationImpl for CodexConversation { 76 | async fn submit(&self, op: Op) -> Result { 77 | self.submit(op).await 78 | } 79 | 80 | async fn next_event(&self) -> Result { 81 | self.next_event().await 82 | } 83 | } 84 | 85 | #[async_trait::async_trait] 86 | pub trait ModelsManagerImpl { 87 | async fn get_model(&self, model_id: &Option, config: &Config) -> String; 88 | async fn list_models(&self, config: &Config) -> Vec; 89 | } 90 | 91 | #[async_trait::async_trait] 92 | impl ModelsManagerImpl for ModelsManager { 93 | async fn get_model(&self, model_id: &Option, config: &Config) -> String { 94 | self.get_model(model_id, config).await 95 | } 96 | 97 | async fn list_models(&self, config: &Config) -> Vec { 98 | self.list_models(config).await 99 | } 100 | } 101 | 102 | pub trait Auth { 103 | fn logout(&self) -> Result; 104 | } 105 | 106 | impl Auth for Arc { 107 | fn logout(&self) -> Result { 108 | self.as_ref() 109 | .logout() 110 | .map_err(|e| Error::internal_error().data(e.to_string())) 111 | } 112 | } 113 | 114 | enum ConversationMessage { 115 | Load { 116 | response_tx: oneshot::Sender>, 117 | }, 118 | Prompt { 119 | request: PromptRequest, 120 | response_tx: oneshot::Sender>, Error>>, 121 | }, 122 | SetMode { 123 | mode: SessionModeId, 124 | response_tx: oneshot::Sender>, 125 | }, 126 | SetModel { 127 | model: ModelId, 128 | response_tx: oneshot::Sender>, 129 | }, 130 | Cancel { 131 | response_tx: oneshot::Sender>, 132 | }, 133 | } 134 | 135 | pub struct Conversation { 136 | /// A sender for interacting with the conversation. 137 | message_tx: mpsc::UnboundedSender, 138 | /// A handle to the spawned task. 
    // Kept only to tie the task's lifetime to this handle; never awaited.
    _handle: tokio::task::JoinHandle<()>,
}

impl Conversation {
    /// Build the actor for this session and spawn it on the current-thread
    /// task set, returning a message-passing handle to it.
    pub fn new(
        session_id: SessionId,
        conversation: Arc,
        auth: Arc,
        models_manager: Arc,
        client_capabilities: Arc>,
        config: Config,
    ) -> Self {
        let (message_tx, message_rx) = mpsc::unbounded_channel();

        let actor = ConversationActor::new(
            auth,
            SessionClient::new(session_id, client_capabilities),
            conversation,
            models_manager,
            config,
            message_rx,
        );
        // spawn_local: the actor future is presumably !Send — confirm.
        let handle = tokio::task::spawn_local(actor.spawn());

        Self {
            message_tx,
            _handle: handle,
        }
    }

    /// Ask the actor to load the conversation.
    pub async fn load(&self) -> Result {
        let (response_tx, response_rx) = oneshot::channel();

        let message = ConversationMessage::Load { response_tx };
        // Send errors are deliberately ignored: if the actor is gone, the
        // closed oneshot below surfaces as an internal error anyway.
        drop(self.message_tx.send(message));

        response_rx
            .await
            .map_err(|e| Error::internal_error().data(e.to_string()))?
    }

    /// Run a prompt turn to completion.
    ///
    /// The actor's first reply acknowledges the submission; its payload is
    /// then awaited for the turn's final result (hence `??` + `.await`).
    pub async fn prompt(&self, request: PromptRequest) -> Result {
        let (response_tx, response_rx) = oneshot::channel();

        let message = ConversationMessage::Prompt {
            request,
            response_tx,
        };
        drop(self.message_tx.send(message));

        response_rx
            .await
            .map_err(|e| Error::internal_error().data(e.to_string()))??
            .await
            .map_err(|e| Error::internal_error().data(e.to_string()))?
    }

    /// Ask the actor to switch the session mode.
    pub async fn set_mode(&self, mode: SessionModeId) -> Result<(), Error> {
        let (response_tx, response_rx) = oneshot::channel();

        let message = ConversationMessage::SetMode { mode, response_tx };
        drop(self.message_tx.send(message));

        response_rx
            .await
            .map_err(|e| Error::internal_error().data(e.to_string()))?
    }

    /// Ask the actor to switch the active model.
    pub async fn set_model(&self, model: ModelId) -> Result<(), Error> {
        let (response_tx, response_rx) = oneshot::channel();

        let message = ConversationMessage::SetModel { model, response_tx };
        drop(self.message_tx.send(message));

        response_rx
            .await
            .map_err(|e| Error::internal_error().data(e.to_string()))?
    }

    /// Ask the actor to cancel the in-flight submission.
    pub async fn cancel(&self) -> Result<(), Error> {
        let (response_tx, response_rx) = oneshot::channel();

        let message = ConversationMessage::Cancel { response_tx };
        drop(self.message_tx.send(message));

        response_rx
            .await
            .map_err(|e| Error::internal_error().data(e.to_string()))?
    }
}

/// Per-submission state: what kind of work is in flight, and which handler
/// should receive the Codex events for it.
enum SubmissionState {
    /// Loading custom prompts from the project
    CustomPrompts(CustomPromptsState),
    /// User prompts + some slash commands like /init or /review
    Prompt(PromptState),
    /// Subtask, like /compact
    Task(TaskState),
}

impl SubmissionState {
    /// True while the submission still has a caller waiting on its result.
    fn is_active(&self) -> bool {
        match self {
            Self::CustomPrompts(state) => state.is_active(),
            Self::Prompt(state) => state.is_active(),
            Self::Task(state) => state.is_active(),
        }
    }

    /// Route one Codex event to the active submission's handler.
    async fn handle_event(&mut self, client: &SessionClient, event: EventMsg) {
        match self {
            Self::CustomPrompts(state) => state.handle_event(event),
            Self::Prompt(state) => state.handle_event(client, event).await,
            Self::Task(state) => state.handle_event(client, event).await,
        }
    }
}

/// Waits for a `ListCustomPromptsResponse` and forwards the prompts to the
/// caller; any other event is logged and dropped.
struct CustomPromptsState {
    // `None` after the response has been delivered.
    response_tx: Option, Error>>>,
}

impl CustomPromptsState {
    fn new(response_tx: oneshot::Sender, Error>>) -> Self {
        Self {
            response_tx: Some(response_tx),
        }
    }

    /// Active while the response has not been sent and the caller is still
    /// listening.
    fn is_active(&self) -> bool {
        let Some(response_tx) = &self.response_tx else {
            return false;
        };
        !response_tx.is_closed()
    }

    fn handle_event(&mut self, event: EventMsg) {
        match event {
            EventMsg::ListCustomPromptsResponse(ListCustomPromptsResponseEvent {
                custom_prompts,
            }) => {
                // Deliver once; `take()` makes later responses no-ops.
                if let Some(tx) = self.response_tx.take() {
                    drop(tx.send(Ok(custom_prompts)));
                }
            }
            e => {
                warn!("Unexpected event: {e:?}");
            }
        }
    }
}

/// Bookkeeping for the currently running exec command: correlates
/// begin/delta/end events by `call_id` and accumulates output for clients
/// that lack embedded-terminal support.
struct ActiveCommand {
    call_id: String,
    tool_call_id: ToolCallId,
    terminal_output: bool,
    // Accumulated output, used only on the non-terminal fallback path.
    output: String,
    // Extension of the file being read, used to pick a code fence language.
    file_extension: Option,
}

/// State for a normal prompt turn: translates Codex events into ACP session
/// updates, resolves approval requests, and reports the final stop reason.
struct PromptState {
    active_command: Option,
    // call_id of the in-flight web search, completed on the next tool event.
    active_web_search: Option,
    conversation: Arc,
    event_count: usize,
    // `None` once a terminal result has been sent for this turn.
    response_tx: Option>>,
    submission_id: String,
    // Dedup flags: suppress the non-delta message/reasoning events when the
    // same content already arrived via streaming deltas.
    seen_message_deltas: bool,
    seen_reasoning_deltas: bool,
}

impl PromptState {
    fn new(
        conversation: Arc,
        response_tx: oneshot::Sender>,
        submission_id: String,
    ) -> Self {
        Self {
            active_command: None,
            active_web_search: None,
            conversation,
            event_count: 0,
            response_tx: Some(response_tx),
            submission_id,
            seen_message_deltas: false,
            seen_reasoning_deltas: false,
        }
    }

    /// Active while no terminal result was sent and the caller still waits.
    fn is_active(&self) -> bool {
        let Some(response_tx) = &self.response_tx else {
            return false;
        };
        !response_tx.is_closed()
    }

    #[expect(clippy::too_many_lines)]
    async fn handle_event(&mut self, client: &SessionClient, event: EventMsg) {
        self.event_count += 1;

        // Complete any previous web search before starting a new one
        match &event {
            EventMsg::Error(..)
            | EventMsg::StreamError(..)
            | EventMsg::WebSearchBegin(..)
            | EventMsg::UserMessage(..)
            | EventMsg::ExecApprovalRequest(..)
            | EventMsg::ExecCommandBegin(..)
            | EventMsg::ExecCommandOutputDelta(..)
            | EventMsg::ExecCommandEnd(..)
            | EventMsg::McpToolCallBegin(..)
            | EventMsg::McpToolCallEnd(..)
            | EventMsg::ApplyPatchApprovalRequest(..)
            | EventMsg::PatchApplyBegin(..)
            | EventMsg::PatchApplyEnd(..)
            | EventMsg::TaskStarted(..)
            | EventMsg::TaskComplete(..)
            | EventMsg::TokenCount(..)
            | EventMsg::TurnDiff(..)
            | EventMsg::TurnAborted(..)
            | EventMsg::EnteredReviewMode(..)
            | EventMsg::ExitedReviewMode(..)
            | EventMsg::ShutdownComplete => {
                self.complete_web_search(client).await;
            }
            _ => {}
        }

        match event {
            EventMsg::TaskStarted(TaskStartedEvent {
                model_context_window,
            }) => {
                info!("Task started with context window of {model_context_window:?}");
            }
            EventMsg::ItemStarted(ItemStartedEvent { thread_id, turn_id, item }) => {
                info!("Item started with thread_id: {thread_id}, turn_id: {turn_id}, item: {item:?}");
            }
            EventMsg::UserMessage(UserMessageEvent {
                message,
                images: _,
            }) => {
                info!("User message: {message:?}");
            }
            EventMsg::AgentMessageContentDelta(AgentMessageContentDeltaEvent { thread_id, turn_id, item_id, delta }) => {
                info!("Agent message content delta received: thread_id: {thread_id}, turn_id: {turn_id}, item_id: {item_id}, delta: {delta:?}");
                self.seen_message_deltas = true;
                client.send_agent_text(delta).await;
            }
            // Both reasoning-delta flavors are forwarded as agent thoughts.
            EventMsg::ReasoningContentDelta(ReasoningContentDeltaEvent { thread_id, turn_id, item_id, delta, summary_index: index })
            | EventMsg::ReasoningRawContentDelta(ReasoningRawContentDeltaEvent { thread_id, turn_id, item_id, delta, content_index: index }) => {
                info!("Agent reasoning content delta received: thread_id: {thread_id}, turn_id: {turn_id}, item_id: {item_id}, index: {index}, delta: {delta:?}");
                self.seen_reasoning_deltas = true;
                client.send_agent_thought(delta).await;
            }
            EventMsg::AgentReasoningSectionBreak(AgentReasoningSectionBreakEvent { item_id, summary_index}) => {
                info!("Agent reasoning section break received: item_id: {item_id}, index: {summary_index}");
                // Make sure the section heading actually get spacing
                self.seen_reasoning_deltas = true;
                client.send_agent_thought("\n\n").await;
            }
            EventMsg::AgentMessage(AgentMessageEvent { message }) => {
                info!("Agent message (non-delta) received: {message:?}");
                // We didn't receive this message via streaming
                if !std::mem::take(&mut self.seen_message_deltas) {
                    client.send_agent_text(message).await;
                }
            }
            EventMsg::AgentReasoning(AgentReasoningEvent { text }) => {
                info!("Agent reasoning (non-delta) received: {text:?}");
                // We didn't receive this message via streaming
                if !std::mem::take(&mut self.seen_reasoning_deltas) {
                    client.send_agent_thought(text).await;
                }
            }
            EventMsg::PlanUpdate(UpdatePlanArgs { explanation, plan }) => {
                // Send this to the client via session/update notification
                info!("Agent plan updated. Explanation: {:?}", explanation);
                client.update_plan(plan).await;
            }
            EventMsg::WebSearchBegin(WebSearchBeginEvent { call_id }) => {
                info!("Web search started: call_id={}", call_id);
                // Create a ToolCall notification for the search beginning
                self.start_web_search(client, call_id).await;
            }
            EventMsg::WebSearchEnd(WebSearchEndEvent { call_id, query }) => {
                info!("Web search query received: call_id={call_id}, query={query}");
                // Send update that the search is in progress with the query
                // (WebSearchEnd just means we have the query, not that results are ready)
                self.update_web_search_query(client, call_id, query).await;
                // The actual search results will come through AgentMessage events
                // We mark as completed when a new tool call begins
            }
            EventMsg::ExecApprovalRequest(event) => {
                info!("Command execution started: call_id={}, command={:?}", event.call_id, event.command);
                // An approval failure terminates the turn with an error.
                if let Err(err) = self.exec_approval(client, event).await && let Some(response_tx) = self.response_tx.take() {
                    drop(response_tx.send(Err(err)));
                }
            }
            EventMsg::ExecCommandBegin(event) => {
                info!(
                    "Command execution started: call_id={}, command={:?}",
                    event.call_id, event.command
                );
                self.exec_command_begin(client, event).await;
            }
            EventMsg::ExecCommandOutputDelta(delta_event) => {
                self.exec_command_output_delta(client, delta_event).await;
            }
            EventMsg::ExecCommandEnd(end_event) => {
                info!(
                    "Command execution ended: call_id={}, exit_code={}",
                    end_event.call_id, end_event.exit_code
                );
                self.exec_command_end(client, end_event).await;
            }
            EventMsg::TerminalInteraction(event) => {
                info!(
                    "Terminal interaction: call_id={}, process_id={}, stdin={}",
                    event.call_id, event.process_id, event.stdin
                );
                self.terminal_interaction(client, event).await;
            }
            EventMsg::McpToolCallBegin(McpToolCallBeginEvent { call_id, invocation }) => {
                info!("MCP tool call begin: call_id={call_id}, invocation={} {}", invocation.server, invocation.tool);
                self.start_mcp_tool_call(client, call_id, invocation).await;
            }
            EventMsg::McpToolCallEnd(McpToolCallEndEvent { call_id, invocation, duration, result }) => {
                info!("MCP tool call ended: call_id={call_id}, invocation={} {}, duration={duration:?}", invocation.server, invocation.tool);
                self.end_mcp_tool_call(client, call_id, result).await;
            }
            EventMsg::ApplyPatchApprovalRequest(event) => {
                info!("Apply patch approval request: call_id={}, reason={:?}", event.call_id, event.reason);
                // An approval failure terminates the turn with an error.
                if let Err(err) = self.patch_approval(client, event).await && let Some(response_tx) = self.response_tx.take() {
                    drop(response_tx.send(Err(err)));
                }
            }
            EventMsg::PatchApplyBegin(event) => {
                info!("Patch apply begin: call_id={}, auto_approved={}", event.call_id,event.auto_approved);
                self.start_patch_apply(client, event).await;
            }
            EventMsg::PatchApplyEnd(event) => {
                info!("Patch apply end: call_id={}, success={}", event.call_id, event.success);
                self.end_patch_apply(client, event).await;
            }
            EventMsg::ItemCompleted(ItemCompletedEvent { thread_id, turn_id, item }) => {
                info!("Item completed: thread_id={}, turn_id={}, item={:?}", thread_id, turn_id, item);
            }
            EventMsg::TaskComplete(TaskCompleteEvent { last_agent_message}) => {
                info!(
                    "Task completed successfully after {} events. Last agent message: {last_agent_message:?}", self.event_count
                );
                // Normal end of turn.
                if let Some(response_tx) = self.response_tx.take() {
                    response_tx.send(Ok(StopReason::EndTurn)).ok();
                }
            }
            EventMsg::UndoStarted(event) => {
                client
                    .send_agent_text(
                        event
                            .message
                            .unwrap_or_else(|| "Undo in progress...".to_string()),
                    )
                    .await;
            }
            EventMsg::UndoCompleted(event) => {
                let fallback = if event.success {
                    "Undo completed.".to_string()
                } else {
                    "Undo failed.".to_string()
                };
                client.send_agent_text(event.message.unwrap_or(fallback)).await;
            }
            // Stream errors are recoverable: log them but keep the turn alive.
            EventMsg::StreamError(StreamErrorEvent { message , codex_error_info}) => {
                error!("Handled error during turn: {message} {codex_error_info:?}");
            }
            // Hard errors end the turn with an internal error payload.
            EventMsg::Error(ErrorEvent { message, codex_error_info }) => {
                error!("Unhandled error during turn: {message} {codex_error_info:?}");
                if let Some(response_tx) = self.response_tx.take() {
                    response_tx.send(Err(Error::internal_error().data(json!({ "message": message, "codex_error_info": codex_error_info })))).ok();
                }
            }
            EventMsg::TurnAborted(TurnAbortedEvent { reason }) => {
                info!("Turn aborted: {reason:?}");
                if let Some(response_tx) = self.response_tx.take() {
                    response_tx.send(Ok(StopReason::Cancelled)).ok();
                }
            }
            EventMsg::ShutdownComplete => {
                info!("Agent shutting down");
                if let Some(response_tx) = self.response_tx.take() {
                    response_tx.send(Ok(StopReason::Cancelled)).ok();
                }
            }
            // Surface the image view as an already-completed read tool call
            // whose content is a resource link to the image path.
            EventMsg::ViewImageToolCall(ViewImageToolCallEvent { call_id, path }) => {
                info!("ViewImageToolCallEvent received");
                let display_path = path.display().to_string();
                client
                    .send_notification(
                        SessionUpdate::ToolCall(
                            ToolCall::new(call_id, format!("View Image {display_path}"))
                                .kind(ToolKind::Read).status(ToolCallStatus::Completed)
                                .content(vec![ToolCallContent::Content(Content::new(ContentBlock::ResourceLink(ResourceLink::new(display_path.clone(), display_path.clone())
                                )
                                )
                                )]).locations(vec![ToolCallLocation::new(path)])))
                    .await;
            }
            EventMsg::EnteredReviewMode(review_request) => {
                info!("Review begin: request={review_request:?}");
            }
            EventMsg::ExitedReviewMode(event) => {
                info!("Review end: output={event:?}");
                if let Err(err) = self.review_mode_exit(client, event).await && let Some(response_tx) = self.response_tx.take() {
                    drop(response_tx.send(Err(err)));
                }
            }
            EventMsg::Warning(WarningEvent { message }) => {
                warn!("Warning: {message}");
            }
            EventMsg::McpStartupUpdate(McpStartupUpdateEvent { server, status }) => {
                info!("MCP startup update: server={server}, status={status:?}");
            }
            EventMsg::McpStartupComplete(McpStartupCompleteEvent {
                ready,
                failed,
                cancelled,
            }) => {
                info!(
                    "MCP startup complete: ready={ready:?}, failed={failed:?}, cancelled={cancelled:?}"
                );
            }
            EventMsg::ElicitationRequest(event) => {
                info!("Elicitation request: server={}, id={:?}, message={}", event.server_name, event.id, event.message);
                if let Err(err) = self.mcp_elicitation(client, event).await && let Some(response_tx) = self.response_tx.take() {
                    drop(response_tx.send(Err(err)));
                }
            }

            // Ignore these events
            EventMsg::AgentReasoningRawContent(..)
            // In the future we can use this to update usage stats
            | EventMsg::TokenCount(..)
            // we already have a way to diff the turn, so ignore
            | EventMsg::TurnDiff(..)
            // Revisit when we can emit status updates
            | EventMsg::BackgroundEvent(..)
            | EventMsg::ContextCompacted(..)
            // Old events
            | EventMsg::AgentMessageDelta(..) | EventMsg::AgentReasoningDelta(..) | EventMsg::AgentReasoningRawContentDelta(..)
            | EventMsg::RawResponseItem(..)
            | EventMsg::SessionConfigured(..) => {}

            // Unexpected events for this submission
            e @ (EventMsg::McpListToolsResponse(..)
            // returned from Op::ListCustomPrompts, ignore
            | EventMsg::ListCustomPromptsResponse(..)
            | EventMsg::ListSkillsResponse(..)
            // Used for returning a single history entry
            | EventMsg::GetHistoryEntryResponse(..)
            | EventMsg::DeprecationNotice(..)
            ) => {
                warn!("Unexpected event: {:?}", e);
            }
        }
    }

    /// Handle an MCP elicitation request: ask the client for permission and
    /// forward the chosen action back to Codex via `Op::ResolveElicitation`.
    async fn mcp_elicitation(
        &self,
        client: &SessionClient,
        event: ElicitationRequestEvent,
    ) -> Result<(), Error> {
        let raw_input = serde_json::json!(&event);
        let ElicitationRequestEvent {
            server_name,
            id,
            message,
        } = event;
        // Reuse the MCP request id (string or integer) as the tool-call id.
        let tool_call_id = ToolCallId::new(match &id {
            RequestId::String(s) => s.clone(),
            RequestId::Integer(i) => i.to_string(),
        });
        let response = client
            .request_permission(
                ToolCallUpdate::new(
                    tool_call_id.clone(),
                    ToolCallUpdateFields::new()
                        .title(server_name.clone())
                        .status(ToolCallStatus::Pending)
                        .content(vec![message.into()])
                        .raw_input(raw_input),
                ),
                vec![
                    PermissionOption::new(
                        "approved",
                        "Yes, provide the requested info",
                        PermissionOptionKind::AllowOnce,
                    ),
                    PermissionOption::new(
                        "abort",
                        "No, but continue without it",
                        PermissionOptionKind::RejectOnce,
                    ),
                    PermissionOption::new(
                        "cancel",
                        "Cancel this request",
                        PermissionOptionKind::RejectOnce,
                    ),
                ],
            )
            .await?;

        let decision = match response.outcome {
            RequestPermissionOutcome::Selected(SelectedPermissionOutcome { option_id, .. }) => {
                match option_id.0.as_ref() {
                    "approved" => ElicitationAction::Accept,
                    "abort" => ElicitationAction::Decline,
                    _ => ElicitationAction::Cancel,
                }
            }
            // NOTE(review): the leading `Cancelled |` is redundant — the `_`
            // alternative already matches it (rustc unreachable-pattern lint).
            RequestPermissionOutcome::Cancelled | _ => ElicitationAction::Cancel,
        };

        self.conversation
            .submit(Op::ResolveElicitation {
                server_name,
                request_id: id,
                decision,
            })
            .await
            .map_err(|e| Error::from(anyhow::anyhow!(e)))?;

        // Close out the tool call according to the user's decision.
        client
            .send_notification(SessionUpdate::ToolCallUpdate(ToolCallUpdate::new(
                tool_call_id,
                ToolCallUpdateFields::new().status(if decision == ElicitationAction::Accept {
                    ToolCallStatus::Completed
                } else {
                    ToolCallStatus::Failed
                }),
            )))
            .await;

        Ok(())
    }

    /// Forward the reviewer's findings (or explanation) to the client as
    /// agent text when review mode exits; no-op when there is no output.
    async fn review_mode_exit(
        &self,
        client: &SessionClient,
        event: ExitedReviewModeEvent,
    ) -> Result<(), Error> {
        let ExitedReviewModeEvent { review_output } = event;
        let Some(ReviewOutputEvent {
            findings,
            overall_correctness: _,
            overall_explanation,
            overall_confidence_score: _,
        }) = review_output
        else {
            return Ok(());
        };

        let text = if findings.is_empty() {
            // No findings: fall back to the overall explanation, or a
            // canned message when that is empty too.
            let explanation = overall_explanation.trim();
            if explanation.is_empty() {
                "Reviewer failed to output a response"
            } else {
                explanation
            }
            .to_string()
        } else {
            format_review_findings_block(&findings, None)
        };

        client.send_agent_text(&text).await;
        Ok(())
    }

    /// Ask the client to approve or reject a patch, then forward the
    /// decision to Codex via `Op::PatchApproval`.
    async fn patch_approval(
        &self,
        client: &SessionClient,
        event: ApplyPatchApprovalRequestEvent,
    ) -> Result<(), Error> {
        let raw_input = serde_json::json!(&event);
        let ApplyPatchApprovalRequestEvent {
            call_id,
            changes,
            reason,
            // grant_root doesn't seem to be set anywhere on the codex side
            grant_root: _,
            turn_id: _,
        } = event;
        let (title, locations, content) = extract_tool_call_content_from_changes(changes);
        let response = client
            .request_permission(
                ToolCallUpdate::new(
                    call_id,
                    ToolCallUpdateFields::new()
                        .kind(ToolKind::Edit)
                        .status(ToolCallStatus::Pending)
                        .title(title)
                        .locations(locations)
                        // Append the model's reason (if any) after the diff.
                        .content(content.chain(reason.map(|r| r.into())).collect::>())
                        .raw_input(raw_input),
                ),
                vec![
                    PermissionOption::new("approved", "Yes", PermissionOptionKind::AllowOnce),
                    PermissionOption::new(
                        "abort",
                        "No, provide feedback",
                        PermissionOptionKind::RejectOnce,
                    ),
                ],
            )
            .await?;

        let decision = match response.outcome {
            RequestPermissionOutcome::Selected(SelectedPermissionOutcome { option_id, .. }) => {
                match option_id.0.as_ref() {
                    "approved" => ReviewDecision::Approved,
                    _ => ReviewDecision::Abort,
                }
            }
            // NOTE(review): `Cancelled |` is redundant here as well — `_`
            // already covers it.
            RequestPermissionOutcome::Cancelled | _ => ReviewDecision::Abort,
        };

        self.conversation
            .submit(Op::PatchApproval {
                id: self.submission_id.clone(),
                decision,
            })
            .await
            .map_err(|e| Error::from(anyhow::anyhow!(e)))?;
        Ok(())
    }

    /// Announce the start of a patch application as an in-progress edit
    /// tool call carrying the change set.
    async fn start_patch_apply(&self, client: &SessionClient, event: PatchApplyBeginEvent) {
        let raw_input = serde_json::json!(&event);
        let PatchApplyBeginEvent {
            call_id,
            auto_approved: _,
            changes,
            turn_id: _,
        } = event;

        let (title, locations, content) = extract_tool_call_content_from_changes(changes);

        client
            .send_notification(SessionUpdate::ToolCall(
                ToolCall::new(call_id, title)
                    .kind(ToolKind::Edit)
                    .status(ToolCallStatus::InProgress)
                    .locations(locations)
                    .content(content.collect())
                    .raw_input(raw_input),
            ))
            .await;
    }

    /// Close out a patch-apply tool call, refreshing title/locations/content
    /// only when the end event still carries a non-empty change set.
    async fn
    end_patch_apply(&self, client: &SessionClient, event: PatchApplyEndEvent) {
        let raw_output = serde_json::json!(&event);
        let PatchApplyEndEvent {
            call_id,
            stdout: _,
            stderr: _,
            success,
            changes,
            turn_id: _,
        } = event;

        // Empty change set: leave the original title/locations/content alone.
        let (title, locations, content) = if !changes.is_empty() {
            let (title, locations, content) = extract_tool_call_content_from_changes(changes);
            (Some(title), Some(locations), Some(content.collect()))
        } else {
            (None, None, None)
        };

        client
            .send_notification(SessionUpdate::ToolCallUpdate(ToolCallUpdate::new(
                call_id,
                ToolCallUpdateFields::new()
                    .status(if success {
                        ToolCallStatus::Completed
                    } else {
                        ToolCallStatus::Failed
                    })
                    .raw_output(raw_output)
                    .title(title)
                    .locations(locations)
                    .content(content),
            )))
            .await;
    }

    /// Announce an MCP tool invocation as an in-progress tool call.
    async fn start_mcp_tool_call(
        &self,
        client: &SessionClient,
        call_id: String,
        invocation: McpInvocation,
    ) {
        let title = format!("Tool: {}/{}", invocation.server, invocation.tool);
        client
            .send_notification(SessionUpdate::ToolCall(
                ToolCall::new(call_id, title)
                    .status(ToolCallStatus::InProgress)
                    .raw_input(serde_json::json!(&invocation)),
            ))
            .await;
    }

    /// Close out an MCP tool call; a transport error or an `is_error`
    /// result maps to Failed, everything else to Completed.
    async fn end_mcp_tool_call(
        &self,
        client: &SessionClient,
        call_id: String,
        result: Result,
    ) {
        let is_error = match result.as_ref() {
            Ok(result) => result.is_error.unwrap_or_default(),
            Err(_) => true,
        };
        let raw_output = match result.as_ref() {
            Ok(result) => serde_json::json!(result),
            Err(err) => serde_json::json!(err),
        };

        client
            .send_notification(SessionUpdate::ToolCallUpdate(ToolCallUpdate::new(
                call_id,
                ToolCallUpdateFields::new()
                    .status(if is_error {
                        ToolCallStatus::Failed
                    } else {
                        ToolCallStatus::Completed
                    })
                    .raw_output(raw_output)
                    // Only attach content for successful, non-empty results.
                    .content(result.ok().filter(|result| !result.content.is_empty()).map(
                        |result| {
                            result
                                .content
                                .into_iter()
                                .map(codex_content_to_acp_content)
                                .collect()
                        },
                    )),
            )))
            .await;
    }

    /// Ask the client to approve a command execution, record it as the
    /// active command, and forward the decision via `Op::ExecApproval`.
    async fn exec_approval(
        &mut self,
        client: &SessionClient,
        event: ExecApprovalRequestEvent,
    ) -> Result<(), Error> {
        let raw_input = serde_json::json!(&event);
        let ExecApprovalRequestEvent {
            call_id,
            command: _,
            turn_id: _,
            cwd,
            reason,
            parsed_cmd,
            proposed_execpolicy_amendment,
        } = event;

        // Create a new tool call for the command execution
        let tool_call_id = ToolCallId::new(call_id.clone());
        let ParseCommandToolCall {
            title,
            terminal_output,
            file_extension,
            locations,
            kind,
        } = parse_command_tool_call(parsed_cmd, &cwd);
        self.active_command = Some(ActiveCommand {
            call_id,
            terminal_output,
            tool_call_id: tool_call_id.clone(),
            output: String::new(),
            file_extension,
        });

        // Optional context shown to the user: the model's reason plus any
        // proposed exec-policy amendment.
        let mut content = vec![];

        if let Some(reason) = reason {
            content.push(reason);
        }
        if let Some(amendment) = proposed_execpolicy_amendment {
            content.push(format!(
                "Proposed Amendment: {}",
                amendment.command().join("\n")
            ));
        }

        let content = if content.is_empty() {
            None
        } else {
            Some(vec![content.join("\n").into()])
        };

        let response = client
            .request_permission(
                ToolCallUpdate::new(
                    tool_call_id,
                    ToolCallUpdateFields::new()
                        .kind(kind)
                        .status(ToolCallStatus::Pending)
                        .title(title)
                        .raw_input(raw_input)
                        .content(content)
                        .locations(if locations.is_empty() {
                            None
                        } else {
                            Some(locations)
                        }),
                ),
                vec![
                    PermissionOption::new(
                        "approved-for-session",
                        "Always",
                        PermissionOptionKind::AllowAlways,
                    ),
                    PermissionOption::new("approved", "Yes", PermissionOptionKind::AllowOnce),
                    PermissionOption::new(
                        "abort",
                        "No, provide feedback",
                        PermissionOptionKind::RejectOnce,
                    ),
                ],
            )
            .await?;

        let decision = match response.outcome {
            RequestPermissionOutcome::Selected(SelectedPermissionOutcome { option_id, .. }) => {
                match option_id.0.as_ref() {
                    "approved-for-session" => ReviewDecision::ApprovedForSession,
                    "approved" => ReviewDecision::Approved,
                    _ => ReviewDecision::Abort,
                }
            }
            // NOTE(review): `Cancelled |` is redundant — `_` already covers it.
            RequestPermissionOutcome::Cancelled | _ => ReviewDecision::Abort,
        };

        self.conversation
            .submit(Op::ExecApproval {
                id: self.submission_id.clone(),
                decision,
            })
            .await
            .map_err(|e| Error::from(anyhow::anyhow!(e)))?;

        Ok(())
    }

    /// Record the beginning of a command execution and emit its tool call;
    /// clients with terminal support get an embedded terminal via meta.
    async fn exec_command_begin(&mut self, client: &SessionClient, event: ExecCommandBeginEvent) {
        let raw_input = serde_json::json!(&event);
        let ExecCommandBeginEvent {
            turn_id: _,
            source: _,
            interaction_input: _,
            call_id,
            command: _,
            cwd,
            parsed_cmd,
            process_id: _,
        } = event;
        // Create a new tool call for the command execution
        let tool_call_id = ToolCallId::new(call_id.clone());
        let ParseCommandToolCall {
            title,
            file_extension,
            locations,
            terminal_output,
            kind,
        } = parse_command_tool_call(parsed_cmd, &cwd);

        let active_command = ActiveCommand {
            call_id: call_id.clone(),
            tool_call_id: tool_call_id.clone(),
            output: String::new(),
            file_extension,
            terminal_output,
        };
        let (content, meta) = if
        client.supports_terminal_output(&active_command) {
            // Terminal-capable client: attach a terminal keyed by call_id.
            let content = vec![ToolCallContent::Terminal(Terminal::new(call_id.clone()))];
            let meta = Some(Meta::from_iter([(
                "terminal_info".to_owned(),
                serde_json::json!({
                    "terminal_id": call_id,
                    "cwd": cwd
                }),
            )]));
            (content, meta)
        } else {
            (vec![], None)
        };

        self.active_command = Some(active_command);

        client
            .send_notification(SessionUpdate::ToolCall(
                ToolCall::new(tool_call_id, title)
                    .kind(kind)
                    .status(ToolCallStatus::InProgress)
                    .locations(locations)
                    .raw_input(raw_input)
                    .content(content)
                    .meta(meta),
            ))
            .await;
    }

    /// Stream a chunk of command output to the client: via terminal meta
    /// when supported, otherwise by re-sending the accumulated output as a
    /// fenced code block.
    async fn exec_command_output_delta(
        &mut self,
        client: &SessionClient,
        event: ExecCommandOutputDeltaEvent,
    ) {
        let ExecCommandOutputDeltaEvent {
            call_id,
            chunk,
            stream: _,
        } = event;
        // Stream output bytes to the display-only terminal via ToolCallUpdate meta.
        // Deltas for anything other than the active command are dropped.
        if let Some(active_command) = &mut self.active_command
            && *active_command.call_id == call_id
        {
            let data_str = String::from_utf8_lossy(&chunk).to_string();

            let update = if client.supports_terminal_output(active_command) {
                ToolCallUpdate::new(
                    active_command.tool_call_id.clone(),
                    ToolCallUpdateFields::new(),
                )
                .meta(Meta::from_iter([(
                    "terminal_output".to_owned(),
                    serde_json::json!({
                        "terminal_id": call_id,
                        "data": data_str
                    }),
                )]))
            } else {
                // Fallback path: accumulate and render the whole output,
                // fenced with the read file's extension ("md" stays raw).
                active_command.output.push_str(&data_str);
                let content = match active_command.file_extension.as_deref() {
                    Some("md") => active_command.output.clone(),
                    Some(ext) => format!(
                        "```{ext}\n{}\n```\n",
                        active_command.output.trim_end_matches('\n')
                    ),
                    None => format!(
                        "```sh\n{}\n```\n",
                        active_command.output.trim_end_matches('\n')
                    ),
                };
                ToolCallUpdate::new(
                    active_command.tool_call_id.clone(),
                    ToolCallUpdateFields::new().content(vec![content.into()]),
                )
            };

            client
                .send_notification(SessionUpdate::ToolCallUpdate(update))
                .await;
        }
    }

    /// Finish the active command's tool call, mapping exit code 0 to
    /// Completed and anything else to Failed; clears `active_command`.
    async fn exec_command_end(&mut self, client: &SessionClient, event: ExecCommandEndEvent) {
        let raw_output = serde_json::json!(&event);
        let ExecCommandEndEvent {
            turn_id: _,
            command: _,
            cwd: _,
            parsed_cmd: _,
            source: _,
            interaction_input: _,
            call_id,
            exit_code,
            stdout: _,
            stderr: _,
            aggregated_output: _,
            duration: _,
            formatted_output: _,
            process_id: _,
        } = event;
        if let Some(active_command) = self.active_command.take()
            && active_command.call_id == call_id
        {
            let is_success = exit_code == 0;

            client
                .send_notification(SessionUpdate::ToolCallUpdate(
                    ToolCallUpdate::new(
                        active_command.tool_call_id.clone(),
                        ToolCallUpdateFields::new()
                            .status(if is_success {
                                ToolCallStatus::Completed
                            } else {
                                ToolCallStatus::Failed
                            })
                            .raw_output(raw_output),
                    )
                    // Terminal-capable clients also get the exit status so
                    // they can close the embedded terminal.
                    .meta(
                        client.supports_terminal_output(&active_command).then(|| {
                            Meta::from_iter([(
                                "terminal_exit".into(),
                                serde_json::json!({
                                    "terminal_id": call_id,
                                    "exit_code": exit_code,
                                    "signal": null
                                }),
                            )])
                        }),
                    ),
                ))
                .await;
        }
    }

    /// Echo stdin written to the running command back into its tool call,
    /// newline-padded, using the same terminal/fallback split as output.
    async fn terminal_interaction(
        &mut self,
        client: &SessionClient,
        event: TerminalInteractionEvent,
    ) {
        let TerminalInteractionEvent {
            call_id,
            process_id: _,
            stdin,
        } = event;

        let stdin = format!("\n{stdin}\n");
        // Stream output bytes to the display-only terminal via ToolCallUpdate meta.
        if let Some(active_command) = &mut self.active_command
            && *active_command.call_id == call_id
        {
            let update = if client.supports_terminal_output(active_command) {
                ToolCallUpdate::new(
                    active_command.tool_call_id.clone(),
                    ToolCallUpdateFields::new(),
                )
                .meta(Meta::from_iter([(
                    "terminal_output".to_owned(),
                    serde_json::json!({
                        "terminal_id": call_id,
                        "data": stdin
                    }),
                )]))
            } else {
                active_command.output.push_str(&stdin);
                let content = match active_command.file_extension.as_deref() {
                    Some("md") => active_command.output.clone(),
                    Some(ext) => format!(
                        "```{ext}\n{}\n```\n",
                        active_command.output.trim_end_matches('\n')
                    ),
                    None => format!(
                        "```sh\n{}\n```\n",
                        active_command.output.trim_end_matches('\n')
                    ),
                };
                ToolCallUpdate::new(
                    active_command.tool_call_id.clone(),
ToolCallUpdateFields::new().content(vec![content.into()]), 1199 | ) 1200 | }; 1201 | 1202 | client 1203 | .send_notification(SessionUpdate::ToolCallUpdate(update)) 1204 | .await; 1205 | } 1206 | } 1207 | 1208 | async fn start_web_search(&mut self, client: &SessionClient, call_id: String) { 1209 | self.active_web_search = Some(call_id.clone()); 1210 | client 1211 | .send_notification(SessionUpdate::ToolCall( 1212 | ToolCall::new(call_id, "Searching the Web").kind(ToolKind::Fetch), 1213 | )) 1214 | .await; 1215 | } 1216 | 1217 | async fn update_web_search_query( 1218 | &self, 1219 | client: &SessionClient, 1220 | call_id: String, 1221 | query: String, 1222 | ) { 1223 | client 1224 | .send_notification(SessionUpdate::ToolCallUpdate(ToolCallUpdate::new( 1225 | call_id, 1226 | ToolCallUpdateFields::new() 1227 | .status(ToolCallStatus::InProgress) 1228 | .title(format!("Searching for: {query}")) 1229 | .raw_input(serde_json::json!({ 1230 | "query": query 1231 | })), 1232 | ))) 1233 | .await; 1234 | } 1235 | 1236 | async fn complete_web_search(&mut self, client: &SessionClient) { 1237 | if let Some(call_id) = self.active_web_search.take() { 1238 | client 1239 | .send_notification(SessionUpdate::ToolCallUpdate(ToolCallUpdate::new( 1240 | call_id, 1241 | ToolCallUpdateFields::new().status(ToolCallStatus::Completed), 1242 | ))) 1243 | .await; 1244 | } 1245 | } 1246 | } 1247 | 1248 | struct ParseCommandToolCall { 1249 | title: String, 1250 | file_extension: Option, 1251 | terminal_output: bool, 1252 | locations: Vec, 1253 | kind: ToolKind, 1254 | } 1255 | 1256 | fn parse_command_tool_call(parsed_cmd: Vec, cwd: &Path) -> ParseCommandToolCall { 1257 | let mut titles = Vec::new(); 1258 | let mut locations = Vec::new(); 1259 | let mut file_extension = None; 1260 | let mut terminal_output = false; 1261 | let mut kind = ToolKind::Execute; 1262 | 1263 | for cmd in parsed_cmd { 1264 | let mut cmd_path = None; 1265 | match cmd { 1266 | ParsedCommand::Read { cmd: _, name, path } => { 
                titles.push(format!("Read {name}"));
                // A later Read overwrites an earlier extension; the last file
                // read decides the fence language.
                file_extension = path
                    .extension()
                    .map(|ext| ext.to_string_lossy().to_string());
                cmd_path = Some(path);
                kind = ToolKind::Read;
            }
            ParsedCommand::ListFiles { cmd: _, path } => {
                let dir = if let Some(path) = path.as_ref() {
                    &cwd.join(path)
                } else {
                    cwd
                };
                titles.push(format!("List {}", dir.display()));
                cmd_path = path.map(PathBuf::from);
                kind = ToolKind::Search;
            }
            ParsedCommand::Search { cmd, query, path } => {
                titles.push(match (query, path.as_ref()) {
                    (Some(query), Some(path)) => format!("Search {query} in {path}"),
                    (Some(query), None) => format!("Search {query}"),
                    // No structured query available — fall back to the raw command.
                    _ => format!("Search {cmd}"),
                });
                kind = ToolKind::Search;
            }
            ParsedCommand::Unknown { cmd } => {
                titles.push(format!("Run {cmd}"));
                terminal_output = true;
            }
        }

        if let Some(path) = cmd_path {
            // Normalize relative paths against the workspace root.
            locations.push(ToolCallLocation::new(if path.is_relative() {
                cwd.join(&path)
            } else {
                path
            }));
        }
    }

    ParseCommandToolCall {
        title: titles.join(", "),
        file_extension,
        terminal_output,
        locations,
        kind,
    }
}

/// Tracks a single background `Op` submission (e.g. `/compact`, `/undo`)
/// until the turn finishes and a stop reason can be delivered to the waiter.
struct TaskState {
    // Consumed (taken) when the terminal event for this submission arrives.
    response_tx: Option<oneshot::Sender<Result<StopReason, Error>>>,
}

impl TaskState {
    fn new(response_tx: oneshot::Sender<Result<StopReason, Error>>) -> Self {
        Self {
            response_tx: Some(response_tx),
        }
    }

    // A task is considered active until its response channel is consumed.
    fn is_active(&self) -> bool {
        self.response_tx.is_some()
    }

    /// Routes conversation events for this submission: terminal events resolve
    /// the response channel, informational events are forwarded to the client.
    async fn handle_event(&mut self, client: &SessionClient, event: EventMsg) {
        match event {
            EventMsg::TaskComplete(..)
            => {
                if let Some(response_tx) = self.response_tx.take() {
                    // Receiver may already be gone; dropping the result is fine.
                    response_tx.send(Ok(StopReason::EndTurn)).ok();
                }
            }
            // Safer to grab the non-streaming version of the events so we don't duplicate
            // and it is likely these are synthetic events, not from the model
            EventMsg::AgentMessage(AgentMessageEvent { message }) => {
                client.send_agent_text(message).await;
            }
            EventMsg::AgentReasoning(AgentReasoningEvent { text }) => {
                client.send_agent_thought(text).await;
            }
            EventMsg::UndoStarted(event) => {
                client
                    .send_agent_text(
                        event
                            .message
                            .unwrap_or_else(|| "Undo in progress...".to_string()),
                    )
                    .await;
            }
            EventMsg::UndoCompleted(event) => {
                let fallback = if event.success {
                    "Undo completed.".to_string()
                } else {
                    "Undo failed.".to_string()
                };
                client
                    .send_agent_text(event.message.unwrap_or(fallback))
                    .await;
            }
            // Stream errors are recoverable: log and keep the turn going.
            EventMsg::StreamError(StreamErrorEvent {
                message,
                codex_error_info,
            }) => {
                error!("Handled error during turn: {message} {codex_error_info:?}");
            }
            // Unrecoverable errors resolve the pending response with a failure.
            EventMsg::Error(ErrorEvent {
                message,
                codex_error_info,
            }) => {
                error!("Unhandled error during turn: {message} {codex_error_info:?}");
                if let Some(response_tx) = self.response_tx.take() {
                    response_tx
                        .send(Err(Error::internal_error().data(
                            json!({ "message": message, "codex_error_info": codex_error_info }),
                        )))
                        .ok();
                }
            }
            EventMsg::TurnAborted(TurnAbortedEvent { reason }) => {
                info!("Turn aborted: {reason:?}");
                if let Some(response_tx) = self.response_tx.take() {
                    response_tx.send(Ok(StopReason::Cancelled)).ok();
                }
            }
            EventMsg::ShutdownComplete => {
                info!("Agent shutting down");
                if let Some(response_tx) = self.response_tx.take() {
                    response_tx.send(Ok(StopReason::Cancelled)).ok();
                }
            }
            EventMsg::Warning(WarningEvent { message }) => {
                warn!("Warning: {message}");
            }
            EventMsg::McpStartupUpdate(McpStartupUpdateEvent { server, status }) => {
                info!("MCP startup update: server={server}, status={status:?}");
            }
            EventMsg::McpStartupComplete(McpStartupCompleteEvent {
                ready,
                failed,
                cancelled,
            }) => {
                info!(
                    "MCP startup complete: ready={ready:?}, failed={failed:?}, cancelled={cancelled:?}"
                );
            }
            // Expected but ignore
            EventMsg::TaskStarted(..)
            | EventMsg::ItemStarted(..)
            | EventMsg::ItemCompleted(..)
            | EventMsg::TokenCount(..)
            | EventMsg::AgentMessageDelta(..)
            | EventMsg::AgentReasoningDelta(..)
            | EventMsg::AgentMessageContentDelta(..)
            | EventMsg::AgentReasoningRawContent(..)
            | EventMsg::AgentReasoningRawContentDelta(..)
            | EventMsg::ReasoningContentDelta(..)
            | EventMsg::ReasoningRawContentDelta(..)
            | EventMsg::AgentReasoningSectionBreak(..)
            | EventMsg::RawResponseItem(..)
            | EventMsg::BackgroundEvent(..)
            | EventMsg::ContextCompacted(..) => {}
            // Unexpected events for this submission
            e @ (EventMsg::UserMessage(..)
            | EventMsg::SessionConfigured(..)
            | EventMsg::McpToolCallBegin(..)
            | EventMsg::McpToolCallEnd(..)
            | EventMsg::WebSearchBegin(..)
            | EventMsg::WebSearchEnd(..)
            | EventMsg::ExecCommandBegin(..)
            | EventMsg::ExecCommandOutputDelta(..)
            | EventMsg::ExecCommandEnd(..)
            | EventMsg::TerminalInteraction(..)
            | EventMsg::ViewImageToolCall(..)
            | EventMsg::ExecApprovalRequest(..)
            | EventMsg::ApplyPatchApprovalRequest(..)
            | EventMsg::PatchApplyBegin(..)
            | EventMsg::PatchApplyEnd(..)
            | EventMsg::TurnDiff(..)
            | EventMsg::GetHistoryEntryResponse(..)
            | EventMsg::McpListToolsResponse(..)
            | EventMsg::ListCustomPromptsResponse(..)
            | EventMsg::ListSkillsResponse(..)
            | EventMsg::PlanUpdate(..)
            | EventMsg::EnteredReviewMode(..)
            | EventMsg::ExitedReviewMode(..)
            | EventMsg::DeprecationNotice(..)
            | EventMsg::ElicitationRequest(..)) => {
                warn!("Unexpected event: {:?}", e);
            }
        }
    }
}

/// Cheaply cloneable per-session handle for talking back to the ACP client.
#[derive(Clone)]
struct SessionClient {
    session_id: SessionId,
    // NOTE(review): the generic parameter of this Arc was lost in this view of
    // the source — confirm the concrete connection type upstream.
    client: Arc<AgentSideConnection>,
    client_capabilities: Arc<Mutex<ClientCapabilities>>,
}

impl SessionClient {
    fn new(session_id: SessionId, client_capabilities: Arc<Mutex<ClientCapabilities>>) -> Self {
        Self {
            session_id,
            // Panics if called before the global client is installed — that
            // would be a startup-order bug.
            client: ACP_CLIENT.get().expect("Client should be set").clone(),
            client_capabilities,
        }
    }

    // Test-only constructor that injects a mock client instead of the global.
    #[cfg(test)]
    fn with_client(
        session_id: SessionId,
        client: Arc<AgentSideConnection>,
        client_capabilities: Arc<Mutex<ClientCapabilities>>,
    ) -> Self {
        Self {
            session_id,
            client,
            client_capabilities,
        }
    }

    /// True when this command wants terminal streaming AND the client opted in
    /// via the `terminal_output` capability meta flag.
    fn supports_terminal_output(&self, active_command: &ActiveCommand) -> bool {
        active_command.terminal_output
            && self
                .client_capabilities
                .lock()
                .unwrap()
                .meta
                .as_ref()
                .is_some_and(|v| {
                    v.get("terminal_output")
                        .is_some_and(|v| v.as_bool().unwrap_or_default())
                })
    }

    /// Sends a session update; failures are logged, never propagated.
    async fn send_notification(&self, update: SessionUpdate) {
        if let Err(e) = self
            .client
            .session_notification(SessionNotification::new(self.session_id.clone(), update))
            .await
        {
            error!("Failed to send session notification: {:?}", e);
        }
    }

    /// Streams a chunk of agent-visible message text to the client.
    async fn send_agent_text(&self, text: impl Into<String>) {
        self.send_notification(SessionUpdate::AgentMessageChunk(ContentChunk::new(
            text.into().into(),
        )))
        .await;
    }

    /// Streams a chunk of agent reasoning ("thought") text to the client.
    async fn send_agent_thought(&self, text: impl Into<String>) {
        self.send_notification(SessionUpdate::AgentThoughtChunk(ContentChunk::new(
            text.into().into(),
        )))
        .await;
    }

    /// Converts Codex plan steps to ACP plan entries and pushes the plan.
    /// All entries are reported at Medium priority.
    async fn update_plan(&self, plan: Vec<PlanItemArg>) {
        self.send_notification(SessionUpdate::Plan(Plan::new(
            plan.into_iter()
                .map(|entry| {
                    PlanEntry::new(
                        entry.step,
                        PlanEntryPriority::Medium,
                        match entry.status {
                            StepStatus::Pending => PlanEntryStatus::Pending,
                            StepStatus::InProgress => PlanEntryStatus::InProgress,
                            StepStatus::Completed => PlanEntryStatus::Completed,
                        },
                    )
                })
                .collect(),
        )))
        .await;
    }

    /// Asks the client to approve a tool call, offering `options` as choices.
    async fn request_permission(
        &self,
        tool_call: ToolCallUpdate,
        options: Vec<PermissionOption>,
    ) -> Result<RequestPermissionResponse, Error> {
        self.client
            .request_permission(RequestPermissionRequest::new(
                self.session_id.clone(),
                tool_call,
                options,
            ))
            .await
    }
}

/// Owns a single Codex conversation and services its message queue and event
/// stream on a local task.
struct ConversationActor<A> {
    /// Allows for logging out from slash commands
    auth: A,
    /// Used for sending messages back to the client.
    client: SessionClient,
    /// The conversation associated with this task.
    conversation: Arc<CodexConversation>,
    /// The configuration for the conversation.
    config: Config,
    /// The custom prompts loaded for this workspace.
    custom_prompts: Rc<RefCell<Vec<CustomPrompt>>>,
    /// The models available for this conversation.
    models_manager: Arc<ModelsManager>,
    /// A sender for each interested `Op` submission that needs events routed.
    submissions: HashMap<String, SubmissionState>,
    /// A receiver for incoming conversation messages.
    message_rx: mpsc::UnboundedReceiver<ConversationMessage>,
}

// NOTE(review): the trait bound on `A` was lost in this view of the source —
// it must provide `logout()`; confirm the exact bound upstream.
impl<A: Auth> ConversationActor<A> {
    fn new(
        auth: A,
        client: SessionClient,
        conversation: Arc<CodexConversation>,
        models_manager: Arc<ModelsManager>,
        config: Config,
        message_rx: mpsc::UnboundedReceiver<ConversationMessage>,
    ) -> Self {
        Self {
            auth,
            client,
            conversation,
            config,
            custom_prompts: Rc::default(),
            models_manager,
            submissions: HashMap::new(),
            message_rx,
        }
    }

    /// Drives the actor: multiplexes incoming client messages and conversation
    /// events until the message channel closes or the event stream errors.
    async fn spawn(mut self) {
        loop {
            tokio::select! {
                // `biased` gives client messages priority over event delivery.
                biased;
                message = self.message_rx.recv() => match message {
                    Some(message) => self.handle_message(message).await,
                    None => break,
                },
                event = self.conversation.next_event() => match event {
                    Ok(event) => self.handle_event(event).await,
                    Err(e) => {
                        error!("Error getting next event: {:?}", e);
                        break;
                    }
                }
            }
            // Garbage-collect submission senders whose receivers are gone.
            self.submissions
                .retain(|_, submission| submission.is_active());
        }
    }

    /// Dispatches one client message; each variant replies on its own
    /// oneshot channel (send failures are deliberately ignored via `drop`).
    async fn handle_message(&mut self, message: ConversationMessage) {
        match message {
            ConversationMessage::Load { response_tx } => {
                let result = self.handle_load().await;
                drop(response_tx.send(result));
                let client = self.client.clone();
                let mut available_commands = Self::builtin_commands();
                let load_custom_prompts = self.load_custom_prompts().await;
                let custom_prompts = self.custom_prompts.clone();

                // Have this happen after the session is loaded by putting it
                // in a separate task
                tokio::task::spawn_local(async move {
                    let mut new_custom_prompts = load_custom_prompts
                        .await
                        .map_err(|_| Error::internal_error())
                        .flatten()
                        .inspect_err(|e| error!("Failed to load custom prompts {e:?}"))
                        .unwrap_or_default();

                    // Expose each workspace prompt as an available slash command.
                    for prompt in &new_custom_prompts {
                        available_commands.push(
                            AvailableCommand::new(
                                prompt.name.clone(),
                                prompt.description.clone().unwrap_or_default(),
                            )
                            .input(prompt.argument_hint.as_ref().map(
                                |hint| {
                                    AvailableCommandInput::Unstructured(
                                        UnstructuredCommandInput::new(hint.clone()),
                                    )
                                },
                            )),
                        );
                    }
                    std::mem::swap(
                        custom_prompts.borrow_mut().deref_mut(),
                        &mut new_custom_prompts,
                    );

                    client
                        .send_notification(SessionUpdate::AvailableCommandsUpdate(
                            AvailableCommandsUpdate::new(available_commands),
                        ))
                        .await;
                });
            }
            ConversationMessage::Prompt {
                request,
                response_tx,
            } => {
                let result = self.handle_prompt(request).await;
                drop(response_tx.send(result));
            }
            ConversationMessage::SetMode { mode, response_tx } => {
                let result = self.handle_set_mode(mode).await;
                drop(response_tx.send(result));
            }
            ConversationMessage::SetModel { model, response_tx } => {
                let result = self.handle_set_model(model).await;
                drop(response_tx.send(result));
            }
            ConversationMessage::Cancel { response_tx } => {
                let result = self.handle_cancel().await;
                drop(response_tx.send(result));
            }
        }
    }

    /// The fixed set of slash commands every session starts with.
    fn builtin_commands() -> Vec<AvailableCommand> {
        vec![
            AvailableCommand::new("review", "Review my current changes and find issues").input(
                AvailableCommandInput::Unstructured(UnstructuredCommandInput::new(
                    "optional custom review instructions",
                )),
            ),
            AvailableCommand::new(
                "review-branch",
                "Review the code changes against a specific branch",
            )
            .input(AvailableCommandInput::Unstructured(
                UnstructuredCommandInput::new("branch name"),
            )),
            AvailableCommand::new(
                "review-commit",
                "Review the
code changes introduced by a commit",
            )
            .input(AvailableCommandInput::Unstructured(
                UnstructuredCommandInput::new("commit sha"),
            )),
            AvailableCommand::new(
                "init",
                "create an AGENTS.md file with instructions for Codex",
            ),
            AvailableCommand::new(
                "compact",
                "summarize conversation to prevent hitting the context limit",
            ),
            AvailableCommand::new("undo", "undo Codex’s most recent turn"),
            AvailableCommand::new("logout", "logout of Codex"),
        ]
    }

    /// Submits `Op::ListCustomPrompts` and returns a receiver that resolves
    /// once the response event is routed back. Submission failures resolve
    /// the receiver immediately with an error.
    async fn load_custom_prompts(
        &mut self,
    ) -> oneshot::Receiver<Result<Vec<CustomPrompt>, Error>> {
        let (response_tx, response_rx) = oneshot::channel();
        let submission_id = match self.conversation.submit(Op::ListCustomPrompts).await {
            Ok(id) => id,
            Err(e) => {
                drop(response_tx.send(Err(Error::internal_error().data(e.to_string()))));
                return response_rx;
            }
        };

        self.submissions.insert(
            submission_id,
            SubmissionState::CustomPrompts(CustomPromptsState::new(response_tx)),
        );

        response_rx
    }

    /// Builds the session mode list from the approval presets. Returns `None`
    /// when the current approval/sandbox combination matches no preset.
    fn modes(&self) -> Option<SessionModeState> {
        let current_mode_id = APPROVAL_PRESETS
            .iter()
            .find(|preset| {
                preset.approval == self.config.approval_policy
                    && preset.sandbox == self.config.sandbox_policy
            })
            .map(|preset| SessionModeId::new(preset.id))?;

        Some(SessionModeState::new(
            current_mode_id,
            APPROVAL_PRESETS
                .iter()
                .map(|preset| {
                    SessionMode::new(preset.id, preset.label).description(preset.description)
                })
                .collect(),
        ))
    }

    /// Resolves the configured model to a preset-backed `ModelId`, falling
    /// back to the preset's default reasoning effort when the configured
    /// effort is unsupported. `None` when no preset matches.
    async fn find_current_model(&self) -> Option<ModelId> {
        let model_presets = self.models_manager.list_models(&self.config).await;
        let config_model = self.get_current_model().await;
        let preset = model_presets
            .iter()
            .find(|preset| preset.model == config_model)?;

        let effort = self
            .config
            .model_reasoning_effort
            .and_then(|effort| {
                preset
                    .supported_reasoning_efforts
                    .iter()
                    .find_map(|e| (e.effort == effort).then_some(effort))
            })
            .unwrap_or(preset.default_reasoning_effort);

        Some(Self::model_id(&preset.id, effort))
    }

    // Encodes model + effort as a single id: "<model>/<effort>".
    fn model_id(id: &str, effort: ReasoningEffort) -> ModelId {
        ModelId::new(format!("{id}/{effort}"))
    }

    // Inverse of `model_id`; `None` when the id has no "/<effort>" suffix
    // or the effort fails to deserialize.
    fn parse_model_id(id: &ModelId) -> Option<(String, ReasoningEffort)> {
        let (model, reasoning) = id.0.split_once('/')?;
        let reasoning = serde_json::from_value(reasoning.into()).ok()?;
        Some((model.to_owned(), reasoning))
    }

    /// Assembles the model picker state: the current model plus every
    /// preset × reasoning-effort combination.
    async fn models(&self) -> Result<SessionModelState, Error> {
        let mut available_models = Vec::new();

        let current_model_id = if let Some(model_id) = self.find_current_model().await {
            model_id
        } else {
            // If no preset found, return the current model string as-is
            let model_id = ModelId::new(self.get_current_model().await);
            available_models.push(ModelInfo::new(model_id.clone(), model_id.to_string()));
            model_id
        };

        available_models.extend(
            self.models_manager
                .list_models(&self.config)
                .await
                .iter()
                .flat_map(|preset| {
                    preset.supported_reasoning_efforts.iter().map(|effort| {
                        ModelInfo::new(
                            Self::model_id(&preset.id, effort.effort),
                            format!("{} ({})", preset.display_name, effort.effort),
                        )
                        .description(format!("{} {}", preset.description, effort.description))
                    })
                }),
        );

        Ok(SessionModelState::new(current_model_id, available_models))
    }

    /// Builds the load-session response with model and mode state.
    async fn handle_load(&mut self) -> Result<LoadSessionResponse, Error> {
        Ok(LoadSessionResponse::new()
            .models(self.models().await?)
1828 | .modes(self.modes())) 1829 | } 1830 | 1831 | async fn handle_prompt( 1832 | &mut self, 1833 | request: PromptRequest, 1834 | ) -> Result>, Error> { 1835 | let (response_tx, response_rx) = oneshot::channel(); 1836 | 1837 | let items = build_prompt_items(request.prompt); 1838 | let op; 1839 | if let Some((name, rest)) = extract_slash_command(&items) { 1840 | match name { 1841 | "compact" => op = Op::Compact, 1842 | "undo" => op = Op::Undo, 1843 | "init" => { 1844 | op = Op::UserInput { 1845 | items: vec![UserInput::Text { 1846 | text: INIT_COMMAND_PROMPT.into(), 1847 | }], 1848 | } 1849 | } 1850 | "review" => { 1851 | let instructions = rest.trim(); 1852 | let target = if instructions.is_empty() { 1853 | ReviewTarget::UncommittedChanges 1854 | } else { 1855 | ReviewTarget::Custom { 1856 | instructions: instructions.to_owned(), 1857 | } 1858 | }; 1859 | 1860 | op = Op::Review { 1861 | review_request: ReviewRequest { 1862 | user_facing_hint: Some(user_facing_hint(&target)), 1863 | target, 1864 | }, 1865 | } 1866 | } 1867 | "review-branch" if !rest.is_empty() => { 1868 | let target = ReviewTarget::BaseBranch { 1869 | branch: rest.trim().to_owned(), 1870 | }; 1871 | op = Op::Review { 1872 | review_request: ReviewRequest { 1873 | user_facing_hint: Some(user_facing_hint(&target)), 1874 | target, 1875 | }, 1876 | } 1877 | } 1878 | "review-commit" if !rest.is_empty() => { 1879 | let target = ReviewTarget::Commit { 1880 | sha: rest.trim().to_owned(), 1881 | title: None, 1882 | }; 1883 | op = Op::Review { 1884 | review_request: ReviewRequest { 1885 | user_facing_hint: Some(user_facing_hint(&target)), 1886 | target, 1887 | }, 1888 | } 1889 | } 1890 | "logout" => { 1891 | self.auth.logout()?; 1892 | return Err(Error::auth_required()); 1893 | } 1894 | _ => { 1895 | if let Some(prompt) = 1896 | expand_custom_prompt(name, rest, self.custom_prompts.borrow().as_ref()) 1897 | .map_err(|e| Error::invalid_params().data(e.user_message()))? 
1898 | { 1899 | op = Op::UserInput { 1900 | items: vec![UserInput::Text { text: prompt }], 1901 | } 1902 | } else { 1903 | op = Op::UserInput { items } 1904 | } 1905 | } 1906 | } 1907 | } else { 1908 | op = Op::UserInput { items } 1909 | } 1910 | 1911 | let submission_id = self 1912 | .conversation 1913 | .submit(op.clone()) 1914 | .await 1915 | .map_err(|e| Error::internal_error().data(e.to_string()))?; 1916 | 1917 | info!("Submitted prompt with submission_id: {submission_id}"); 1918 | info!("Starting to wait for conversation events for submission_id: {submission_id}"); 1919 | 1920 | let state = match op { 1921 | Op::Compact | Op::Undo => SubmissionState::Task(TaskState::new(response_tx)), 1922 | _ => SubmissionState::Prompt(PromptState::new( 1923 | self.conversation.clone(), 1924 | response_tx, 1925 | submission_id.clone(), 1926 | )), 1927 | }; 1928 | 1929 | self.submissions.insert(submission_id, state); 1930 | 1931 | Ok(response_rx) 1932 | } 1933 | 1934 | async fn handle_set_mode(&mut self, mode: SessionModeId) -> Result<(), Error> { 1935 | let preset = APPROVAL_PRESETS 1936 | .iter() 1937 | .find(|preset| mode.0.as_ref() == preset.id) 1938 | .ok_or_else(Error::invalid_params)?; 1939 | 1940 | self.conversation 1941 | .submit(Op::OverrideTurnContext { 1942 | cwd: None, 1943 | approval_policy: Some(preset.approval), 1944 | sandbox_policy: Some(preset.sandbox.clone()), 1945 | model: None, 1946 | effort: None, 1947 | summary: None, 1948 | }) 1949 | .await 1950 | .map_err(|e| Error::from(anyhow::anyhow!(e)))?; 1951 | 1952 | self.config.approval_policy = preset.approval; 1953 | self.config.sandbox_policy = preset.sandbox.clone(); 1954 | 1955 | match preset.sandbox { 1956 | // Treat this user action as a trusted dir 1957 | SandboxPolicy::DangerFullAccess | SandboxPolicy::WorkspaceWrite { .. 
} => { 1958 | set_project_trust_level( 1959 | &self.config.codex_home, 1960 | &self.config.cwd, 1961 | TrustLevel::Trusted, 1962 | )?; 1963 | } 1964 | SandboxPolicy::ReadOnly => {} 1965 | } 1966 | 1967 | Ok(()) 1968 | } 1969 | 1970 | async fn get_current_model(&self) -> String { 1971 | self.models_manager 1972 | .get_model(&self.config.model, &self.config) 1973 | .await 1974 | } 1975 | 1976 | async fn handle_set_model(&mut self, model: ModelId) -> Result<(), Error> { 1977 | // Try parsing as preset format, otherwise use as-is, fallback to config 1978 | let (model_to_use, effort_to_use) = if let Some((m, e)) = Self::parse_model_id(&model) { 1979 | (m, Some(e)) 1980 | } else { 1981 | let model_str = model.0.to_string(); 1982 | let fallback = if !model_str.is_empty() { 1983 | model_str 1984 | } else { 1985 | self.get_current_model().await 1986 | }; 1987 | (fallback, self.config.model_reasoning_effort) 1988 | }; 1989 | 1990 | if model_to_use.is_empty() { 1991 | return Err(Error::invalid_params().data("No model parsed or configured")); 1992 | } 1993 | 1994 | self.conversation 1995 | .submit(Op::OverrideTurnContext { 1996 | cwd: None, 1997 | approval_policy: None, 1998 | sandbox_policy: None, 1999 | model: Some(model_to_use.clone()), 2000 | effort: Some(effort_to_use), 2001 | summary: None, 2002 | }) 2003 | .await 2004 | .map_err(|e| Error::from(anyhow::anyhow!(e)))?; 2005 | 2006 | self.config.model = Some(model_to_use); 2007 | self.config.model_reasoning_effort = effort_to_use; 2008 | 2009 | Ok(()) 2010 | } 2011 | 2012 | async fn handle_cancel(&mut self) -> Result<(), Error> { 2013 | self.conversation 2014 | .submit(Op::Interrupt) 2015 | .await 2016 | .map_err(|e| Error::from(anyhow::anyhow!(e)))?; 2017 | Ok(()) 2018 | } 2019 | 2020 | async fn handle_event(&mut self, Event { id, msg }: Event) { 2021 | if let Some(submission) = self.submissions.get_mut(&id) { 2022 | submission.handle_event(&self.client, msg).await; 2023 | } else { 2024 | warn!("Received event for unknown 
submission ID: {id} {msg:?}"); 2025 | } 2026 | } 2027 | } 2028 | 2029 | fn build_prompt_items(prompt: Vec) -> Vec { 2030 | prompt 2031 | .into_iter() 2032 | .filter_map(|block| match block { 2033 | ContentBlock::Text(text_block) => Some(UserInput::Text { 2034 | text: text_block.text, 2035 | }), 2036 | ContentBlock::Image(image_block) => Some(UserInput::Image { 2037 | image_url: format!("data:{};base64,{}", image_block.mime_type, image_block.data), 2038 | }), 2039 | ContentBlock::ResourceLink(ResourceLink { name, uri, .. }) => Some(UserInput::Text { 2040 | text: format_uri_as_link(Some(name), uri), 2041 | }), 2042 | ContentBlock::Resource(EmbeddedResource { 2043 | resource: 2044 | EmbeddedResourceResource::TextResourceContents(TextResourceContents { 2045 | text, 2046 | uri, 2047 | .. 2048 | }), 2049 | .. 2050 | }) => Some(UserInput::Text { 2051 | text: format!( 2052 | "{}\n\n{text}\n", 2053 | format_uri_as_link(None, uri.clone()) 2054 | ), 2055 | }), 2056 | // Skip other content types for now 2057 | ContentBlock::Audio(..) | ContentBlock::Resource(..) | _ => None, 2058 | }) 2059 | .collect() 2060 | } 2061 | 2062 | fn format_uri_as_link(name: Option, uri: String) -> String { 2063 | if let Some(name) = name 2064 | && !name.is_empty() 2065 | { 2066 | format!("[@{name}]({uri})") 2067 | } else if let Some(path) = uri.strip_prefix("file://") { 2068 | let name = path.split('/').next_back().unwrap_or(path); 2069 | format!("[@{name}]({uri})") 2070 | } else if uri.starts_with("zed://") { 2071 | let name = uri.split('/').next_back().unwrap_or(&uri); 2072 | format!("[@{name}]({uri})") 2073 | } else { 2074 | uri 2075 | } 2076 | } 2077 | 2078 | fn codex_content_to_acp_content(content: mcp_types::ContentBlock) -> ToolCallContent { 2079 | ToolCallContent::Content(Content::new(match content { 2080 | mcp_types::ContentBlock::TextContent(mcp_types::TextContent { 2081 | annotations, text, .. 
2082 | }) => ContentBlock::Text( 2083 | TextContent::new(text).annotations(annotations.map(convert_annotations)), 2084 | ), 2085 | mcp_types::ContentBlock::ImageContent(mcp_types::ImageContent { 2086 | annotations, 2087 | data, 2088 | mime_type, 2089 | .. 2090 | }) => ContentBlock::Image( 2091 | ImageContent::new(data, mime_type).annotations(annotations.map(convert_annotations)), 2092 | ), 2093 | mcp_types::ContentBlock::AudioContent(mcp_types::AudioContent { 2094 | annotations, 2095 | data, 2096 | mime_type, 2097 | .. 2098 | }) => ContentBlock::Audio( 2099 | AudioContent::new(data, mime_type).annotations(annotations.map(convert_annotations)), 2100 | ), 2101 | mcp_types::ContentBlock::ResourceLink(mcp_types::ResourceLink { 2102 | annotations, 2103 | description, 2104 | mime_type, 2105 | name, 2106 | size, 2107 | title, 2108 | uri, 2109 | .. 2110 | }) => ContentBlock::ResourceLink( 2111 | ResourceLink::new(name, uri) 2112 | .annotations(annotations.map(convert_annotations)) 2113 | .description(description) 2114 | .mime_type(mime_type) 2115 | .size(size) 2116 | .title(title), 2117 | ), 2118 | mcp_types::ContentBlock::EmbeddedResource(mcp_types::EmbeddedResource { 2119 | annotations, 2120 | resource, 2121 | .. 
2122 | }) => { 2123 | let resource = match resource { 2124 | mcp_types::EmbeddedResourceResource::TextResourceContents( 2125 | mcp_types::TextResourceContents { 2126 | mime_type, 2127 | text, 2128 | uri, 2129 | }, 2130 | ) => EmbeddedResourceResource::TextResourceContents( 2131 | TextResourceContents::new(text, uri).mime_type(mime_type), 2132 | ), 2133 | mcp_types::EmbeddedResourceResource::BlobResourceContents( 2134 | mcp_types::BlobResourceContents { 2135 | blob, 2136 | mime_type, 2137 | uri, 2138 | }, 2139 | ) => EmbeddedResourceResource::BlobResourceContents( 2140 | BlobResourceContents::new(blob, uri).mime_type(mime_type), 2141 | ), 2142 | }; 2143 | ContentBlock::Resource( 2144 | EmbeddedResource::new(resource).annotations(annotations.map(convert_annotations)), 2145 | ) 2146 | } 2147 | })) 2148 | } 2149 | 2150 | fn convert_annotations( 2151 | mcp_types::Annotations { 2152 | audience, 2153 | last_modified, 2154 | priority, 2155 | }: mcp_types::Annotations, 2156 | ) -> Annotations { 2157 | Annotations::new() 2158 | .audience(audience.map(|a| { 2159 | a.into_iter() 2160 | .map(|audience| match audience { 2161 | mcp_types::Role::Assistant => agent_client_protocol::Role::Assistant, 2162 | mcp_types::Role::User => agent_client_protocol::Role::User, 2163 | }) 2164 | .collect::>() 2165 | })) 2166 | .last_modified(last_modified) 2167 | .priority(priority) 2168 | } 2169 | 2170 | fn extract_tool_call_content_from_changes( 2171 | changes: HashMap, 2172 | ) -> ( 2173 | String, 2174 | Vec, 2175 | impl Iterator, 2176 | ) { 2177 | ( 2178 | format!( 2179 | "Edit {}", 2180 | changes.keys().map(|p| p.display().to_string()).join(", ") 2181 | ), 2182 | changes.keys().map(ToolCallLocation::new).collect(), 2183 | changes.into_iter().map(|(path, change)| { 2184 | ToolCallContent::Diff(match change { 2185 | codex_core::protocol::FileChange::Add { content } => Diff::new(path, content), 2186 | codex_core::protocol::FileChange::Delete { content } => { 2187 | Diff::new(path, 
String::new()).old_text(content)
                }
                codex_core::protocol::FileChange::Update {
                    unified_diff: _,
                    move_path,
                    old_content,
                    new_content,
                    // Renames report the destination path as the diff target.
                } => Diff::new(move_path.unwrap_or(path), new_content).old_text(old_content),
            })
        }),
    )
}

/// Checks if a prompt is slash command
// Only the first text block is inspected; returns the command name and the
// remainder of that line.
fn extract_slash_command(content: &[UserInput]) -> Option<(&str, &str)> {
    let line = content.first().and_then(|block| match block {
        UserInput::Text { text, .. } => Some(text),
        _ => None,
    })?;

    parse_slash_name(line)
}

#[cfg(test)]
mod tests {
    use std::sync::atomic::AtomicUsize;

    use codex_core::{
        config::ConfigOverrides, openai_models::model_presets::all_model_presets,
        protocol::AgentMessageEvent,
    };
    use tokio::{
        sync::{Mutex, mpsc::UnboundedSender},
        task::LocalSet,
    };

    use super::*;

    // Plain user input round-trips: the mock echoes "Hi" back as one
    // agent-message chunk and the turn ends normally.
    #[tokio::test]
    async fn test_prompt() -> anyhow::Result<()> {
        let (session_id, client, _, message_tx, local_set) = setup(vec![]).await?;
        let (prompt_response_tx, prompt_response_rx) = tokio::sync::oneshot::channel();

        message_tx.send(ConversationMessage::Prompt {
            request: PromptRequest::new(session_id.clone(), vec!["Hi".into()]),
            response_tx: prompt_response_tx,
        })?;

        tokio::try_join!(
            async {
                let stop_reason = prompt_response_rx.await??.await??;
                assert_eq!(stop_reason, StopReason::EndTurn);
                // Dropping the sender lets the actor's message loop exit.
                drop(message_tx);
                anyhow::Ok(())
            },
            async {
                local_set.await;
                anyhow::Ok(())
            }
        )?;

        let notifications = client.notifications.lock().unwrap();
        assert_eq!(notifications.len(), 1);
        assert!(matches!(
            &notifications[0].update,
            SessionUpdate::AgentMessageChunk(ContentChunk {
                content: ContentBlock::Text(TextContent { text, .. }),
                ..
            }) if text == "Hi"
        ));

        Ok(())
    }

    // `/compact` is routed as `Op::Compact` (a background task, not a prompt).
    #[tokio::test]
    async fn test_compact() -> anyhow::Result<()> {
        let (session_id, client, conversation, message_tx, local_set) = setup(vec![]).await?;
        let (prompt_response_tx, prompt_response_rx) = tokio::sync::oneshot::channel();

        message_tx.send(ConversationMessage::Prompt {
            request: PromptRequest::new(session_id.clone(), vec!["/compact".into()]),
            response_tx: prompt_response_tx,
        })?;

        tokio::try_join!(
            async {
                let stop_reason = prompt_response_rx.await??.await??;
                assert_eq!(stop_reason, StopReason::EndTurn);
                drop(message_tx);
                anyhow::Ok(())
            },
            async {
                local_set.await;
                anyhow::Ok(())
            }
        )?;

        let notifications = client.notifications.lock().unwrap();
        assert_eq!(notifications.len(), 1);
        assert!(matches!(
            &notifications[0].update,
            SessionUpdate::AgentMessageChunk(ContentChunk {
                content: ContentBlock::Text(TextContent { text, .. }),
                ..
            }) if text == "Compact task completed"
        ));
        let ops = conversation.ops.lock().unwrap();
        assert_eq!(ops.as_slice(), &[Op::Compact]);

        Ok(())
    }

    // `/undo` submits `Op::Undo` and surfaces both the start and completion
    // messages from the synthetic undo events.
    #[tokio::test]
    async fn test_undo() -> anyhow::Result<()> {
        let (session_id, client, conversation, message_tx, local_set) = setup(vec![]).await?;
        let (prompt_response_tx, prompt_response_rx) = tokio::sync::oneshot::channel();

        message_tx.send(ConversationMessage::Prompt {
            request: PromptRequest::new(session_id.clone(), vec!["/undo".into()]),
            response_tx: prompt_response_tx,
        })?;

        tokio::try_join!(
            async {
                let stop_reason = prompt_response_rx.await??.await??;
                assert_eq!(stop_reason, StopReason::EndTurn);
                drop(message_tx);
                anyhow::Ok(())
            },
            async {
                local_set.await;
                anyhow::Ok(())
            }
        )?;

        let notifications = client.notifications.lock().unwrap();
        assert_eq!(
            notifications.len(),
            2,
            "notifications don't match {notifications:?}"
        );
        assert!(matches!(
            &notifications[0].update,
            SessionUpdate::AgentMessageChunk(ContentChunk {
                content: ContentBlock::Text(TextContent { text, .. }),
                ..
            }) if text == "Undo in progress..."
        ));
        assert!(matches!(
            &notifications[1].update,
            SessionUpdate::AgentMessageChunk(ContentChunk {
                content: ContentBlock::Text(TextContent { text, .. }),
                ..
            }) if text == "Undo completed."
        ));

        let ops = conversation.ops.lock().unwrap();
        assert_eq!(ops.as_slice(), &[Op::Undo]);

        Ok(())
    }

    // `/init` expands to the canned AGENTS.md prompt submitted as user input.
    #[tokio::test]
    async fn test_init() -> anyhow::Result<()> {
        let (session_id, client, conversation, message_tx, local_set) = setup(vec![]).await?;
        let (prompt_response_tx, prompt_response_rx) = tokio::sync::oneshot::channel();

        message_tx.send(ConversationMessage::Prompt {
            request: PromptRequest::new(session_id.clone(), vec!["/init".into()]),
            response_tx: prompt_response_tx,
        })?;

        tokio::try_join!(
            async {
                let stop_reason = prompt_response_rx.await??.await??;
                assert_eq!(stop_reason, StopReason::EndTurn);
                drop(message_tx);
                anyhow::Ok(())
            },
            async {
                local_set.await;
                anyhow::Ok(())
            }
        )?;

        let notifications = client.notifications.lock().unwrap();
        assert_eq!(notifications.len(), 1);
        assert!(
            matches!(
                &notifications[0].update,
                SessionUpdate::AgentMessageChunk(ContentChunk {
                    content: ContentBlock::Text(TextContent { text, .. }),
                    ..
                }) if text == INIT_COMMAND_PROMPT // we echo the prompt
            ),
            "notifications don't match {notifications:?}"
        );
        let ops = conversation.ops.lock().unwrap();
        assert_eq!(
            ops.as_slice(),
            &[Op::UserInput {
                items: vec![UserInput::Text {
                    text: INIT_COMMAND_PROMPT.to_string()
                }]
            }],
            "ops don't match {ops:?}"
        );

        Ok(())
    }

    // Bare `/review` targets the uncommitted changes.
    #[tokio::test]
    async fn test_review() -> anyhow::Result<()> {
        let (session_id, client, conversation, message_tx, local_set) = setup(vec![]).await?;
        let (prompt_response_tx, prompt_response_rx) = tokio::sync::oneshot::channel();

        message_tx.send(ConversationMessage::Prompt {
            request: PromptRequest::new(session_id.clone(), vec!["/review".into()]),
            response_tx: prompt_response_tx,
        })?;

        tokio::try_join!(
            async {
                let stop_reason = prompt_response_rx.await??.await??;
                assert_eq!(stop_reason, StopReason::EndTurn);
                drop(message_tx);
                anyhow::Ok(())
            },
            async {
                local_set.await;
                anyhow::Ok(())
            }
        )?;

        let notifications = client.notifications.lock().unwrap();
        assert_eq!(notifications.len(), 1);
        assert!(
            matches!(
                &notifications[0].update,
                SessionUpdate::AgentMessageChunk(ContentChunk {
                    content: ContentBlock::Text(TextContent { text, .. }),
                    ..
}) if text == "current changes" // we echo the prompt
            ),
            "notifications don't match {notifications:?}"
        );

        let ops = conversation.ops.lock().unwrap();
        assert_eq!(
            ops.as_slice(),
            &[Op::Review {
                review_request: ReviewRequest {
                    user_facing_hint: Some(user_facing_hint(&ReviewTarget::UncommittedChanges)),
                    target: ReviewTarget::UncommittedChanges,
                }
            }],
            "ops don't match {ops:?}"
        );

        Ok(())
    }

    /// `/review <instructions>` should run a custom review whose target and
    /// user-facing hint both carry the free-form instructions.
    #[tokio::test]
    async fn test_custom_review() -> anyhow::Result<()> {
        let (session_id, client, conversation, message_tx, local_set) = setup(vec![]).await?;
        let (prompt_response_tx, prompt_response_rx) = tokio::sync::oneshot::channel();
        let instructions = "Review what we did in agents.md";

        message_tx.send(ConversationMessage::Prompt {
            request: PromptRequest::new(
                session_id.clone(),
                vec![format!("/review {instructions}").into()],
            ),
            response_tx: prompt_response_tx,
        })?;

        tokio::try_join!(
            async {
                let stop_reason = prompt_response_rx.await??.await??;
                assert_eq!(stop_reason, StopReason::EndTurn);
                drop(message_tx);
                anyhow::Ok(())
            },
            async {
                local_set.await;
                anyhow::Ok(())
            }
        )?;

        let notifications = client.notifications.lock().unwrap();
        assert_eq!(notifications.len(), 1);
        assert!(
            matches!(
                &notifications[0].update,
                SessionUpdate::AgentMessageChunk(ContentChunk {
                    content: ContentBlock::Text(TextContent { text, .. }),
                    ..
                }) if text == "Review what we did in agents.md" // we echo the prompt
            ),
            "notifications don't match {notifications:?}"
        );

        let ops = conversation.ops.lock().unwrap();
        assert_eq!(
            ops.as_slice(),
            &[Op::Review {
                review_request: ReviewRequest {
                    user_facing_hint: Some(user_facing_hint(&ReviewTarget::Custom {
                        instructions: instructions.to_owned()
                    })),
                    target: ReviewTarget::Custom {
                        instructions: instructions.to_owned()
                    },
                }
            }],
            "ops don't match {ops:?}"
        );

        Ok(())
    }

    /// `/review-commit <sha>` should target the named commit (no title).
    #[tokio::test]
    async fn test_commit_review() -> anyhow::Result<()> {
        let (session_id, client, conversation, message_tx, local_set) = setup(vec![]).await?;
        let (prompt_response_tx, prompt_response_rx) = tokio::sync::oneshot::channel();

        message_tx.send(ConversationMessage::Prompt {
            request: PromptRequest::new(session_id.clone(), vec!["/review-commit 123456".into()]),
            response_tx: prompt_response_tx,
        })?;

        tokio::try_join!(
            async {
                let stop_reason = prompt_response_rx.await??.await??;
                assert_eq!(stop_reason, StopReason::EndTurn);
                drop(message_tx);
                anyhow::Ok(())
            },
            async {
                local_set.await;
                anyhow::Ok(())
            }
        )?;

        let notifications = client.notifications.lock().unwrap();
        assert_eq!(notifications.len(), 1);
        assert!(
            matches!(
                &notifications[0].update,
                SessionUpdate::AgentMessageChunk(ContentChunk {
                    content: ContentBlock::Text(TextContent { text, .. }),
                    ..
                }) if text == "commit 123456" // we echo the prompt
            ),
            "notifications don't match {notifications:?}"
        );

        let ops = conversation.ops.lock().unwrap();
        assert_eq!(
            ops.as_slice(),
            &[Op::Review {
                review_request: ReviewRequest {
                    user_facing_hint: Some(user_facing_hint(&ReviewTarget::Commit {
                        sha: "123456".to_owned(),
                        title: None
                    })),
                    target: ReviewTarget::Commit {
                        sha: "123456".to_owned(),
                        title: None
                    },
                }
            }],
            "ops don't match {ops:?}"
        );

        Ok(())
    }

    /// `/review-branch <branch>` should diff against the named base branch.
    #[tokio::test]
    async fn test_branch_review() -> anyhow::Result<()> {
        let (session_id, client, conversation, message_tx, local_set) = setup(vec![]).await?;
        let (prompt_response_tx, prompt_response_rx) = tokio::sync::oneshot::channel();

        message_tx.send(ConversationMessage::Prompt {
            request: PromptRequest::new(session_id.clone(), vec!["/review-branch feature".into()]),
            response_tx: prompt_response_tx,
        })?;

        tokio::try_join!(
            async {
                let stop_reason = prompt_response_rx.await??.await??;
                assert_eq!(stop_reason, StopReason::EndTurn);
                drop(message_tx);
                anyhow::Ok(())
            },
            async {
                local_set.await;
                anyhow::Ok(())
            }
        )?;

        let notifications = client.notifications.lock().unwrap();
        assert_eq!(notifications.len(), 1);
        assert!(
            matches!(
                &notifications[0].update,
                SessionUpdate::AgentMessageChunk(ContentChunk {
                    content: ContentBlock::Text(TextContent { text, .. }),
                    ..
                }) if text == "changes against 'feature'" // we echo the prompt
            ),
            "notifications don't match {notifications:?}"
        );

        let ops = conversation.ops.lock().unwrap();
        assert_eq!(
            ops.as_slice(),
            &[Op::Review {
                review_request: ReviewRequest {
                    user_facing_hint: Some(user_facing_hint(&ReviewTarget::BaseBranch {
                        branch: "feature".to_owned()
                    })),
                    target: ReviewTarget::BaseBranch {
                        branch: "feature".to_owned()
                    },
                }
            }],
            "ops don't match {ops:?}"
        );

        Ok(())
    }

    /// A discovered custom prompt (`/custom foo`) should be expanded with its
    /// `$1` argument substituted before being sent as user input.
    #[tokio::test]
    async fn test_custom_prompts() -> anyhow::Result<()> {
        let custom_prompts = vec![CustomPrompt {
            name: "custom".to_string(),
            path: "/tmp/custom.md".into(),
            content: "Custom prompt with $1 arg.".into(),
            description: None,
            argument_hint: None,
        }];
        let (session_id, client, conversation, message_tx, local_set) =
            setup(custom_prompts).await?;
        let (prompt_response_tx, prompt_response_rx) = tokio::sync::oneshot::channel();

        message_tx.send(ConversationMessage::Prompt {
            request: PromptRequest::new(session_id.clone(), vec!["/custom foo".into()]),
            response_tx: prompt_response_tx,
        })?;

        tokio::try_join!(
            async {
                let stop_reason = prompt_response_rx.await??.await??;
                assert_eq!(stop_reason, StopReason::EndTurn);
                drop(message_tx);
                anyhow::Ok(())
            },
            async {
                local_set.await;
                anyhow::Ok(())
            }
        )?;

        let notifications = client.notifications.lock().unwrap();
        assert_eq!(notifications.len(), 1);
        assert!(
            matches!(
                &notifications[0].update,
                SessionUpdate::AgentMessageChunk(ContentChunk {
                    content: ContentBlock::Text(TextContent { text, .. }),
                    ..
                }) if text == "Custom prompt with foo arg."
),
            "notifications don't match {notifications:?}"
        );

        let ops = conversation.ops.lock().unwrap();
        assert_eq!(
            ops.as_slice(),
            &[Op::UserInput {
                items: vec![UserInput::Text {
                    text: "Custom prompt with foo arg.".into()
                }]
            }],
            "ops don't match {ops:?}"
        );

        Ok(())
    }

    /// A plain prompt should yield exactly one message chunk: the delta event
    /// is forwarded and the equivalent non-delta event is deduplicated.
    #[tokio::test]
    async fn test_delta_deduplication() -> anyhow::Result<()> {
        let (session_id, client, _, message_tx, local_set) = setup(vec![]).await?;
        let (prompt_response_tx, prompt_response_rx) = tokio::sync::oneshot::channel();

        message_tx.send(ConversationMessage::Prompt {
            request: PromptRequest::new(session_id.clone(), vec!["test delta".into()]),
            response_tx: prompt_response_tx,
        })?;

        tokio::try_join!(
            async {
                let stop_reason = prompt_response_rx.await??.await??;
                assert_eq!(stop_reason, StopReason::EndTurn);
                drop(message_tx);
                anyhow::Ok(())
            },
            async {
                local_set.await;
                anyhow::Ok(())
            }
        )?;

        // We should only get ONE notification, not duplicates from both delta and non-delta
        let notifications = client.notifications.lock().unwrap();
        assert_eq!(
            notifications.len(),
            1,
            "Should only receive delta event, not duplicate non-delta. Got: {notifications:?}"
        );
        assert!(matches!(
            &notifications[0].update,
            SessionUpdate::AgentMessageChunk(ContentChunk {
                content: ContentBlock::Text(TextContent { text, .. }),
                ..
            }) if text == "test delta"
        ));

        Ok(())
    }

    /// Builds a `ConversationActor` wired to stub collaborators and spawns it
    /// on a fresh `LocalSet`. Callers drive the actor by sending on the
    /// returned channel and then awaiting the `LocalSet`.
    ///
    /// NOTE(review): several generic type parameters in this block were lost
    /// during extraction; they have been reconstructed from usage — confirm
    /// against the original source.
    async fn setup(
        custom_prompts: Vec<CustomPrompt>,
    ) -> anyhow::Result<(
        SessionId,
        Arc<StubClient>,
        Arc<StubCodexConversation>,
        UnboundedSender<ConversationMessage>,
        LocalSet,
    )> {
        let session_id = SessionId::new("test");
        let client = Arc::new(StubClient::new());
        let session_client =
            SessionClient::with_client(session_id.clone(), client.clone(), Arc::default());
        let conversation = Arc::new(StubCodexConversation::new());
        let models_manager = Arc::new(StubModelsManager);
        let config = Config::load_with_cli_overrides(vec![], ConfigOverrides::default()).await?;
        let (message_tx, message_rx) = tokio::sync::mpsc::unbounded_channel();

        let mut actor = ConversationActor::new(
            StubAuth,
            session_client,
            conversation.clone(),
            models_manager,
            config,
            message_rx,
        );
        actor.custom_prompts = Rc::new(RefCell::new(custom_prompts));

        // The actor is !Send, so it runs on a LocalSet the test awaits.
        let local_set = LocalSet::new();
        local_set.spawn_local(actor.spawn());
        Ok((session_id, client, conversation, message_tx, local_set))
    }

    /// Auth stub whose only behavior is a successful logout.
    struct StubAuth;

    impl Auth for StubAuth {
        fn logout(&self) -> Result<bool> {
            Ok(true)
        }
    }

    /// Models-manager stub that always reports the first built-in preset.
    struct StubModelsManager;

    #[async_trait::async_trait]
    impl ModelsManagerImpl for StubModelsManager {
        async fn get_model(&self, _model_id: &Option<String>, _config: &Config) -> String {
            all_model_presets()[0].to_owned().id
        }

        async fn list_models(&self, _config: &Config) -> Vec<ModelPreset> {
            all_model_presets().to_owned()
        }
    }

    /// Conversation stub: records every submitted `Op` and replays a canned
    /// event sequence through an internal channel consumed by `next_event`.
    struct StubCodexConversation {
        // Monotonic id for submissions; reused as the event/turn/item id.
        current_id: AtomicUsize,
        // Every Op ever submitted, in order, for test assertions.
        ops: std::sync::Mutex<Vec<Op>>,
        op_tx: mpsc::UnboundedSender<Event>,
        // tokio Mutex: the receiver is held across `.await` in next_event.
        op_rx: Mutex<mpsc::UnboundedReceiver<Event>>,
    }

    impl StubCodexConversation {
        fn new() -> Self {
            let (op_tx,
op_rx) = mpsc::unbounded_channel();
            StubCodexConversation {
                current_id: AtomicUsize::new(0),
                ops: std::sync::Mutex::default(),
                op_tx,
                op_rx: Mutex::new(op_rx),
            }
        }
    }

    // NOTE(review): the generic parameters on the `Result` return types below
    // were lost during extraction and reconstructed from usage (the error path
    // returns `CodexErr`) — confirm against the original source.
    #[async_trait::async_trait]
    impl CodexConversationImpl for StubCodexConversation {
        /// Records `op`, then pushes the canned event sequence a real Codex
        /// conversation would emit for that op, always ending with TaskComplete.
        async fn submit(&self, op: Op) -> Result<String> {
            let id = self
                .current_id
                .fetch_add(1, std::sync::atomic::Ordering::SeqCst);

            self.ops.lock().unwrap().push(op.clone());

            match op {
                Op::UserInput { items } => {
                    let prompt = items
                        .into_iter()
                        .map(|i| match i {
                            UserInput::Text { text } => text,
                            _ => unimplemented!(),
                        })
                        .join("\n");

                    self.op_tx
                        .send(Event {
                            id: id.to_string(),
                            msg: EventMsg::AgentMessageContentDelta(
                                AgentMessageContentDeltaEvent {
                                    thread_id: id.to_string(),
                                    turn_id: id.to_string(),
                                    item_id: id.to_string(),
                                    delta: prompt.clone(),
                                },
                            ),
                        })
                        .unwrap();
                    // Send non-delta event (should be deduplicated, but handled by deduplication)
                    self.op_tx
                        .send(Event {
                            id: id.to_string(),
                            msg: EventMsg::AgentMessage(AgentMessageEvent { message: prompt }),
                        })
                        .unwrap();
                    self.op_tx
                        .send(Event {
                            id: id.to_string(),
                            msg: EventMsg::TaskComplete(TaskCompleteEvent {
                                last_agent_message: None,
                            }),
                        })
                        .unwrap();
                }
                Op::Compact => {
                    self.op_tx
                        .send(Event {
                            id: id.to_string(),
                            msg: EventMsg::TaskStarted(TaskStartedEvent {
                                model_context_window: None,
                            }),
                        })
                        .unwrap();
                    self.op_tx
                        .send(Event {
                            id: id.to_string(),
                            msg: EventMsg::AgentMessage(AgentMessageEvent {
                                message: "Compact task completed".to_string(),
                            }),
                        })
                        .unwrap();
                    self.op_tx
                        .send(Event {
                            id: id.to_string(),
                            msg: EventMsg::TaskComplete(TaskCompleteEvent {
                                last_agent_message: None,
                            }),
                        })
                        .unwrap();
                }
                Op::Undo => {
                    self.op_tx
                        .send(Event {
                            id: id.to_string(),
                            msg: EventMsg::UndoStarted(codex_core::protocol::UndoStartedEvent {
                                message: Some("Undo in progress...".to_string()),
                            }),
                        })
                        .unwrap();
                    self.op_tx
                        .send(Event {
                            id: id.to_string(),
                            msg: EventMsg::UndoCompleted(
                                codex_core::protocol::UndoCompletedEvent {
                                    success: true,
                                    message: Some("Undo completed.".to_string()),
                                },
                            ),
                        })
                        .unwrap();
                    self.op_tx
                        .send(Event {
                            id: id.to_string(),
                            msg: EventMsg::TaskComplete(TaskCompleteEvent {
                                last_agent_message: None,
                            }),
                        })
                        .unwrap();
                }
                Op::Review { review_request } => {
                    self.op_tx
                        .send(Event {
                            id: id.to_string(),
                            msg: EventMsg::EnteredReviewMode(review_request.clone()),
                        })
                        .unwrap();
                    self.op_tx
                        .send(Event {
                            id: id.to_string(),
                            msg: EventMsg::ExitedReviewMode(ExitedReviewModeEvent {
                                review_output: Some(ReviewOutputEvent {
                                    findings: vec![],
                                    overall_correctness: String::new(),
                                    // Echo the hint so tests can assert on it.
                                    overall_explanation: review_request
                                        .user_facing_hint
                                        .clone()
                                        .unwrap_or_default(),
                                    overall_confidence_score: 1.,
                                }),
                            }),
                        })
                        .unwrap();
                    self.op_tx
                        .send(Event {
                            id: id.to_string(),
                            msg: EventMsg::TaskComplete(TaskCompleteEvent {
                                last_agent_message: None,
                            }),
                        })
                        .unwrap();
                }
                _ => {
                    unimplemented!()
                }
            }
            Ok(id.to_string())
        }

        /// Pops the next canned event; errors once all senders are dropped.
        async fn next_event(&self) -> Result<Event> {
            let Some(event) = self.op_rx.lock().await.recv().await else {
                return Err(CodexErr::InternalAgentDied);
            };
            Ok(event)
        }
    }

    /// ACP client stub that records every session notification it receives.
    struct StubClient {
        notifications: std::sync::Mutex<Vec<SessionNotification>>,
    }

    impl StubClient {
        fn new() -> Self {
            StubClient {
                notifications: std::sync::Mutex::default(),
            }
        }
    }

    #[async_trait::async_trait(?Send)]
    impl Client for StubClient {
        async fn request_permission(
            &self,
            _args: RequestPermissionRequest,
        ) -> Result<RequestPermissionResponse, Error> {
            unimplemented!()
        }

        async fn session_notification(&self, args: SessionNotification) -> Result<(), Error> {
            self.notifications.lock().unwrap().push(args);
            Ok(())
        }
    }
}