├── tests ├── core │ └── __init__.py ├── utils │ ├── __init__.py │ └── test_model_info.py ├── test_config.py └── tools │ └── test_apply_diff_tool.py ├── codexy ├── tui │ ├── widgets │ │ ├── __init__.py │ │ ├── overlays │ │ │ ├── __init__.py │ │ │ ├── history_overlay.py │ │ │ ├── help_overlay.py │ │ │ ├── model_overlay.py │ │ │ └── approval_overlay.py │ │ └── chat │ │ │ ├── __init__.py │ │ │ ├── thinking_indicator.py │ │ │ ├── history_view.py │ │ │ ├── header.py │ │ │ ├── input_area.py │ │ │ ├── command_review.py │ │ │ └── message_display.py │ └── __init__.py ├── __init__.py ├── exceptions.py ├── __main__.py ├── utils │ ├── __init__.py │ ├── model_utils.py │ ├── token_utils.py │ ├── storage.py │ ├── update_checker.py │ ├── model_info.py │ ├── security_check.py │ └── filesystem.py ├── tools │ ├── __init__.py │ ├── execute_command_tool.py │ └── apply_diff_tool.py └── cli │ ├── completion_scripts.py │ └── main.py ├── assets ├── logo.png ├── codexy-demo.gif └── codexy-demo-2.gif ├── .pre-commit-config.yaml ├── docs ├── pre-commit_ZH.md └── pre-commit.md ├── pyproject.toml ├── .gitignore ├── README_ZH.md ├── LICENSE └── README.md /tests/core/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /codexy/tui/widgets/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /codexy/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "0.0.10" 2 | PACKAGE_NAME = "codexy" 3 | -------------------------------------------------------------------------------- /assets/logo.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/AndersonBY/codexy/HEAD/assets/logo.png -------------------------------------------------------------------------------- /codexy/tui/__init__.py: -------------------------------------------------------------------------------- 1 | from .app import CodexTuiApp 2 | 3 | __all__ = ["CodexTuiApp"] 4 | -------------------------------------------------------------------------------- /assets/codexy-demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AndersonBY/codexy/HEAD/assets/codexy-demo.gif -------------------------------------------------------------------------------- /assets/codexy-demo-2.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AndersonBY/codexy/HEAD/assets/codexy-demo-2.gif -------------------------------------------------------------------------------- /codexy/tui/widgets/overlays/__init__.py: -------------------------------------------------------------------------------- 1 | from .approval_overlay import ApprovalModeOverlay 2 | from .help_overlay import HelpOverlay 3 | from .history_overlay import HistoryOverlay 4 | from .model_overlay import ModelOverlay 5 | 6 | __all__ = [ 7 | "HistoryOverlay", 8 | "HelpOverlay", 9 | "ModelOverlay", 10 | "ApprovalModeOverlay", 11 | ] 12 | -------------------------------------------------------------------------------- /codexy/exceptions.py: -------------------------------------------------------------------------------- 1 | """Custom exceptions for the codexy project.""" 2 | 3 | 4 | class codexyError(Exception): 5 | """Base exception for codexy errors.""" 6 | 7 | pass 8 | 9 | 10 | class ToolError(codexyError): 11 | """Exception related to tool execution.""" 12 | 13 | pass 14 | 15 | 16 | class ConfigError(codexyError): 17 | """Exception related to configuration 
issues.""" 18 | 19 | pass 20 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/astral-sh/ruff-pre-commit 3 | # Ruff version. 4 | rev: v0.8.4 5 | hooks: 6 | # Run the linter. 7 | - id: ruff 8 | args: [--fix] 9 | # Run the formatter. 10 | - id: ruff-format 11 | 12 | - repo: https://github.com/pre-commit/pre-commit-hooks 13 | rev: v5.0.0 14 | hooks: 15 | - id: trailing-whitespace 16 | - id: end-of-file-fixer 17 | - id: check-yaml 18 | - id: check-added-large-files 19 | -------------------------------------------------------------------------------- /codexy/__main__.py: -------------------------------------------------------------------------------- 1 | """Allows running the CLI via 'python -m codexy'.""" 2 | 3 | # Ensure that the main CLI function is called when running as a module. 4 | # Import from the 'cli' module located at the root level relative to the package directory. 5 | # This assumes the parent directory containing 'cli.py' is accessible when running the module. 
6 | from .cli.main import codexy 7 | 8 | if __name__ == "__main__": 9 | # Pass an empty object for context, similar to the original entry point check 10 | codexy(obj={}) 11 | -------------------------------------------------------------------------------- /codexy/tui/widgets/chat/__init__.py: -------------------------------------------------------------------------------- 1 | from .command_review import CommandReviewWidget 2 | from .header import ChatHeader 3 | from .history_view import ChatHistoryView 4 | from .input_area import ChatInputArea 5 | from .message_display import ( 6 | AssistantMessageDisplay, 7 | SystemMessageDisplay, 8 | ToolCallDisplay, 9 | ToolOutputDisplay, 10 | UserMessageDisplay, 11 | ) 12 | from .thinking_indicator import ThinkingIndicator 13 | 14 | __all__ = [ 15 | "ChatHeader", 16 | "ChatHistoryView", 17 | "ChatInputArea", 18 | "UserMessageDisplay", 19 | "AssistantMessageDisplay", 20 | "ToolCallDisplay", 21 | "ToolOutputDisplay", 22 | "SystemMessageDisplay", 23 | "CommandReviewWidget", 24 | "ThinkingIndicator", 25 | ] 26 | -------------------------------------------------------------------------------- /docs/pre-commit_ZH.md: -------------------------------------------------------------------------------- 1 | # Pre-commit 配置说明 2 | 3 | ## 概述 4 | 5 | 本项目已配置 pre-commit hooks,在每次 git commit 前自动执行代码检查和格式化。 6 | 7 | ## 包含的 hooks 8 | 9 | 1. **ruff lint** - 代码质量检查并自动修复 10 | 2. **ruff format** - 代码格式化 11 | 3. **trailing-whitespace** - 移除行尾空白 12 | 4. **end-of-file-fixer** - 确保文件以换行符结尾 13 | 5. **check-yaml** - YAML 文件语法检查 14 | 6. 
**check-added-large-files** - 防止提交大文件 15 | 16 | ## 使用方法 17 | 18 | ### 安装依赖 19 | ```bash 20 | pdm install 21 | ``` 22 | 23 | ### 安装 pre-commit hooks 24 | ```bash 25 | pdm run pre-commit install 26 | ``` 27 | 28 | ### 手动运行所有检查 29 | ```bash 30 | pdm run pre-commit run --all-files 31 | ``` 32 | 33 | ### 手动运行特定 hook 34 | ```bash 35 | pdm run pre-commit run ruff-format 36 | ``` 37 | 38 | ## 注意事项 39 | 40 | - 如果 pre-commit 检查失败,提交会被阻止 41 | - 如果代码被自动修复,需要重新 `git add` 并提交 42 | - 可以使用 `git commit --no-verify` 跳过 pre-commit 检查(不推荐) 43 | -------------------------------------------------------------------------------- /docs/pre-commit.md: -------------------------------------------------------------------------------- 1 | # Pre-commit Configuration Guide 2 | 3 | ## Overview 4 | 5 | This project has pre-commit hooks configured to automatically run code checks and formatting before each git commit. 6 | 7 | ## Included hooks 8 | 9 | 1. **ruff lint** - Code quality check with automatic fixes 10 | 2. **ruff format** - Code formatting 11 | 3. **trailing-whitespace** - Remove trailing whitespace 12 | 4. **end-of-file-fixer** - Ensure files end with a newline 13 | 5. **check-yaml** - YAML file syntax check 14 | 6. 
**check-added-large-files** - Prevent committing large files 15 | 16 | ## Usage 17 | 18 | ### Install dependencies 19 | ```bash 20 | pdm install 21 | ``` 22 | 23 | ### Install pre-commit hooks 24 | ```bash 25 | pdm run pre-commit install 26 | ``` 27 | 28 | ### Run all checks manually 29 | ```bash 30 | pdm run pre-commit run --all-files 31 | ``` 32 | 33 | ### Run specific hook manually 34 | ```bash 35 | pdm run pre-commit run ruff-format 36 | ``` 37 | 38 | ## Notes 39 | 40 | - If pre-commit checks fail, the commit will be blocked 41 | - If code is automatically fixed, you need to `git add` again and commit 42 | - You can use `git commit --no-verify` to skip pre-commit checks (not recommended) 43 | -------------------------------------------------------------------------------- /codexy/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Expose necessary functions from submodules 2 | 3 | from .filesystem import check_in_git, short_cwd, shorten_path 4 | from .model_info import get_max_tokens_for_model, get_model_max_tokens 5 | from .model_utils import ( 6 | format_model_for_display, 7 | get_available_models, 8 | is_model_supported, 9 | preload_models, 10 | sort_models_for_display, 11 | ) 12 | from .storage import ( 13 | DEFAULT_HISTORY_CONFIG, 14 | HistoryConfig, 15 | HistoryEntry, 16 | add_to_history, 17 | clear_command_history, 18 | load_command_history, 19 | save_command_history, 20 | ) 21 | from .token_utils import approximate_tokens_used 22 | from .update_checker import UpdateInfo, check_for_updates 23 | 24 | __all__ = [ 25 | "check_in_git", 26 | "shorten_path", 27 | "short_cwd", 28 | "load_command_history", 29 | "save_command_history", 30 | "add_to_history", 31 | "clear_command_history", 32 | "HistoryEntry", 33 | "DEFAULT_HISTORY_CONFIG", 34 | "HistoryConfig", 35 | "check_for_updates", 36 | "UpdateInfo", 37 | "get_available_models", 38 | "is_model_supported", 39 | "preload_models", 40 | 
"sort_models_for_display", 41 | "format_model_for_display", 42 | "get_max_tokens_for_model", 43 | "get_model_max_tokens", 44 | "approximate_tokens_used", 45 | ] 46 | -------------------------------------------------------------------------------- /codexy/tools/__init__.py: -------------------------------------------------------------------------------- 1 | """Implementations for the tools callable by the agent.""" 2 | 3 | from .apply_diff_tool import APPLY_DIFF_TOOL_DEF, apply_diff_tool 4 | from .apply_patch_tool import APPLY_PATCH_TOOL_DEF, apply_patch 5 | from .execute_command_tool import EXECUTE_COMMAND_TOOL_DEF, execute_command_tool 6 | from .file_tools import ( 7 | LIST_FILES_TOOL_DEF, 8 | READ_FILE_TOOL_DEF, 9 | WRITE_TO_FILE_TOOL_DEF, 10 | list_files_tool, 11 | read_file_tool, 12 | write_to_file_tool, 13 | ) 14 | 15 | # --- Tool Registration --- 16 | # Map tool names (used by the LLM) to their Python functions 17 | TOOL_REGISTRY = { 18 | "execute_command": execute_command_tool, 19 | "read_file": read_file_tool, 20 | "write_to_file": write_to_file_tool, 21 | "list_files": list_files_tool, 22 | "apply_diff": apply_diff_tool, 23 | "apply_patch": apply_patch, 24 | } 25 | 26 | # Combine all tool definitions 27 | AVAILABLE_TOOL_DEFS = [ 28 | EXECUTE_COMMAND_TOOL_DEF, 29 | READ_FILE_TOOL_DEF, 30 | WRITE_TO_FILE_TOOL_DEF, 31 | LIST_FILES_TOOL_DEF, 32 | APPLY_DIFF_TOOL_DEF, 33 | APPLY_PATCH_TOOL_DEF, 34 | ] 35 | 36 | __all__ = [ 37 | "read_file_tool", 38 | "write_to_file_tool", 39 | "list_files_tool", 40 | "apply_patch", 41 | "apply_diff_tool", 42 | "execute_command_tool", 43 | "AVAILABLE_TOOL_DEFS", 44 | "TOOL_REGISTRY", 45 | ] 46 | -------------------------------------------------------------------------------- /codexy/tui/widgets/chat/thinking_indicator.py: -------------------------------------------------------------------------------- 1 | from textual.reactive import reactive 2 | from textual.timer import Timer 3 | from textual.widgets import Static 4 | 5 | 6 | 
class ThinkingIndicator(Static):
    """Display a "Thinking..." animation with an elapsed-seconds counter.

    The dots cycle every 0.5s while the widget is mounted; the elapsed time
    is pushed in from outside via `set_thinking_seconds`.
    """

    DEFAULT_CSS = """
    ThinkingIndicator {
        height: auto;
        padding: 1;
    }
    """

    # Static label shown before the animated dots.
    message: reactive[str] = reactive("Thinking")
    # Elapsed seconds; changing it re-renders via watch_thinking_seconds.
    thinking_seconds: reactive[int] = reactive(0)
    # Current animation dots ("." through "...").
    _dots: reactive[str] = reactive(".")
    # Interval timer driving the dot animation; None until mounted.
    _timer: Timer | None = None

    def on_mount(self) -> None:
        """Render once, then start the 0.5s animation timer."""
        self.update_display()
        self._timer = self.set_interval(0.5, self.update_dots)

    def on_unmount(self) -> None:
        """Stop the animation timer when the widget is removed."""
        if self._timer:
            self._timer.stop()

    def update_dots(self) -> None:
        """Advance the dot animation (".", "..", "...", then wrap around)."""
        if len(self._dots) < 3:
            self._dots += "."
        else:
            self._dots = "."
        self.update_display()

    def watch_thinking_seconds(self, seconds: int) -> None:
        """Re-render whenever the elapsed-seconds reactive changes."""
        self.update_display()

    def update_display(self) -> None:
        """Refresh the rendered text from message, dots and elapsed seconds."""
        display_text = f"{self.message}{self._dots} ({self.thinking_seconds}s)"
        self.update(display_text)

    def set_thinking_seconds(self, seconds: int) -> None:
        """External entry point for updating the elapsed-seconds counter."""
        self.thinking_seconds = seconds
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
[build-system]
requires = ["pdm-backend"]
build-backend = "pdm.backend"

[project]
authors = [{ name = "AndersonBY", email = "anderson@163.com" }]
classifiers = [
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: Apache Software License",
    "Operating System :: OS Independent",
    "Development Status :: 3 - Alpha",
    "Environment :: Console",
    "Intended Audience :: Developers",
    "Topic :: Software Development :: Code Generators",
]
dependencies
= [ 17 | "click>=8.0", 18 | "PyYAML>=6.0", 19 | "openai>=1.0", 20 | "rich>=13.0", 21 | "detect-secrets>=1.5.0", 22 | "textual", 23 | "httpx>=0.28.1", 24 | "packaging>=25.0", 25 | "python-dotenv", 26 | "pyperclip>=1.9.0", 27 | ] 28 | description = "A Python implementation of the Codex CLI tool." 29 | name = "codexy" 30 | readme = "README.md" 31 | requires-python = ">=3.10" 32 | version = "0.0.10" 33 | 34 | [project.urls] 35 | "Bug Tracker" = "https://github.com/andersonby/codexy/issues" 36 | "Homepage" = "https://github.com/andersonby/codexy" 37 | 38 | [project.scripts] 39 | codexy = "codexy.cli.main:codexy" 40 | 41 | [tool.pdm] 42 | distribution = true 43 | 44 | [tool.pdm.build] 45 | excludes = ["tests"] 46 | 47 | [dependency-groups] 48 | dev = ["textual-dev", "pytest>=8.3.5", "pre-commit", "ruff"] 49 | 50 | [tool.ruff] 51 | line-length = 130 52 | target-version = "py310" 53 | 54 | [tool.ruff.lint] 55 | select = [ 56 | "E", # pycodestyle errors 57 | "W", # pycodestyle warnings 58 | "F", # pyflakes 59 | "I", # isort 60 | "B", # flake8-bugbear 61 | "C4", # flake8-comprehensions 62 | "UP", # pyupgrade 63 | ] 64 | 65 | ignore = [ 66 | "E501", # Line too long (controlled by line-length) 67 | ] 68 | 69 | [tool.ruff.format] 70 | quote-style = "double" 71 | indent-style = "space" 72 | line-ending = "auto" 73 | -------------------------------------------------------------------------------- /tests/utils/test_model_info.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import unittest 3 | from io import StringIO 4 | 5 | from codexy.utils.model_info import DEFAULT_MAX_TOKENS, MODEL_MAX_TOKENS, get_model_max_tokens 6 | 7 | 8 | class TestGetModelMaxTokens(unittest.TestCase): 9 | def test_known_model_names(self): 10 | self.assertEqual(get_model_max_tokens("gpt-4"), MODEL_MAX_TOKENS["gpt-4"]) 11 | self.assertEqual(get_model_max_tokens("gpt-3.5-turbo-16k"), MODEL_MAX_TOKENS["gpt-3.5-turbo-16k"]) 12 | 
self.assertEqual(get_model_max_tokens("o4-mini"), MODEL_MAX_TOKENS["o4-mini"]) 13 | self.assertEqual(get_model_max_tokens("gpt-4-turbo"), MODEL_MAX_TOKENS["gpt-4-turbo"]) 14 | self.assertEqual(get_model_max_tokens("gpt-4-32k"), MODEL_MAX_TOKENS["gpt-4-32k"]) 15 | 16 | def test_model_name_with_known_key_prefix(self): 17 | # Test variants that should match a more general key due to prefix matching logic 18 | self.assertEqual(get_model_max_tokens("gpt-4-turbo-preview"), MODEL_MAX_TOKENS["gpt-4-turbo"]) 19 | self.assertEqual(get_model_max_tokens("gpt-4-0125-preview"), MODEL_MAX_TOKENS["gpt-4-turbo"]) 20 | self.assertEqual(get_model_max_tokens("gpt-4-1106-preview"), MODEL_MAX_TOKENS["gpt-4-turbo"]) 21 | self.assertEqual(get_model_max_tokens("custom-gpt-4-model"), MODEL_MAX_TOKENS["gpt-4"]) 22 | self.assertEqual(get_model_max_tokens("gpt-3.5-turbo-instruct"), 4096) 23 | self.assertEqual(get_model_max_tokens("my-o4-mini-variant"), MODEL_MAX_TOKENS["o4-mini"]) 24 | 25 | def test_unknown_model_name(self): 26 | original_stderr = sys.stderr 27 | sys.stderr = captured_stderr = StringIO() 28 | try: 29 | self.assertEqual(get_model_max_tokens("unknown-model-xyz"), DEFAULT_MAX_TOKENS) 30 | self.assertIn("Warning: Unknown model name 'unknown-model-xyz'", captured_stderr.getvalue()) 31 | finally: 32 | sys.stderr = original_stderr 33 | 34 | def test_order_of_checking(self): 35 | # Ensure that "gpt-4-turbo" is checked before "gpt-4" 36 | self.assertEqual(get_model_max_tokens("gpt-4-turbo-specific-variant"), MODEL_MAX_TOKENS["gpt-4-turbo"]) 37 | # Ensure "gpt-4-32k" is checked before "gpt-4" 38 | self.assertEqual(get_model_max_tokens("gpt-4-32k-specific-variant"), MODEL_MAX_TOKENS["gpt-4-32k"]) 39 | # Ensure "gpt-3.5-turbo-16k" is checked before "gpt-3.5-turbo" 40 | self.assertEqual(get_model_max_tokens("gpt-3.5-turbo-16k-variant"), MODEL_MAX_TOKENS["gpt-3.5-turbo-16k"]) 41 | 42 | 43 | if __name__ == "__main__": 44 | unittest.main() 45 | 
-------------------------------------------------------------------------------- /codexy/tui/widgets/chat/history_view.py: -------------------------------------------------------------------------------- 1 | from textual.containers import Container, VerticalScroll 2 | 3 | from .message_display import ( 4 | AssistantMessageDisplay, 5 | BaseMessageDisplay, 6 | SystemMessageDisplay, 7 | ToolCallDisplay, 8 | ToolOutputDisplay, 9 | UserMessageDisplay, 10 | ) 11 | 12 | 13 | class ChatHistoryView(VerticalScroll): 14 | """Display the scrollable area for chat message history.""" 15 | 16 | DEFAULT_CSS = """ 17 | ChatHistoryView { 18 | border: none; 19 | padding: 0 1; 20 | } 21 | ChatHistoryView > Container { 22 | /* Ensure containers take full width for alignment */ 23 | width: 100%; 24 | height: auto; 25 | /* Add some spacing between message containers */ 26 | margin-bottom: 1; 27 | } 28 | ChatHistoryView > .user-message-container { 29 | align-horizontal: right; /* Align user messages to the right */ 30 | } 31 | ChatHistoryView > .assistant-message-container, 32 | ChatHistoryView > .tool-call-container, 33 | ChatHistoryView > .tool-output-container, 34 | ChatHistoryView > .system-message-container { 35 | align-horizontal: left; /* Align others to the left */ 36 | } 37 | """ 38 | 39 | def add_message(self, message_widget: BaseMessageDisplay): 40 | """ 41 | Add a new message component to the history view. 42 | Now wraps the message component in a Container to control alignment. 
43 | """ 44 | # Create a container to wrap the message component 45 | container = Container(message_widget) 46 | container.styles.height = "auto" # Ensure container height adapts 47 | 48 | # Add CSS class based on message type 49 | if isinstance(message_widget, UserMessageDisplay): 50 | container.add_class("user-message-container") 51 | elif isinstance(message_widget, AssistantMessageDisplay): 52 | container.add_class("assistant-message-container") 53 | elif isinstance(message_widget, ToolCallDisplay): 54 | container.add_class("tool-call-container") 55 | elif isinstance(message_widget, ToolOutputDisplay): 56 | container.add_class("tool-output-container") 57 | elif isinstance(message_widget, SystemMessageDisplay): 58 | container.add_class("system-message-container") 59 | else: 60 | container.add_class("other-message-container") 61 | 62 | # Mount the wrapped container, not the message component directly 63 | self.mount(container) 64 | # Scroll to the bottom, ensuring new messages are visible 65 | self.call_after_refresh(self.scroll_end, animate=True) 66 | 67 | def clear(self): 68 | """Clear all history messages.""" 69 | self.remove_children() 70 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # Pipfile.lock 90 | 91 | # poetry 92 | # Poetry pyproject.toml: https://python-poetry.org/docs/pyproject/ 93 | # It is generally recommended to include poetry.lock in version control. 94 | # poetry.lock 95 | 96 | # pdm 97 | # Similar to Pipfile.lock and poetry.lock, it is generally recommended to include pdm.lock in version control. 98 | # pdm.lock 99 | # pdm stores its cache in the .pdm directory, which should be ignored. 100 | .pdm/ 101 | 102 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 103 | __pypackages__/ 104 | 105 | # Celery stuff 106 | celerybeat-schedule 107 | celerybeat.pid 108 | 109 | # SageMath parsed files 110 | *.sage.py 111 | 112 | # Environments 113 | .env 114 | .venv 115 | env/ 116 | venv/ 117 | ENV/ 118 | env.bak/ 119 | venv.bak/ 120 | 121 | # Spyder project settings 122 | .spyderproject 123 | .spyproject 124 | 125 | # Rope project settings 126 | .ropeproject 127 | 128 | # mkdocs documentation 129 | /site 130 | 131 | # mypy 132 | .mypy_cache/ 133 | .dmypy.json 134 | dmypy.json 135 | 136 | # Pyre type checker 137 | .pyre/ 138 | 139 | # pytype static analysis results 140 | .pytype/ 141 | 142 | # Cython debug symbols 143 | cython_debug/ 144 | 145 | # Editor directories and files 146 | .vscode/ 147 | .idea/ 148 | .history/ 149 | *.swp 150 | *~ 151 | .project 152 | .pydevproject 153 | .settings/ 154 | 155 | # OS generated files 156 | .DS_Store 157 | Thumbs.db 158 | Icon? 159 | .Spotlight-V100 160 | 161 | # Logs 162 | *.log 163 | 164 | # Environment variables 165 | .env* 166 | !.env.example 167 | 168 | # Coverage data 169 | coverage/ 170 | .nyc_output/ # Keep if using JS tools alongside Python? Unlikely for now. 
171 | 172 | # Other Caches 173 | .cache/ 174 | .pytest_cache/ 175 | 176 | .pdm-python 177 | pdm.lock 178 | -------------------------------------------------------------------------------- /codexy/tui/widgets/overlays/history_overlay.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from typing import cast 3 | 4 | from rich.text import Text 5 | from textual import events 6 | from textual.app import ComposeResult 7 | from textual.message import Message 8 | from textual.widgets import Label, ListItem, ListView, Static 9 | 10 | from ....utils.storage import HistoryEntry 11 | 12 | 13 | class HistoryOverlay(Static): 14 | """A floating layer for displaying and selecting command history.""" 15 | 16 | DEFAULT_CSS = """ 17 | HistoryOverlay ListView { 18 | border: none; 19 | background: $panel-darken-1; 20 | } 21 | HistoryOverlay Label { 22 | padding: 0 1; 23 | color: $text-muted; 24 | height: 1; 25 | } 26 | HistoryOverlay ListItem { 27 | padding: 0 1; 28 | height: 1; 29 | } 30 | HistoryOverlay ListItem > Static { 31 | height: 1; 32 | } 33 | HistoryOverlay ListItem :hover { 34 | background: $accent-darken-1; 35 | } 36 | HistoryOverlay ListItem.--highlight { 37 | background: $accent !important; 38 | color: $text !important; 39 | } 40 | HistoryOverlay ListItem.--highlight:focus { 41 | background: $accent-darken-1 !important; 42 | } 43 | """ 44 | 45 | # --- Messages --- 46 | class SelectHistory(Message): 47 | """Sent when a user selects a history entry.""" 48 | 49 | def __init__(self, command: str): 50 | self.command = command 51 | super().__init__() 52 | 53 | class ExitHistory(Message): 54 | """Sent when a user exits history view (e.g. 
by pressing ESC).""" 55 | 56 | pass 57 | 58 | # --- UI Composition & Updates --- 59 | def compose(self) -> ComposeResult: 60 | yield Label("Command History (↑/↓ Select, Enter Use, Esc Close)") 61 | yield ListView(id="history-list") 62 | 63 | def set_history(self, history_entries: list[HistoryEntry]): 64 | """Fill the list with history entries.""" 65 | list_view = self.query_one("#history-list", ListView) 66 | list_view.clear() # Clear old entries 67 | # Iterate in reverse to show latest at the top 68 | for entry in reversed(history_entries): 69 | # Format timestamp 70 | dt = datetime.fromtimestamp(entry["timestamp"]) 71 | time_str = dt.strftime("%Y-%m-%d %H:%M:%S") 72 | # Create display text 73 | display_text = Text.assemble((f"{time_str} ", "dim"), (entry["command"], "")) 74 | # Create ListItem, storing original command as its value 75 | # Note: ListItem itself doesn't have a value property, we might need to subclass 76 | # Or use another way to store the original command. A simple method is to use ID. 77 | # Alternatively, when selected, extract from Label. 78 | # For simplicity, we will extract in on_list_view_selected. 
79 | list_view.append(ListItem(Static(display_text))) # Use Static to display Rich Text 80 | # If list is not empty, highlight first (latest) 81 | if len(list_view): 82 | list_view.index = 0 83 | 84 | # --- Event Handlers --- 85 | def on_list_view_selected(self, event: ListView.Selected) -> None: 86 | """Handle list item selection event.""" 87 | event.stop() 88 | selected_item = event.item 89 | if selected_item: 90 | # Extract original command text from Static component 91 | static_widget = selected_item.query_one(Static) 92 | rich_text = cast(Text, static_widget.renderable) # Assuming it's Text 93 | # Extract command part (assuming timestamp followed by command) 94 | command_text = rich_text.plain.split(" ", 2)[-1] # Simple split logic 95 | self.post_message(self.SelectHistory(command_text)) 96 | 97 | # Allow closing via ESC 98 | def on_key(self, event: events.Key) -> None: 99 | if event.key == "escape": 100 | event.stop() 101 | self.post_message(self.ExitHistory()) 102 | -------------------------------------------------------------------------------- /codexy/tui/widgets/overlays/help_overlay.py: -------------------------------------------------------------------------------- 1 | from rich.text import Text 2 | from textual import events 3 | from textual.app import ComposeResult 4 | from textual.containers import VerticalScroll 5 | from textual.message import Message 6 | from textual.widgets import Label, Static 7 | 8 | 9 | class HelpOverlay(Static): 10 | """An overlay that displays help information about commands and shortcuts.""" 11 | 12 | DEFAULT_CSS = """ 13 | HelpOverlay { 14 | layer: help_layer; 15 | display: none; 16 | align: center middle; 17 | width: 80%; 18 | max-width: 80; 19 | height: 80%; 20 | max-height: 25; 21 | border: thick $accent; 22 | background: $panel; 23 | padding: 1 2; 24 | overflow-y: auto; 25 | } 26 | HelpOverlay.-active { 27 | display: block; 28 | } 29 | HelpOverlay #help-title { 30 | width: 100%; 31 | text-align: center; 32 | 
        margin-bottom: 1;
        text-style: bold;
    }
    HelpOverlay .help-section-title {
        margin-top: 1;
        text-style: bold underline;
    }
    HelpOverlay Static.help-command .command {
        color: $secondary;
        text-style: bold;
        width: 15;
    }
    HelpOverlay Static.help-command .description {
        width: 1fr;
    }
    HelpOverlay Static.help-key .key {
        color: $accent;
        text-style: bold;
        width: 10;
    }
    HelpOverlay Static.help-key .description {
        width: 1fr;
    }
    HelpOverlay Static.help-line {
        height: 1;
        width: 100%;
        margin-bottom: 1;
    }
    HelpOverlay #help-footer {
        margin-top: 1;
        width: 100%;
        text-align: center;
        color: $text-muted;
    }
    """

    # (command, description) rows rendered under "Slash Commands".
    COMMANDS: list[tuple[str, str]] = [
        ("/help", "Show this help overlay"),
        ("/model", "Switch the LLM model in-session"),
        ("/approval", "Switch auto-approval mode"),
        ("/history", "Show command & file history for this session"),
        ("/clear", "Clear screen & context"),
        ("/clearhistory", "Clear command history from disk"),
        ("/bug", "File a bug report with session log"),
        ("/compact", "Condense context into a summary (not implemented)"),
        ("q | exit | :q", "Exit codexy"),
    ]

    # (key, description) rows rendered under "Keyboard Shortcuts".
    KEYBINDINGS: list[tuple[str, str]] = [
        ("Ctrl+J/Ctrl+Enter", "Submit message / Approve command"),
        ("Up/Down", "Navigate history / options"),
        ("ESC", "Cancel input / Deny command / Close overlay"),
        ("Ctrl+Q", "Quit Application"),
        ("F1", "Show this help overlay"),
        ("F2", "Change Model (not implemented)"),
        ("F3", "Change Approval Mode (not implemented)"),
        ("F4", "Show Command History"),
        # ("Ctrl+X", "Open External Editor (not implemented)"),
    ]

    class ExitHelp(Message):
        """Message to signal exiting the help overlay."""

        pass

    def compose(self) -> ComposeResult:
        """Build the overlay: title, scrollable command/key sections, footer."""
        yield Label("Available Commands & Shortcuts", id="help-title")
        with VerticalScroll():
            yield Label("Slash Commands", classes="help-section-title")

            for command, description in self.COMMANDS:
                line_text = Text.assemble(
                    (f"{command:<15}", "bold"),
                    f" - {description}",
                )
                yield Static(line_text, classes="help-line help-command")

            yield Label("Keyboard Shortcuts", classes="help-section-title")

            for key, description in self.KEYBINDINGS:
                line_text = Text.assemble(
                    (f"{key:<10}", "bold"),
                    f" - {description}",
                )
                yield Static(line_text, classes="help-line help-key")

        yield Label("Press ESC to close", id="help-footer")

    def on_key(self, event: events.Key) -> None:
        """Handle key press to close the overlay."""
        if event.key == "escape":
            event.stop()
            # Post message to the App to handle closing
            self.post_message(self.ExitHelp())
--------------------------------------------------------------------------------
/codexy/cli/completion_scripts.py:
--------------------------------------------------------------------------------
_COMPLETION_SCRIPTS = {
    "bash": """
_codexy_completion() {
    local cur prev words cword
    _get_comp_words_by_ref -n : cur prev words cword

    # Basic file/directory completion for options that take paths
    if [[ "$prev" == "--image" || "$prev" == "-i" || "$prev" == "--view" || "$prev" == "-v" || "$prev" == "--writable-root" || "$prev" == "-w" || "$prev" == "--project-doc" ]]; then
        _filedir
        return 0
    fi

    # Completion for the approval-mode option
    if [[ "$prev" == "--approval-mode" || "$prev" == "-a" ]]; then
        COMPREPLY=( $(compgen -W "suggest auto-edit full-auto dangerous-auto" -- "$cur") )
        return 0
    fi

    # Completion for the model option (can add common models here if desired)
    if [[ "$prev" == "--model" || "$prev" == "-m" ]]; then
        COMPREPLY=( $(compgen -W "o4-mini o3
gpt-4.1 gpt-4o" -- "$cur") ) 22 | return 0 23 | fi 24 | 25 | # General argument completion (e.g., main prompt) or option names 26 | if [[ "$cur" == -* ]]; then 27 | COMPREPLY=( $(compgen -W "-h --help --version --model -m --image -i --view -v --quiet -q --config -c --writable-root -w --approval-mode -a --auto-edit --full-auto --no-project-doc --project-doc --full-stdout --notify --dangerously-auto-approve-everything --full-context -f" -- "$cur") ) 28 | else 29 | # Default to file/directory completion for arguments if not an option 30 | _filedir 31 | fi 32 | 33 | return 0 34 | } 35 | complete -F _codexy_completion codexy 36 | """, 37 | "zsh": """ 38 | #compdef codexy 39 | 40 | _codexy() { 41 | local -a options 42 | options=( 43 | '(-h --help)'{-h,--help}'[Show help message]' 44 | '--version[Show version information]' 45 | '(-m --model)'{-m,--model=}'[Model to use]: :(o4-mini o3 gpt-4.1 gpt-4o)' 46 | '(-i --image)'{-i,--image=}'[Path to image file]:_files' 47 | '(-v --view)'{-v,--view=}'[Path to rollout file]:_files' 48 | '(-q --quiet)'{-q,--quiet}'[Non-interactive mode]' 49 | '(-c --config)'{-c,--config}'[Open instructions file]' 50 | '(-w --writable-root)'{-w,--writable-root=}'[Writable root for full-auto]:_files -/' 51 | '(-a --approval-mode)'{-a,--approval-mode=}'[Approval policy]: :(suggest auto-edit full-auto dangerous-auto)' 52 | '--auto-edit[Auto-approve file edits]' 53 | '--full-auto[Auto-approve edits and sandboxed commands]' 54 | '--no-project-doc[Do not include codex.md]' 55 | '--project-doc=[Path to project doc]:_files' 56 | '--full-stdout[Do not truncate stdout/stderr]' 57 | '--notify[Enable desktop notifications]' 58 | '--dangerously-auto-approve-everything[Auto-approve everything unsandboxed (DANGEROUS)]' 59 | '(-f --full-context)'{-f,--full-context}'[Full-context mode]' 60 | '*:prompt:_files' 61 | ) 62 | _arguments $options 63 | } 64 | _codexy 65 | """, 66 | "fish": """ 67 | # fish completion for codexy 68 | complete -c codexy -f -a "completion" -d 
"Generate shell completion script" 69 | 70 | # Options for main command 71 | complete -c codexy -s h -l help -d 'Show help message' 72 | complete -c codexy -l version -d 'Show version information' 73 | complete -c codexy -s m -l model -d 'Model to use' -xa "o4-mini o3 gpt-4.1 gpt-4o" 74 | complete -c codexy -s i -l image -d 'Path to image file' -r -F 75 | complete -c codexy -s v -l view -d 'Path to rollout file' -r -F 76 | complete -c codexy -s q -l quiet -d 'Non-interactive mode' 77 | complete -c codexy -s c -l config -d 'Open instructions file' 78 | complete -c codexy -s w -l writable-root -d 'Writable root for full-auto' -r -F 79 | complete -c codexy -s a -l approval-mode -d 'Approval policy' -xa "suggest auto-edit full-auto dangerous-auto" 80 | complete -c codexy -l auto-edit -d 'Auto-approve file edits' 81 | complete -c codexy -l full-auto -d 'Auto-approve edits and sandboxed commands' 82 | complete -c codexy -l no-project-doc -d 'Do not include codex.md' 83 | complete -c codexy -l project-doc -d 'Path to project doc' -r -F 84 | complete -c codexy -l full-stdout -d 'Do not truncate stdout/stderr' 85 | complete -c codexy -l notify -d 'Enable desktop notifications' 86 | complete -c codexy -l dangerously-auto-approve-everything -d 'Auto-approve everything unsandboxed (DANGEROUS)' 87 | complete -c codexy -l full-context -d 'Full-context mode' 88 | 89 | # Options for 'completion' command 90 | complete -c codexy -n "__fish_seen_subcommand_from completion" -f -a "bash zsh fish" -d "Shell type" 91 | 92 | # Default argument completion (likely file paths or prompt text) 93 | complete -c codexy -f -a "(__fish_complete_path)" 94 | """, 95 | } 96 | -------------------------------------------------------------------------------- /codexy/tui/widgets/chat/header.py: -------------------------------------------------------------------------------- 1 | from typing import cast 2 | 3 | from textual.app import ComposeResult 4 | from textual.containers import Horizontal 5 | from 
class ChatHeader(Static):
    """Single-line title bar showing cwd, model, approval mode and session id.

    The four ``reactive`` attributes hold the displayed values; their watchers
    push changes into the matching Label widgets. Labels are located by stable
    ids rather than positional queries so reordering ``compose`` cannot
    silently break the watchers.
    """

    DEFAULT_CSS = """
    ChatHeader {
        dock: top;
        width: 100%;
        background: $accent-darken-2; /* Use theme color */
        color: $text;
        height: auto;
        padding: 0 1;
        border-bottom: thick $accent; /* Add bottom border */
    }
    ChatHeader Horizontal {
        width: 1fr;
        height: 1; /* Force height to 1 for single line */
        align: left middle;
        overflow: hidden; /* Hide overflow if content is too long */
    }
    ChatHeader Label {
        margin-right: 2;
        height: 1;
        text-style: bold;
        content-align: left middle;
        overflow: hidden; /* Prevent label content itself from wrapping */
        text-overflow: ellipsis; /* Add ellipsis if label content is too long */
    }
    ChatHeader .info {
        color: $text-muted;
        text-style: none;
        width: auto; /* Let info labels take their needed width */
    }
    ChatHeader .value {
        color: $text;
        text-style: bold;
        width: auto; /* Let value labels take their needed width */
        max-width: 25%; /* Limit max width of value labels */
    }
    ChatHeader #session-label {
        /* Allow session ID to take more space if needed, but still limit */
        max-width: 35%;
        width: 1fr; /* Allow it to shrink if needed */
        text-align: right; /* Align session ID to the right */
    }
    """

    # Reactive display state; watchers below mirror each value into a Label.
    session_id: reactive[str] = reactive("N/A")
    cwd: reactive[str] = reactive("N/A")
    model: reactive[str] = reactive("N/A")
    approval_mode: reactive[str] = reactive("N/A")

    # Last config passed to update_info, kept for reference.
    _app_config: AppConfig | None = None

    def compose(self) -> ComposeResult:
        with Horizontal():
            yield Label("Dir:", classes="info")
            yield Label(self.cwd, classes="value", id="cwd-label")
            yield Label("Model:", classes="info")
            yield Label(self.model, classes="value", id="model-label")
            yield Label("Approval:", classes="info")
            yield Label(self.approval_mode, classes="value", id="approval-label")
            # Use remaining space for session ID, aligned right
            yield Label("Session:", classes="info", shrink=True)  # Allow info label to shrink
            yield Label(self.session_id, classes="value", id="session-label")

    def update_info(self, config: AppConfig, session_id: str | None = None):
        """Update Header display info (called once on mount usually)."""
        self._app_config = config
        self.session_id = session_id or "N/A"
        self.cwd = short_cwd()
        self.model = config.get("model", "N/A")
        self.approval_mode = config.get("effective_approval_mode", "N/A")

    def _set_label(self, selector: str, text: str) -> None:
        """Update one label by id; ignore errors if it is not mounted yet."""
        try:
            self.query_one(selector, Label).update(text)
        except Exception:
            # Watchers can fire before compose() has mounted the labels.
            pass

    # Watchers update the specific Label widgets by stable id.
    def watch_cwd(self, new_cwd: str) -> None:
        self._set_label("#cwd-label", new_cwd)

    def watch_model(self, new_model: str) -> None:
        self._set_label("#model-label", new_model)

    def watch_approval_mode(self, new_mode: str) -> None:
        self._set_label("#approval-label", new_mode)

    def watch_session_id(self, new_id: str) -> None:
        self._set_label("#session-label", new_id)
import asyncio
import sys

from openai import APIError, AsyncOpenAI

# --- Constants ---
# Define recommended models (adjust as needed)
RECOMMENDED_MODELS: list[str] = ["o4-mini", "o3", "gpt-4o", "gpt-4.1"]
MODEL_LIST_TIMEOUT_SECONDS = 5.0  # Timeout for fetching model list

# --- Caching ---
# Simple in-memory cache for the model list. _cache_lock guards
# _cached_models; _is_fetching prevents concurrent API fetches.
_cached_models: list[str] | None = None
_cache_lock = asyncio.Lock()
_is_fetching = False

# --- Functions ---


async def _fetch_models_from_api(client: AsyncOpenAI) -> list[str]:
    """Fetch the list of model IDs from the OpenAI API.

    Returns a sorted list of model IDs filtered to the families this app can
    use (gpt-*, ft:*, o3*, o4*). Returns an empty list on any error so callers
    can fall back to RECOMMENDED_MODELS. If another fetch is already running,
    waits for it and returns the cache instead of issuing a second request.
    """
    global _is_fetching
    if _is_fetching:
        # Avoid concurrent fetches if one is already in progress.
        print("Model fetch already in progress, waiting...", file=sys.stderr)
        while _is_fetching:
            await asyncio.sleep(0.1)
        return _cached_models or []  # Return potentially updated cache

    _is_fetching = True
    try:
        print("Fetching available models from OpenAI API...", file=sys.stderr)
        models_response = await client.models.list()
        # Extract model IDs, keep only supported families, and sort.
        models = sorted(
            m.id for m in models_response.data if m.id and m.id.startswith(("gpt-", "ft:", "o3", "o4"))
        )
        print(f"Fetched {len(models)} models.", file=sys.stderr)
        return models
    except APIError as e:
        print(f"Warning: API Error fetching models: {e.code} - {e.message}", file=sys.stderr)
        return []  # Return empty on API error
    except Exception as e:
        print(f"Warning: Unexpected error fetching models: {e}", file=sys.stderr)
        return []  # Return empty on other errors
    finally:
        _is_fetching = False


async def get_available_models(client: AsyncOpenAI, force_refresh: bool = False) -> list[str]:
    """
    Get the list of available models, using the cache unless forced to refresh.

    The recommended models are always merged in, so the result is never empty
    even if the API call fails.
    """
    global _cached_models
    async with _cache_lock:
        if _cached_models is None or force_refresh:
            fetched_models = await _fetch_models_from_api(client)
            # Combine fetched models with recommended models, ensuring uniqueness and sorting.
            _cached_models = sorted(set(fetched_models) | set(RECOMMENDED_MODELS))
            if not fetched_models:
                print("Warning: Using only recommended models due to fetch failure.", file=sys.stderr)

        return _cached_models if _cached_models is not None else list(RECOMMENDED_MODELS)  # Fallback


async def preload_models(client: AsyncOpenAI):
    """Initiate the model fetch in the background and populate the cache.

    BUGFIX: previously the background task's result was discarded, so
    _cached_models stayed None and the first get_available_models() call had
    to fetch again. The task now stores the merged list into the cache.
    """
    async with _cache_lock:
        if _cached_models is not None or _is_fetching:
            return  # Cache already populated or a fetch is in flight.

    async def _background_fetch() -> None:
        global _cached_models
        fetched = await _fetch_models_from_api(client)
        async with _cache_lock:
            # Only fill the cache if nobody else populated it meanwhile.
            if _cached_models is None:
                _cached_models = sorted(set(fetched) | set(RECOMMENDED_MODELS))

    asyncio.create_task(_background_fetch())


async def is_model_supported(model_id: str, client: AsyncOpenAI) -> bool:
    """Check whether a given model ID is likely supported."""
    if not model_id:
        return False
    # Assume recommended models are always supported initially.
    if model_id in RECOMMENDED_MODELS:
        return True
    try:
        available = await get_available_models(client)
        return model_id in available
    except Exception:
        # If the check fails, conservatively assume it might be supported.
        return True


def sort_models_for_display(models: list[str], current_model: str) -> list[str]:
    """Sort models for the picker: current first, then recommended, then rest."""
    recommended_set = set(RECOMMENDED_MODELS)
    current_list = [m for m in models if m == current_model]
    recommended_list = sorted(m for m in models if m in recommended_set and m != current_model)
    other_list = sorted(m for m in models if m not in recommended_set and m != current_model)
    return current_list + recommended_list + other_list
recommended_set and m != current_model]) 98 | other_list = sorted([m for m in models if m not in recommended_set and m != current_model]) 99 | return current_list + recommended_list + other_list 100 | 101 | 102 | def format_model_for_display(model_id: str, current_model: str) -> str: 103 | """Formats the model ID for display, adding markers.""" 104 | prefix = "" 105 | if model_id == current_model: 106 | prefix += "✓ " # Checkmark for current 107 | if model_id in RECOMMENDED_MODELS: 108 | prefix += "⭐ " # Star for recommended 109 | return f"{prefix}{model_id}" 110 | -------------------------------------------------------------------------------- /codexy/utils/token_utils.py: -------------------------------------------------------------------------------- 1 | """Utilities for estimating token usage.""" 2 | 3 | import json 4 | import math 5 | from collections.abc import Sequence 6 | 7 | from openai.types.chat import ChatCompletionContentPartParam, ChatCompletionMessageParam, ChatCompletionMessageToolCall 8 | 9 | # Simple approximation: 4 characters per token on average 10 | CHARS_PER_TOKEN_ESTIMATE = 4 11 | 12 | 13 | def _count_chars_in_content(content: str | Sequence[ChatCompletionContentPartParam] | None) -> int: 14 | """Counts characters in message content, handling different formats.""" 15 | if content is None: 16 | return 0 17 | if isinstance(content, str): 18 | return len(content) 19 | if isinstance(content, list): 20 | count = 0 21 | for part in content: 22 | if isinstance(part, dict): 23 | part_type = part.get("type") 24 | if part_type == "text" or part_type == "input_text" or part_type == "output_text": 25 | text_part = part.get("text") 26 | if isinstance(text_part, str): 27 | count += len(text_part) 28 | elif part_type == "input_file": # As in codex-cli 29 | filename_part = part.get("filename") 30 | if isinstance(filename_part, str): 31 | count += len(filename_part) 32 | # Ignore image URLs for token count approximation 33 | # elif part_type == "image_url": 
34 | # pass 35 | # Handle refusal type if present in history items (like in TS version) 36 | elif part_type == "refusal": 37 | refusal_part = part.get("refusal") 38 | if isinstance(refusal_part, str): 39 | count += len(refusal_part) 40 | return count 41 | return 0 42 | 43 | 44 | def _count_chars_in_tool_calls(tool_calls: list[ChatCompletionMessageToolCall] | None) -> int: 45 | """Counts characters in tool call names and arguments.""" 46 | count = 0 47 | if tool_calls and isinstance(tool_calls, list): 48 | for tool_call in tool_calls: 49 | if isinstance(tool_call, dict): 50 | function_data = tool_call.get("function") 51 | if isinstance(function_data, dict): 52 | count += len(function_data.get("name", "")) 53 | # Arguments might be stored differently, handle safely 54 | args = function_data.get("arguments", "") 55 | if isinstance(args, str): 56 | count += len(args) 57 | elif isinstance(args, dict): # Handle if arguments are dict 58 | try: 59 | count += len(json.dumps(args)) 60 | except TypeError: 61 | count += len(str(args)) # Fallback 62 | return count 63 | 64 | 65 | def approximate_tokens_used(history: list[ChatCompletionMessageParam]) -> int: 66 | """ 67 | Roughly estimates the number of tokens used by the message history. 68 | Excludes system messages from the count, includes tool calls and outputs. 
69 | """ 70 | char_count = 0 71 | for message in history: 72 | # Ensure message is a dictionary before proceeding 73 | if not isinstance(message, dict): 74 | continue 75 | 76 | role = message.get("role") 77 | 78 | # Only count user and assistant messages for context usage approximation 79 | if role == "user" or role == "assistant": 80 | message_content = message.get("content", "") 81 | if isinstance(message_content, str): 82 | char_count += _count_chars_in_content(message_content) 83 | elif isinstance(message_content, list): 84 | for part in message_content: 85 | if isinstance(part, dict): 86 | part_type = part.get("type") 87 | if part_type == "text": 88 | char_count += _count_chars_in_content(part.get("text", "")) 89 | # Add handling for other part types if needed 90 | # Add contribution from tool calls if present 91 | tool_calls = message.get("tool_calls") 92 | if tool_calls and isinstance(tool_calls, list): 93 | for tool_call in tool_calls: 94 | if isinstance(tool_call, dict): 95 | function_data = tool_call.get("function") 96 | if isinstance(function_data, dict): 97 | char_count += len(function_data.get("name", "")) 98 | char_count += len(function_data.get("arguments", "")) 99 | elif role == "tool": 100 | # Also count tool responses (content field) 101 | tool_content = message.get("content") 102 | if isinstance(tool_content, str): 103 | char_count += len(tool_content) 104 | elif isinstance(tool_content, list): 105 | for part in tool_content: 106 | if isinstance(part, dict): 107 | part_type = part.get("type") 108 | if part_type == "text": 109 | char_count += _count_chars_in_content(part.get("text", "")) 110 | 111 | # Estimate tokens based on character count 112 | return math.ceil(char_count / CHARS_PER_TOKEN_ESTIMATE) 113 | -------------------------------------------------------------------------------- /codexy/utils/storage.py: -------------------------------------------------------------------------------- 1 | """Utilities for persistent storage like command 
import json
import sys
import time
from typing import TypedDict

from ..config import CONFIG_DIR
from ..utils.security_check import SecurityChecker

security_checker = SecurityChecker()


# Assuming config types might be shared or defined elsewhere,
# but defining locally for clarity if not.
# If AppConfig/HistoryConfig are defined in config.py, import them instead.
class HistoryConfig(TypedDict, total=False):
    max_size: int  # Maximum number of entries kept on disk
    save_history: bool  # Master switch for persisting history


class HistoryEntry(TypedDict):
    command: str  # The command text as entered (stripped)
    timestamp: float  # Seconds since epoch when the command was recorded


# Default history config used when the caller provides none.
DEFAULT_HISTORY_CONFIG: HistoryConfig = {
    "max_size": 1000,
    "save_history": True,
}


HISTORY_FILE = CONFIG_DIR / "history.json"


def is_sensitive_command(command: str) -> bool:
    """Check whether a command contains potential secrets using detect-secrets."""
    messages = security_checker.check_line(command)
    if messages:
        # Log sensitivity check failure to stderr for debugging/awareness.
        print(
            f"[History] Command '{command[:20]}...' potentially sensitive, skipping save.",
            file=sys.stderr,
        )
        return True
    return False


# --- Command History Functions ---


def _is_valid_entry(entry: object) -> bool:
    """Return True if *entry* looks like a well-formed HistoryEntry."""
    return (
        isinstance(entry, dict)
        and isinstance(entry.get("command"), str)
        and isinstance(entry.get("timestamp"), (int, float))
    )


def load_command_history() -> list[HistoryEntry]:
    """Load command history from the history file.

    Malformed files and malformed individual entries are skipped with a
    warning rather than raised, so a corrupted history never prevents startup.
    """
    if not HISTORY_FILE.exists():
        return []
    try:
        with open(HISTORY_FILE, encoding="utf-8") as f:
            history_data = json.load(f)
        if isinstance(history_data, list):
            # ROBUSTNESS: filter out malformed entries so later code can rely
            # on entry["command"] / entry["timestamp"] being present and typed.
            valid_entries = [e for e in history_data if _is_valid_entry(e)]
            dropped = len(history_data) - len(valid_entries)
            if dropped:
                print(f"Warning: Skipped {dropped} malformed entries in {HISTORY_FILE}.", file=sys.stderr)
            return valid_entries
        else:
            print(f"Warning: History file {HISTORY_FILE} does not contain a valid list. Starting fresh.", file=sys.stderr)
            return []
    except (OSError, json.JSONDecodeError) as e:
        # Use stderr for warnings/errors that shouldn't pollute normal output.
        print(f"Warning: Failed to load command history from {HISTORY_FILE}. Starting fresh. Error: {e}", file=sys.stderr)
        return []
    except Exception as e:  # Catch unexpected errors
        print(
            f"Warning: An unexpected error occurred loading history {HISTORY_FILE}. Starting fresh. Error: {e}",
            file=sys.stderr,
        )
        return []


def save_command_history(history: list[HistoryEntry], config: HistoryConfig | None = None):
    """Save command history to the history file, trimming to max_size."""
    cfg_to_use = config if config else DEFAULT_HISTORY_CONFIG
    max_size = cfg_to_use.get("max_size", DEFAULT_HISTORY_CONFIG.get("max_size", 1000))

    try:
        HISTORY_FILE.parent.mkdir(parents=True, exist_ok=True)
        trimmed_history = history[-max_size:]
        with open(HISTORY_FILE, "w", encoding="utf-8") as f:
            json.dump(trimmed_history, f, indent=2, ensure_ascii=False)
    except OSError as e:
        print(f"Error: Failed to save command history to {HISTORY_FILE}: {e}", file=sys.stderr)
    except Exception as e:
        print(f"Error: An unexpected error occurred saving history to {HISTORY_FILE}: {e}", file=sys.stderr)


def add_to_history(
    command: str,
    history: list[HistoryEntry],
    config: HistoryConfig | None = None,
) -> list[HistoryEntry]:
    """
    Add a command to the history list if configured to save, it's not sensitive
    (using detect-secrets), and it's not an immediate duplicate. Saves the
    updated history to disk.

    Returns:
        The potentially updated history list (trimmed to max_size).
    """
    cfg_to_use = config if config else DEFAULT_HISTORY_CONFIG
    should_save = cfg_to_use.get("save_history", DEFAULT_HISTORY_CONFIG.get("save_history", True))

    if not should_save:
        return history

    trimmed_command = command.strip()
    if not trimmed_command:
        return history

    # Check for sensitivity using detect-secrets.
    if is_sensitive_command(trimmed_command):
        return history  # Don't save sensitive commands

    # Check for immediate duplicate.
    if history and history[-1]["command"] == trimmed_command:
        return history

    new_entry: HistoryEntry = {
        "command": trimmed_command,
        "timestamp": time.time(),
    }

    new_history = history + [new_entry]
    save_command_history(new_history, cfg_to_use)  # Save handles trimming

    max_size = cfg_to_use.get("max_size", DEFAULT_HISTORY_CONFIG.get("max_size", 1000))
    return new_history[-max_size:]


def clear_command_history():
    """Clear the command history by overwriting the file with an empty list.

    Raises on failure so the caller (TUI) can surface the error to the user.
    """
    try:
        HISTORY_FILE.parent.mkdir(parents=True, exist_ok=True)
        with open(HISTORY_FILE, "w", encoding="utf-8") as f:
            json.dump([], f)
        # Feedback is intentionally left to the TUI, not printed here.
    except OSError as e:
        print(f"Error: Failed to clear command history file {HISTORY_FILE}: {e}", file=sys.stderr)
        raise
    except Exception as e:
        print(f"Error: An unexpected error occurred clearing history {HISTORY_FILE}: {e}", file=sys.stderr)
        raise
from rich.text import Text
from textual import events
from textual.app import ComposeResult
from textual.containers import VerticalScroll
from textual.message import Message
from textual.reactive import reactive
from textual.widgets import Label, ListItem, ListView, Static

from ....utils.model_utils import format_model_for_display, sort_models_for_display


class ModelItem(ListItem):
    """List item carrying the model ID alongside its rendered label."""

    def __init__(self, model_id: str, display_text: Text):
        super().__init__(Label(display_text))
        self.model_id = model_id


class ModelOverlay(Static):
    """An overlay for selecting an OpenAI model."""

    DEFAULT_CSS = """
    ModelOverlay {
        layer: model_overlay_layer;
        display: none;
        align: center middle;
        width: 80%;
        max-width: 60;
        height: 80%;
        max-height: 25;
        border: thick $accent;
        background: $panel;
        padding: 1;
    }
    ModelOverlay.-active {
        display: block;
    }
    ModelOverlay #model-overlay-title {
        width: 100%;
        text-align: center;
        margin-bottom: 1;
        text-style: bold;
    }
    ModelOverlay #model-list-view {
        border: none;
        background: $panel-darken-1;
    }
    ModelOverlay #model-overlay-footer {
        margin-top: 1;
        width: 100%;
        text-align: center;
        color: $text-muted;
    }
    ModelOverlay #model-overlay-error {
        margin-top: 1;
        text-align: center;
        color: $error;
    }
    /* BUGFIX: the error label is composed with class -hidden, but no rule
       defined it, so the label was visible until the first watcher ran. */
    ModelOverlay #model-overlay-error.-hidden {
        display: none;
    }
    ModelOverlay ListItem {
        padding: 0 1;
        height: 1;
    }
    ModelOverlay ListItem Label {
        height: 1;
    }
    ModelOverlay ListItem :hover {
        background: $accent-darken-1;
    }
    ModelOverlay ListItem.--highlight {
        background: $accent !important;
        color: $text !important;
    }
    ModelOverlay ListItem.--highlight:focus {
        background: $accent-darken-1 !important;
    }
    """

    # --- Reactives ---
    available_models: reactive[list[str]] = reactive(list)
    current_model: reactive[str] = reactive("")
    can_switch: reactive[bool] = reactive(True)  # Controls if switching is allowed

    # --- Messages ---
    class Selected(Message):
        """Sent when a model is selected."""

        def __init__(self, model_id: str):
            self.model_id = model_id
            super().__init__()

    class Exit(Message):
        """Sent when the overlay is exited without selection."""

        pass

    def compose(self) -> ComposeResult:
        yield Label("Switch Model", id="model-overlay-title")
        yield Label("Cannot switch model after conversation starts.", id="model-overlay-error", classes="-hidden")
        with VerticalScroll():
            yield ListView(id="model-list-view")
        yield Label("↑/↓ Select, Enter Confirm, Esc Cancel", id="model-overlay-footer")

    def on_mount(self) -> None:
        """Focus the list view when mounted."""
        self.call_later(self.focus_list)

    def focus_list(self) -> None:
        """Safely focus the ListView."""
        try:
            list_view = self.query_one(ListView)
            if list_view.is_mounted:
                list_view.focus()
        except Exception as e:
            self.log.error(f"Error focusing model list: {e}")

    def watch_can_switch(self, can_switch: bool) -> None:
        """Show the model list when switching is allowed, else the error label.

        Toggles the -hidden class (now backed by CSS) consistently with the
        class the label is created with, instead of the previously undefined
        -active class.
        """
        self.query_one("#model-list-view").display = can_switch
        error_label = self.query_one("#model-overlay-error")
        error_label.set_class(can_switch, "-hidden")
        error_label.display = not can_switch  # Ensure it's visible when needed

    def watch_available_models(self, new_models: list[str]) -> None:
        """Update the list view when available models change."""
        self._populate_list()

    def watch_current_model(self, new_current_model: str) -> None:
        """Update the list view when the current model changes."""
        self._populate_list()

    def _populate_list(self):
        """Populate the ListView with models, highlighting the current one."""
        if not self.is_mounted:  # Don't populate if not mounted
            return

        list_view = self.query_one("#model-list-view", ListView)
        list_view.clear()

        if not self.can_switch:
            return  # Don't populate if switching isn't allowed

        sorted_list = sort_models_for_display(self.available_models, self.current_model)
        highlighted_index: int | None = None

        for index, model_id in enumerate(sorted_list):
            display_text = format_model_for_display(model_id, self.current_model)
            rich_text = Text.from_markup(display_text)  # Convert potentially marked-up string
            list_view.append(ModelItem(model_id, rich_text))
            if model_id == self.current_model:
                highlighted_index = index

        if highlighted_index is not None and len(list_view) > 0:
            list_view.index = highlighted_index

    def on_list_view_selected(self, event: ListView.Selected) -> None:
        """Handle selection from the list view."""
        event.stop()
        if self.can_switch and isinstance(event.item, ModelItem):
            selected_model_id = event.item.model_id
            self.log(f"Model selected: {selected_model_id}")
            self.post_message(self.Selected(selected_model_id))

    def on_key(self, event: events.Key) -> None:
        """Handle key presses, specifically Escape to exit without selecting."""
        if event.key == "escape":
            event.stop()
            self.log("Model overlay exited via Escape.")
            self.post_message(self.Exit())
# Constants
PYPI_URL_TEMPLATE = f"https://pypi.org/pypi/{PACKAGE_NAME}/json"
UPDATE_CHECK_FREQUENCY_SECONDS = 60 * 60 * 24  # Check once per day
STATE_FILE = CONFIG_DIR / "update_check.json"


class UpdateCheckState(TypedDict, total=False):
    """Structure for storing the last update check timestamp."""

    last_check_ts: float  # Store timestamp as float (seconds since epoch)


class UpdateInfo(TypedDict):
    """Structure for returning update information."""

    current_version: str
    latest_version: str


# --- State Management ---


def _read_state() -> UpdateCheckState | None:
    """Read the last check state from the JSON file.

    Returns None (never raises) on a missing, unreadable or malformed file.
    """
    if not STATE_FILE.exists():
        return None
    try:
        with open(STATE_FILE, encoding="utf-8") as f:
            data = json.load(f)
        if isinstance(data, dict) and "last_check_ts" in data:
            return cast(UpdateCheckState, data)  # Use cast after validation
        else:
            print(f"Warning: Invalid format in {STATE_FILE}. Ignoring.", file=sys.stderr)
            return None
    except (OSError, json.JSONDecodeError) as e:
        print(f"Warning: Could not read update check state from {STATE_FILE}: {e}", file=sys.stderr)
        return None
    except Exception as e:
        print(f"Warning: Unexpected error reading update state {STATE_FILE}: {e}", file=sys.stderr)
        return None


def _write_state(state: UpdateCheckState):
    """Write the current check state to the JSON file (errors are logged, not raised)."""
    try:
        STATE_FILE.parent.mkdir(parents=True, exist_ok=True)
        with open(STATE_FILE, "w", encoding="utf-8") as f:
            json.dump(state, f, indent=2)
    except OSError as e:
        print(f"Error: Could not write update check state to {STATE_FILE}: {e}", file=sys.stderr)
    except Exception as e:
        print(f"Error: Unexpected error writing update state {STATE_FILE}: {e}", file=sys.stderr)


# --- Version Information ---


async def _get_current_version() -> str | None:
    """Get the currently installed version of the package.

    BUGFIX: metadata.version() is a blocking filesystem call; it previously
    ran directly on the event loop despite a comment claiming otherwise.
    It now runs in a worker thread via asyncio.to_thread.
    """
    try:
        return await asyncio.to_thread(metadata.version, PACKAGE_NAME)
    except metadata.PackageNotFoundError:
        print(f"Warning: Package '{PACKAGE_NAME}' not found. Cannot determine current version.", file=sys.stderr)
        return None
    except Exception as e:
        print(f"Warning: Error getting current version for '{PACKAGE_NAME}': {e}", file=sys.stderr)
        return None


async def _fetch_latest_version() -> str | None:
    """Fetch the latest released version string from PyPI, or None on any error."""
    try:
        async with httpx.AsyncClient(timeout=10.0) as client:  # Add a timeout
            response = await client.get(PYPI_URL_TEMPLATE)
            response.raise_for_status()  # Raise an exception for bad status codes (4xx or 5xx)
            data = response.json()
            return data.get("info", {}).get("version")
    except httpx.RequestError as e:
        # Network-related errors
        print(f"Warning: Network error checking for updates: {e}", file=sys.stderr)
        return None
    except httpx.HTTPStatusError as e:
        # Errors for 4xx/5xx responses
        print(
            f"Warning: HTTP error checking for updates: {e.response.status_code} - {e.response.text[:100]}...",
            file=sys.stderr,
        )
        return None
    except json.JSONDecodeError:
        print(f"Warning: Could not decode JSON response from PyPI for {PACKAGE_NAME}.", file=sys.stderr)
        return None
    except Exception as e:
        print(f"Warning: Unexpected error fetching latest version: {e}", file=sys.stderr)
        return None


# --- Main Check Function ---


async def check_for_updates() -> UpdateInfo | None:
    """
    Check PyPI for a newer version of the package if enough time has passed.

    Returns:
        An UpdateInfo dictionary if a newer version is found, otherwise None.
    """
    now_ts = datetime.now(timezone.utc).timestamp()
    state = _read_state()
    last_check_ts = state.get("last_check_ts", 0.0) if state else 0.0

    # Check if enough time has passed since the last check.
    if (now_ts - last_check_ts) < UPDATE_CHECK_FREQUENCY_SECONDS:
        return None

    print("Checking for codexy updates...", file=sys.stderr)  # Indicate check is running

    # Get current and latest versions concurrently (the blocking metadata
    # lookup runs in a worker thread inside _get_current_version).
    current_version_str, latest_version_str = await asyncio.gather(
        _get_current_version(),
        _fetch_latest_version(),
    )

    # Update state regardless of whether the check succeeded, to avoid constant checks on failure.
    _write_state({"last_check_ts": now_ts})

    if not current_version_str or not latest_version_str:
        print("Debug: Could not determine current or latest version.", file=sys.stderr)
        return None  # Cannot compare if either version is missing

    try:
        current_version = parse_version(current_version_str)
        latest_version = parse_version(latest_version_str)

        if latest_version > current_version:
            print(f"Update found: {current_version_str} -> {latest_version_str}", file=sys.stderr)
            return {
                "current_version": current_version_str,
                "latest_version": latest_version_str,
            }
        else:
            return None
    except Exception as e:  # Catch errors during version parsing/comparison
        print(
            f"Warning: Error comparing versions ('{current_version_str}', '{latest_version_str}'): {e}",
            file=sys.stderr,
        )
        return None
"""Stores information about supported models, like context length."""

import sys
from typing import TypedDict

# Default from oai/models.go (but this might change)
# Using a common default for unknown models.
DEFAULT_MAX_TOKENS = 4096


# Define the structure for model information
class ModelInfo(TypedDict):
    label: str  # Human-readable display name
    max_context_length: int  # Using tokens as the unit


# Dictionary mapping model IDs to their information
# Based on codex-cli/src/utils/model-info.ts, but simplified for common models
# We estimate context length in tokens.
MODEL_INFO_REGISTRY: dict[str, ModelInfo] = {
    "o1-pro-2025-03-19": {"label": "o1 Pro (2025-03-19)", "max_context_length": 200000},
    "o3": {"label": "o3", "max_context_length": 200000},
    "o3-2025-04-16": {"label": "o3 (2025-04-16)", "max_context_length": 200000},
    "o4-mini": {"label": "o4 Mini", "max_context_length": 200000},
    "gpt-4.1-nano": {"label": "GPT-4.1 Nano", "max_context_length": 1000000},
    "gpt-4.1-nano-2025-04-14": {"label": "GPT-4.1 Nano (2025-04-14)", "max_context_length": 1000000},
    "o4-mini-2025-04-16": {"label": "o4 Mini (2025-04-16)", "max_context_length": 200000},
    "gpt-4": {"label": "GPT-4", "max_context_length": 8192},
    "o1-preview-2024-09-12": {"label": "o1 Preview (2024-09-12)", "max_context_length": 128000},
    "gpt-4.1-mini": {"label": "GPT-4.1 Mini", "max_context_length": 1000000},
    "gpt-3.5-turbo-instruct-0914": {"label": "GPT-3.5 Turbo Instruct (0914)", "max_context_length": 4096},
    "gpt-4o-mini-search-preview": {"label": "GPT-4o Mini Search Preview", "max_context_length": 128000},
    "gpt-4.1-mini-2025-04-14": {"label": "GPT-4.1 Mini (2025-04-14)", "max_context_length": 1000000},
    "chatgpt-4o-latest": {"label": "ChatGPT-4o Latest", "max_context_length": 128000},
    "gpt-3.5-turbo-1106": {"label": "GPT-3.5 Turbo (1106)", "max_context_length": 16385},
    "gpt-4o-search-preview": {"label": "GPT-4o Search Preview", "max_context_length": 128000},
    "gpt-4-turbo": {"label": "GPT-4 Turbo", "max_context_length": 128000},
    "gpt-4o-realtime-preview-2024-12-17": {
        "label": "GPT-4o Realtime Preview (2024-12-17)",
        "max_context_length": 128000,
    },
    "gpt-3.5-turbo-instruct": {"label": "GPT-3.5 Turbo Instruct", "max_context_length": 4096},
    "gpt-3.5-turbo": {"label": "GPT-3.5 Turbo", "max_context_length": 16385},
    "gpt-4-turbo-preview": {"label": "GPT-4 Turbo Preview", "max_context_length": 128000},
    "gpt-4o-mini-search-preview-2025-03-11": {
        "label": "GPT-4o Mini Search Preview (2025-03-11)",
        "max_context_length": 128000,
    },
    "gpt-4-0125-preview": {"label": "GPT-4 (0125) Preview", "max_context_length": 128000},
    "gpt-4o-2024-11-20": {"label": "GPT-4o (2024-11-20)", "max_context_length": 128000},
    "o3-mini": {"label": "o3 Mini", "max_context_length": 200000},
    "gpt-4o-2024-05-13": {"label": "GPT-4o (2024-05-13)", "max_context_length": 128000},
    "gpt-4-turbo-2024-04-09": {"label": "GPT-4 Turbo (2024-04-09)", "max_context_length": 128000},
    "gpt-3.5-turbo-16k": {"label": "GPT-3.5 Turbo 16k", "max_context_length": 16385},
    "o3-mini-2025-01-31": {"label": "o3 Mini (2025-01-31)", "max_context_length": 200000},
    "o1-preview": {"label": "o1 Preview", "max_context_length": 128000},
    "o1-2024-12-17": {"label": "o1 (2024-12-17)", "max_context_length": 128000},
    "gpt-4-0613": {"label": "GPT-4 (0613)", "max_context_length": 8192},
    "o1": {"label": "o1", "max_context_length": 128000},
    "o1-pro": {"label": "o1 Pro", "max_context_length": 200000},
    "gpt-4.5-preview": {"label": "GPT-4.5 Preview", "max_context_length": 128000},
    "gpt-4.5-preview-2025-02-27": {"label": "GPT-4.5 Preview (2025-02-27)", "max_context_length": 128000},
    "gpt-4o-search-preview-2025-03-11": {"label": "GPT-4o Search Preview (2025-03-11)", "max_context_length": 128000},
    "gpt-4o": {"label": "GPT-4o", "max_context_length": 128000},
    "gpt-4o-mini": {"label": "GPT-4o Mini", "max_context_length": 128000},
    "gpt-4o-2024-08-06": {"label": "GPT-4o (2024-08-06)", "max_context_length": 128000},
    "gpt-4.1": {"label": "GPT-4.1", "max_context_length": 1000000},
    "gpt-4.1-2025-04-14": {"label": "GPT-4.1 (2025-04-14)", "max_context_length": 1000000},
    "gpt-4o-mini-2024-07-18": {"label": "GPT-4o Mini (2024-07-18)", "max_context_length": 128000},
    "o1-mini": {"label": "o1 Mini", "max_context_length": 128000},
    "gpt-3.5-turbo-0125": {"label": "GPT-3.5 Turbo (0125)", "max_context_length": 16385},
    "o1-mini-2024-09-12": {"label": "o1 Mini (2024-09-12)", "max_context_length": 128000},
    "gpt-4-1106-preview": {"label": "GPT-4 (1106) Preview", "max_context_length": 128000},
    "deepseek-chat": {"label": "DeepSeek Chat", "max_context_length": 64000},
    "deepseek-reasoner": {"label": "DeepSeek Reasoner", "max_context_length": 64000},
}

# Legacy mapping for backward compatibility
MODEL_MAX_TOKENS = {
    "gpt-4-turbo": 128000,
    "gpt-4-32k": 32768,
    "gpt-4.1-32k": 32768,
    "gpt-4": 8192,
    "gpt-4.1": 1000000,
    "gpt-3.5-turbo-16k": 16385,
    "gpt-3.5-turbo": 16385,
    "o4-mini": 200000,
    "gpt-4o": 128000,
    "gpt-4o-mini": 128000,
}


def get_max_tokens_for_model(model_name: str) -> int:
    """
    Returns the maximum context tokens for a given model name using the new registry.

    Lookup order:
      1. Exact match in MODEL_INFO_REGISTRY.
      2. Exact match in the legacy MODEL_MAX_TOKENS mapping.
      3. Substring heuristics for unknown variants, most specific first.
      4. DEFAULT_MAX_TOKENS, with a warning printed to stderr.
    """
    if model_name in MODEL_INFO_REGISTRY:
        return MODEL_INFO_REGISTRY[model_name]["max_context_length"]

    # Fallback to legacy mapping
    if model_name in MODEL_MAX_TOKENS:
        return MODEL_MAX_TOKENS[model_name]

    # Check for well-known prefixes (order matters - more specific first).
    # FIX: "gpt-4.1", "gpt-4.5" and "gpt-4o" must be tested BEFORE the bare
    # "gpt-4" substring. Previously "gpt-4" matched first, so every unknown
    # gpt-4o/gpt-4.1/gpt-4.5 variant was capped at 8192 tokens and the later
    # branches were unreachable dead code.
    if "gpt-4-turbo" in model_name:
        return 128000
    if "gpt-4-32k" in model_name:
        return 32768
    if "gpt-3.5-turbo-16k" in model_name:
        return 16385
    if "gpt-3.5-turbo-instruct" in model_name:
        return 4096  # gpt-3.5-turbo-instruct has different token limit than regular gpt-3.5-turbo
    if "gpt-3.5-turbo" in model_name:
        return 16385
    if "gpt-4.1" in model_name:
        return 1000000  # matches registry values for the gpt-4.1 family
    if "gpt-4.5" in model_name:
        return 128000  # matches registry values for the gpt-4.5 previews
    if "gpt-4o" in model_name:
        return 128000
    if "gpt-4" in model_name:
        return 8192
    if "o4-mini" in model_name:
        return 200000

    print(f"Warning: Unknown model name '{model_name}'. Using default max tokens: {DEFAULT_MAX_TOKENS}", file=sys.stderr)
    return DEFAULT_MAX_TOKENS


def get_model_max_tokens(model_name: str) -> int:
    """
    Alias for get_max_tokens_for_model for backward compatibility.
    """
    return get_max_tokens_for_model(model_name)
129 | """ 130 | return get_max_tokens_for_model(model_name) 131 | -------------------------------------------------------------------------------- /codexy/utils/security_check.py: -------------------------------------------------------------------------------- 1 | """ 2 | Security Check Module - Responsible for Checking File and Directory Security 3 | """ 4 | 5 | import re 6 | from dataclasses import dataclass 7 | from pathlib import Path 8 | 9 | from detect_secrets.core.secrets_collection import SecretsCollection 10 | from detect_secrets.settings import default_settings 11 | 12 | 13 | @dataclass 14 | class SuspiciousFileResult: 15 | """Suspicious File Result Class 16 | 17 | Attributes: 18 | file_path: File path as string 19 | messages: List of suspicious reasons 20 | """ 21 | 22 | file_path: str # Keep as string for serialization compatibility 23 | messages: list[str] 24 | 25 | 26 | class SecurityChecker: 27 | """Security Checker Class""" 28 | 29 | # Suspicious file name patterns 30 | SUSPICIOUS_FILE_PATTERNS = [ 31 | r"\.env($|\..*$)", # Environment variable files 32 | r".*_rsa$", # RSA keys 33 | r".*\.pem$", # PEM certificates 34 | r".*\.key$", # Key files 35 | r".*\.pfx$", # PFX certificates 36 | r".*\.p12$", # P12 certificates 37 | r".*\.pkcs12$", # PKCS12 certificates 38 | r".*\.keystore$", # Keystore 39 | r".*\.jks$", # Java keystore 40 | r".*\.kdbx$", # KeePass database 41 | r".*\.psafe3$", # Password Safe database 42 | ] 43 | 44 | # Suspicious file content patterns 45 | SUSPICIOUS_CONTENT_PATTERNS = [ 46 | # API keys 47 | r"api[_-]?key.*['\"][0-9a-zA-Z]{32,}['\"]", 48 | r"api[_-]?secret.*['\"][0-9a-zA-Z]{32,}['\"]", 49 | # Access tokens 50 | r"access[_-]?token.*['\"][0-9a-zA-Z]{32,}['\"]", 51 | r"auth[_-]?token.*['\"][0-9a-zA-Z]{32,}['\"]", 52 | # AWS related 53 | r"AKIA[0-9A-Z]{16}", # AWS access key ID 54 | r"aws[_-]?secret.*['\"][0-9a-zA-Z/+=]{32,}['\"]", 55 | # Database connection strings 56 | r"jdbc:.*:@.*:\d+:.*", # JDBC connection string 57 | 
r"mongodb(\+srv)?://[^/\s]+:[^/\s]+@[^/\s]+", # MongoDB connection URI 58 | r"postgres://[^/\s]+:[^/\s]+@[^/\s]+", # PostgreSQL connection URI 59 | # Private keys 60 | r"-----BEGIN (?:RSA )?PRIVATE KEY-----", 61 | # Passwords 62 | r"password.*['\"][^'\"\s]{8,}['\"]", 63 | r"passwd.*['\"][^'\"\s]{8,}['\"]", 64 | r"pwd.*['\"][^'\"\s]{8,}['\"]", 65 | ] 66 | 67 | def __init__(self): 68 | """Initialize security checker""" 69 | self.suspicious_file_patterns = [re.compile(pattern, re.IGNORECASE) for pattern in self.SUSPICIOUS_FILE_PATTERNS] 70 | self.suspicious_content_patterns = [re.compile(pattern, re.IGNORECASE) for pattern in self.SUSPICIOUS_CONTENT_PATTERNS] 71 | self.checked_paths: set[str] = set() 72 | 73 | def check_line(self, line: str) -> list[str]: 74 | """Check security of a single line 75 | 76 | Args: 77 | line: Line content 78 | """ 79 | messages: list[str] = [] 80 | for pattern in self.suspicious_content_patterns: 81 | matches = pattern.finditer(line) 82 | for match in matches: 83 | matched_text = match.group() 84 | messages.append(f"Suspicious content pattern: {matched_text}") 85 | 86 | return messages 87 | 88 | def check_file(self, file_path: Path, content: str) -> list[str]: 89 | """Check security of a single file 90 | 91 | Args: 92 | file_path: Path object representing the file path 93 | content: File content 94 | 95 | Returns: 96 | List of suspicious reasons 97 | """ 98 | str_path = str(file_path) 99 | if str_path in self.checked_paths: 100 | return [] 101 | 102 | self.checked_paths.add(str_path) 103 | messages: list[str] = [] 104 | 105 | # Check file name 106 | for pattern in self.suspicious_file_patterns: 107 | if pattern.match(file_path.name): 108 | messages.append(f"Suspicious file name pattern: {file_path.name}") 109 | break 110 | 111 | # Check file content 112 | for pattern in self.suspicious_content_patterns: 113 | matches = pattern.finditer(content) 114 | for match in matches: 115 | matched_text = match.group() 116 | # Truncate matched text to 
avoid displaying sensitive information 117 | truncated_text = matched_text[:20] + "..." if len(matched_text) > 20 else matched_text 118 | messages.append(f"Suspicious content pattern: {truncated_text}") 119 | 120 | return messages 121 | 122 | def check_file_size(self, file_path: Path, max_size_mb: float = 10.0) -> list[str]: 123 | """Check file size 124 | 125 | Args: 126 | file_path: Path object representing the file path 127 | max_size_mb: Maximum allowed size in megabytes 128 | 129 | Returns: 130 | List of warning messages 131 | """ 132 | try: 133 | size_mb = file_path.stat().st_size / (1024 * 1024) 134 | if size_mb > max_size_mb: 135 | return [f"File size exceeds {max_size_mb}MB (current size: {size_mb:.2f}MB)"] 136 | except Exception as e: 137 | print(f"Error checking file size: {e}") 138 | 139 | return [] 140 | 141 | def check_files_with_secretlint(self, file_path: Path) -> list[str]: 142 | secrets = SecretsCollection() 143 | with default_settings(): 144 | secrets.scan_file(filename=str(file_path.absolute())) 145 | 146 | results = [] 147 | for secret in secrets: 148 | results.append(f"Secret detected: {secret[1].type}") 149 | return results 150 | 151 | 152 | def check_files(root_dir: str | Path, file_paths: list[str], file_contents: dict[str, str]) -> list[SuspiciousFileResult]: 153 | """Check security of multiple files 154 | 155 | Args: 156 | root_dir: Root directory path 157 | file_paths: List of file paths to check 158 | file_contents: Dictionary mapping file paths to their contents 159 | 160 | Returns: 161 | List of suspicious file results 162 | """ 163 | checker = SecurityChecker() 164 | results: list[SuspiciousFileResult] = [] 165 | root_path = Path(root_dir) 166 | 167 | for file_path in file_paths: 168 | # Convert to Path object for path operations 169 | full_path = root_path / file_path 170 | content = file_contents.get(file_path, "") 171 | 172 | messages = [] 173 | if full_path.exists(): 174 | messages.extend(checker.check_file(full_path, content)) 
175 | messages.extend(checker.check_file_size(full_path)) 176 | messages.extend(checker.check_files_with_secretlint(full_path)) 177 | if messages: 178 | # Keep using relative path string in results 179 | results.append(SuspiciousFileResult(file_path=file_path, messages=messages)) 180 | 181 | return results 182 | -------------------------------------------------------------------------------- /codexy/utils/filesystem.py: -------------------------------------------------------------------------------- 1 | """Filesystem and path related utility functions for codexy.""" 2 | 3 | import os 4 | import subprocess 5 | from pathlib import Path 6 | 7 | 8 | def check_in_git(workdir: str | Path) -> bool: 9 | """ 10 | Checks if the given directory is part of a Git repository. 11 | 12 | Uses `git rev-parse --is-inside-work-tree` command which exits with 0 13 | if inside a work tree, and non-zero otherwise. 14 | 15 | Args: 16 | workdir: The directory path (string or Path object) to check. 17 | 18 | Returns: 19 | True if the directory is inside a Git work tree, False otherwise 20 | (including if git command fails or git is not found). 
    """
    workdir_path = Path(workdir).resolve()  # Ensure absolute path
    cmd = ["git", "rev-parse", "--is-inside-work-tree"]

    try:
        # Run the git command in the specified directory
        # Suppress stdout and stderr as we only care about the return code
        # check=False prevents raising CalledProcessError on non-zero exit
        result = subprocess.run(
            cmd,
            cwd=str(workdir_path),
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
            check=False,  # Do not raise an exception on non-zero exit
            creationflags=subprocess.CREATE_NO_WINDOW if os.name == "nt" else 0,  # Hide console window on Windows
        )
        # Return True if the command executed successfully (exit code 0)
        return result.returncode == 0
    except FileNotFoundError:
        # Handle case where 'git' command is not found
        # print("Warning: 'git' command not found. Cannot check repository status.")
        return False
    except Exception:
        # Catch any other potential errors during subprocess execution
        # print(f"Warning: Error checking git status in {workdir_path}: {e}")
        return False


def shorten_path(p: str | Path, max_length: int = 40) -> str:
    """
    Shortens a path string for display, similar to codex-cli's behavior.

    1. Replaces the home directory prefix with '~'.
    2. If the path is still longer than max_length, it removes components
       from the middle, replacing them with '...', keeping the beginning
       (root or ~) and the end (filename and potentially some parent dirs).

    Args:
        p: The path (string or Path object) to shorten.
        max_length: The maximum desired length for the output string.

    Returns:
        The shortened path string.
    """
    try:
        abs_path = Path(p).resolve()
        home = Path.home()
    except Exception:
        # Fallback if path resolution fails: naive string truncation
        return str(p)[:max_length] + ("..." if len(str(p)) > max_length else "")

    try:
        # Check if path is under home directory
        if abs_path == home or abs_path.is_relative_to(home):
            if abs_path == home:
                display_path = "~"
            else:
                # Use '/' for display consistency across platforms within '~' notation
                display_path = "~/" + str(abs_path.relative_to(home)).replace(os.sep, "/")
        else:
            display_path = str(abs_path)
    except ValueError:
        # is_relative_to throws ValueError if paths are on different drives (Windows)
        display_path = str(abs_path)
    except Exception:
        # Fallback for other potential errors
        display_path = str(abs_path)

    # Short enough already - return as-is
    if len(display_path) <= max_length:
        return display_path

    # Path is too long, apply shortening logic using '/' as separator for consistency
    display_path_unix = display_path.replace(os.sep, "/")
    parts = display_path_unix.split("/")

    # Filter out empty parts that might result from leading/trailing slashes or '//'
    parts = [part for part in parts if part]

    # Determine the prefix (e.g., '~/', '/')
    prefix = ""
    path_parts_for_suffix = parts  # Assume we use all parts for suffix initially

    if display_path.startswith("~"):
        prefix = "~/"
        # parts already excludes '~', path_parts_for_suffix remains parts
    elif abs_path.is_absolute():
        prefix = "/"  # Simple root prefix for display
        # path_parts_for_suffix remains parts

    # Need to handle Windows drive letters specifically if not under home
    # NOTE(review): after resolve(), a Windows drive path like C:\x is already
    # absolute, so the is_absolute() branch above appears to shadow this elif.
    # Looks like dead code - confirm intended behavior on Windows.
    elif os.name == "nt" and len(str(abs_path)) > 2 and str(abs_path)[1] == ":":
        drive = str(abs_path)[:2]
        prefix = drive + "/"  # Display as C:/
        # Adjust parts if they include the drive
        if parts and parts[0] == drive:
            path_parts_for_suffix = parts[1:]

    # Iterate backwards, adding components until max_length is approached
    best_fit = ""
    # Keep at least the filename (last part)
    min_parts_to_keep = 1 if path_parts_for_suffix else 0

    # Iterate keeping at least `min_parts_to_keep` up to all parts
    for i in range(min_parts_to_keep, len(path_parts_for_suffix) + 1):
        # Take the last 'i' parts for the suffix
        suffix_parts = path_parts_for_suffix[len(path_parts_for_suffix) - i :]
        suffix = "/".join(suffix_parts)  # Use '/' for joining

        # Construct candidate string
        candidate = prefix
        # Add ellipsis only if parts were actually omitted
        # Check if the number of suffix parts is less than total available parts
        if i < len(path_parts_for_suffix):
            candidate += ".../"
        candidate += suffix

        if len(candidate) <= max_length:
            best_fit = candidate  # Found a candidate that fits
            # Continue loop to find the longest possible fit that still fits
        else:
            # If adding this part made it too long, the *previous* best_fit was optimal
            # If this was the *first* part tried (i == min_parts_to_keep) and it's already too long,
            # best_fit will still be empty.
            break

    # If no candidate ever fit (e.g., prefix + ... + filename was too long)
    if not best_fit:
        # Fallback: ellipsis + truncated filename (keep the END of the filename,
        # which is usually the most distinctive part)
        filename = path_parts_for_suffix[-1] if path_parts_for_suffix else ""
        ellipsis_prefix = prefix + ".../" if prefix else ".../"
        available_chars = max_length - len(ellipsis_prefix)
        if available_chars < 1:
            return ellipsis_prefix[:max_length]  # Cannot even fit ellipsis+part
        return ellipsis_prefix + filename[-available_chars:]
    else:
        return best_fit


def short_cwd(max_length: int = 40) -> str:
    """Returns a shortened version of the current working directory."""
    return shorten_path(Path.cwd(), max_length)
import os
import shlex
import subprocess
from pathlib import Path

from openai.types.chat import ChatCompletionToolParam

# Working directory used when the caller does not supply one.
PROJECT_ROOT = Path.cwd()
# Maximum stdout/stderr lines returned unless full_stdout is requested.
DEFAULT_MAX_OUTPUT_LINES = 20


def execute_command_tool(
    command: str,
    cwd: str | None = None,
    is_sandboxed: bool = False,
    allowed_write_paths: list[Path] | None = None,
    full_stdout: bool = False,
) -> str:
    """
    Executes a shell command and returns its output (stdout and stderr).
    If is_sandboxed is True, attempts to run the command with shell=False within
    one of the allowed_write_paths.
    """
    if not command:
        return "Error: Empty command received."

    effective_cwd_path = Path(cwd) if cwd else PROJECT_ROOT
    try:
        # Resolve CWD to an absolute path to prevent relative path issues
        effective_cwd = effective_cwd_path.resolve(strict=True)
    except FileNotFoundError:
        return f"Error: Working directory '{effective_cwd_path}' not found."
33 | except Exception as e: 34 | return f"Error resolving working directory '{effective_cwd_path}': {e}" 35 | 36 | if not effective_cwd.is_dir(): 37 | return f"Error: Working directory '{effective_cwd}' is not a directory." 38 | 39 | # --- Sandboxing Logic --- 40 | if is_sandboxed: 41 | print(f"Attempting sandboxed execution for: '{command}'") 42 | if not allowed_write_paths: 43 | print(f"[Sandbox] No allowed_write_paths provided, using project root: {PROJECT_ROOT}") 44 | allowed_write_paths = [PROJECT_ROOT] 45 | 46 | # Ensure allowed paths are resolved absolute paths 47 | resolved_allowed_paths = [] 48 | for p in allowed_write_paths: 49 | try: 50 | resolved_allowed_paths.append(Path(p).resolve(strict=True)) 51 | except Exception as e: 52 | return f"Error resolving allowed writable path '{p}': {e}" 53 | 54 | # Check if the effective CWD is within one of the allowed paths 55 | is_cwd_allowed = False 56 | for allowed_path in resolved_allowed_paths: 57 | if effective_cwd == allowed_path or str(effective_cwd).startswith(str(allowed_path) + os.sep): 58 | is_cwd_allowed = True 59 | break 60 | if not is_cwd_allowed: 61 | allowed_paths_str = ", ".join([str(p) for p in resolved_allowed_paths]) 62 | return f"Error: Sandboxed command CWD '{effective_cwd}' is not within allowed paths: [{allowed_paths_str}]" 63 | 64 | # Use shell=False for safety - requires splitting the command string 65 | try: 66 | cmd_list = shlex.split(command) 67 | if not cmd_list: # Handle empty command after split 68 | return "Error: Empty command after parsing for sandbox execution." 
69 | print(f"Executing sandboxed (shell=False): {cmd_list} in '{effective_cwd}'") 70 | result = subprocess.run( 71 | cmd_list, # Pass list of args 72 | shell=False, # <<< IMPORTANT: No shell for sandboxed commands 73 | cwd=effective_cwd, 74 | capture_output=True, 75 | text=True, 76 | timeout=60, 77 | check=False, 78 | ) 79 | except Exception as e: 80 | # Catch errors during shlex.split or subprocess.run with shell=False 81 | return f"Error executing sandboxed command '{command}': {e}" 82 | 83 | # --- Default (Non-Sandboxed) Execution --- 84 | else: 85 | # Keep shell=True for now for non-sandboxed, but acknowledge the risk. 86 | # Consider switching to shell=False + shlex.split here too eventually. 87 | print(f"Executing command (shell=True): '{command}' in '{effective_cwd}'") 88 | try: 89 | result = subprocess.run( 90 | command, 91 | shell=True, # <<< Risk acknowledged 92 | cwd=effective_cwd, 93 | capture_output=True, 94 | text=True, 95 | timeout=60, 96 | check=False, 97 | ) 98 | except Exception as e: 99 | return f"Error executing command '{command}': {e}" 100 | 101 | # --- Process result (common for both sandboxed and non-sandboxed) --- 102 | try: 103 | output = f"Exit Code: {result.returncode}\n" 104 | stdout = result.stdout.strip() if result.stdout else "" 105 | stderr = result.stderr.strip() if result.stderr else "" 106 | 107 | if not full_stdout: 108 | stdout_lines = stdout.splitlines() 109 | stderr_lines = stderr.splitlines() 110 | if len(stdout_lines) > DEFAULT_MAX_OUTPUT_LINES: 111 | stdout = ( 112 | "\n".join(stdout_lines[:DEFAULT_MAX_OUTPUT_LINES]) 113 | + f"\n... ({len(stdout_lines) - DEFAULT_MAX_OUTPUT_LINES} more lines truncated)" 114 | ) 115 | if len(stderr_lines) > DEFAULT_MAX_OUTPUT_LINES: 116 | stderr = ( 117 | "\n".join(stderr_lines[:DEFAULT_MAX_OUTPUT_LINES]) 118 | + f"\n... 
({len(stderr_lines) - DEFAULT_MAX_OUTPUT_LINES} more lines truncated)" 119 | ) 120 | 121 | if stdout: 122 | output += f"--- stdout ---\n{stdout}\n" 123 | if stderr: 124 | output += f"--- stderr ---\n{stderr}\n" 125 | 126 | return output.strip() 127 | 128 | except subprocess.TimeoutExpired: 129 | return f"Error: Command '{command}' timed out after 60 seconds." 130 | except FileNotFoundError: 131 | return f"Error: Command not found or shell execution failed for '{command}'." 132 | except Exception as e: 133 | return f"Error processing result for command '{command}': {e}" 134 | 135 | 136 | EXECUTE_COMMAND_TOOL_DEF: ChatCompletionToolParam = { 137 | "type": "function", 138 | "function": { 139 | "name": "execute_command", 140 | "description": "Execute a CLI command on the user's system. Use this for system operations, running scripts, file manipulations (like mkdir, rm, mv), etc. Always prefer using dedicated file operation tools if available.", 141 | "parameters": { 142 | "type": "object", 143 | "properties": { 144 | "command": { 145 | "type": "string", 146 | "description": "The CLI command to execute (e.g., 'ls -la', 'python script.py', 'mkdir new_dir').", 147 | }, 148 | "cwd": { 149 | "type": "string", 150 | "description": "Optional working directory to execute the command in. Defaults to the project root.", 151 | }, 152 | # <<< It might be better *not* to expose sandbox/write paths/full_stdout to the LLM directly. 153 | # The agent should decide these based on context and policy. 154 | # Keeping them internal to the Python implementation. 
            },
            "required": ["command"],
        },
    },
}
import json
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Any

import yaml

from codexy.config import (
    DEFAULT_MEMORY_COMPRESSION_THRESHOLD_FACTOR,
    DEFAULT_MEMORY_ENABLE_COMPRESSION,
    DEFAULT_MEMORY_ENABLED,
    DEFAULT_MEMORY_KEEP_RECENT_MESSAGES,
    EMPTY_STORED_CONFIG,
    load_config,
)


class TestConfigLoadingMemory(unittest.TestCase):
    """Tests for how load_config populates the optional `memory` section."""

    def setUp(self):
        # Each test gets an isolated temp dir for its config/instructions files.
        self.temp_dir = TemporaryDirectory()
        self.temp_path = Path(self.temp_dir.name)
        # Create a dummy instructions file to prevent warnings/errors
        (self.temp_path / "instructions.md").write_text("Test instructions")

    def tearDown(self):
        self.temp_dir.cleanup()

    def _write_config(self, data: dict[str, Any], format: str = "json") -> Path:
        # Helper: serialize `data` as config.json or config.yaml in the temp dir.
        if format == "json":
            config_file = self.temp_path / "config.json"
            with open(config_file, "w") as f:
                json.dump(data, f, indent=2)
            return config_file
        elif format == "yaml":
            config_file = self.temp_path / "config.yaml"
            with open(config_file, "w") as f:
                yaml.dump(data, f)
            return config_file
        raise ValueError("Unsupported format")

    def test_load_config_no_memory_section(self):
        """Test loading config when memory section is entirely missing."""
        config_data: dict[str, Any] = {
            "model": "test-model",
            # No memory section
        }
        config_file = self._write_config(config_data)
        app_config = load_config(config_path=config_file, instructions_path=(self.temp_path / "instructions.md"))

        # Memory should be None if not enabled and not present
        self.assertIsNone(app_config.get("memory"))

    def test_load_config_memory_disabled_explicitly(self):
        """Test loading config when memory.enabled is false."""
        config_data: dict[str, Any] = {
            "model": "test-model",
            "memory": {
                "enabled": False,
                "enable_compression": True,  # This should be ignored if memory is disabled
            },
        }
        config_file = self._write_config(config_data)
        app_config = load_config(config_path=config_file, instructions_path=(self.temp_path / "instructions.md"))

        loaded_memory_config = app_config.get("memory")
        self.assertIsNotNone(loaded_memory_config)
        if loaded_memory_config:  # for type checker
            self.assertFalse(loaded_memory_config.get("enabled"))
            # Other fields should still be populated with defaults even if memory is disabled
            self.assertEqual(loaded_memory_config.get("enable_compression"), DEFAULT_MEMORY_ENABLE_COMPRESSION)
            self.assertEqual(
                loaded_memory_config.get("compression_threshold_factor"), DEFAULT_MEMORY_COMPRESSION_THRESHOLD_FACTOR
            )
            self.assertEqual(loaded_memory_config.get("keep_recent_messages"), DEFAULT_MEMORY_KEEP_RECENT_MESSAGES)

    def test_load_config_memory_enabled_no_compression_settings(self):
        """Test loading config when memory is enabled, but no specific compression settings."""
        config_data: dict[str, Any] = {"model": "test-model", "memory": {"enabled": True}}
        config_file = self._write_config(config_data)
        app_config = load_config(config_path=config_file, instructions_path=(self.temp_path / "instructions.md"))

        loaded_memory_config = app_config.get("memory")
        self.assertIsNotNone(loaded_memory_config)
        if loaded_memory_config:  # for type checker
            self.assertTrue(loaded_memory_config.get("enabled"))
            # All compression knobs should fall back to their documented defaults
            self.assertEqual(loaded_memory_config.get("enable_compression"), DEFAULT_MEMORY_ENABLE_COMPRESSION)
            self.assertEqual(
                loaded_memory_config.get("compression_threshold_factor"), DEFAULT_MEMORY_COMPRESSION_THRESHOLD_FACTOR
            )
            self.assertEqual(loaded_memory_config.get("keep_recent_messages"), DEFAULT_MEMORY_KEEP_RECENT_MESSAGES)

    def test_load_config_partial_memory_compression_settings(self):
        """Test loading config with memory enabled and partial compression settings."""
        config_data: dict[str, Any] = {
            "model": "test-model",
            "memory": {
                "enabled": True,
                "enable_compression": True,
                "keep_recent_messages": 10,
                # compression_threshold_factor is missing
            },
        }
        config_file = self._write_config(config_data)
        app_config = load_config(config_path=config_file, instructions_path=(self.temp_path / "instructions.md"))

        loaded_memory_config = app_config.get("memory")
        self.assertIsNotNone(loaded_memory_config)
        if loaded_memory_config:  # for type checker
            self.assertTrue(loaded_memory_config.get("enabled"))
            self.assertTrue(loaded_memory_config.get("enable_compression"))
            self.assertEqual(
                loaded_memory_config.get("compression_threshold_factor"), DEFAULT_MEMORY_COMPRESSION_THRESHOLD_FACTOR
            )  # Should be default
            self.assertEqual(loaded_memory_config.get("keep_recent_messages"), 10)  # Should be custom

    def test_load_config_full_memory_compression_settings(self):
        """Test loading config with memory enabled and all compression settings specified."""
        custom_threshold = 0.7
        custom_keep_recent = 3
        config_data: dict[str, Any] = {
            "model": "test-model",
            "memory": {
                "enabled": True,
                "enable_compression": True,
                "compression_threshold_factor": custom_threshold,
                "keep_recent_messages": custom_keep_recent,
            },
        }
        config_file = self._write_config(config_data)
        app_config = load_config(config_path=config_file, instructions_path=(self.temp_path / "instructions.md"))

        loaded_memory_config = app_config.get("memory")
        self.assertIsNotNone(loaded_memory_config)
        if loaded_memory_config:  # for type checker
            self.assertTrue(loaded_memory_config.get("enabled"))
            self.assertTrue(loaded_memory_config.get("enable_compression"))
            self.assertEqual(loaded_memory_config.get("compression_threshold_factor"), custom_threshold)
            self.assertEqual(loaded_memory_config.get("keep_recent_messages"), custom_keep_recent)

    def test_load_config_yaml_format(self):
        """Test loading config with memory settings from a YAML file."""
        custom_threshold = 0.65
        custom_keep_recent = 7
        config_data: dict[str, Any] = {
            "model": "test-model-yaml",
            "memory": {
                "enabled": True,
                "enable_compression": False,
                "compression_threshold_factor": custom_threshold,
                "keep_recent_messages": custom_keep_recent,
            },
        }
        config_file = self._write_config(config_data, format="yaml")
        app_config = load_config(config_path=config_file, instructions_path=(self.temp_path / "instructions.md"))

        loaded_memory_config = app_config.get("memory")
        self.assertIsNotNone(loaded_memory_config)
        if loaded_memory_config:  # for type checker
            self.assertTrue(loaded_memory_config.get("enabled"))
            self.assertFalse(loaded_memory_config.get("enable_compression"))
            self.assertEqual(loaded_memory_config.get("compression_threshold_factor"), custom_threshold)
            self.assertEqual(loaded_memory_config.get("keep_recent_messages"), custom_keep_recent)

    def test_empty_stored_config_defaults(self):
        """Verify that EMPTY_STORED_CONFIG has the correct default memory settings."""
        memory_defaults = EMPTY_STORED_CONFIG.get("memory")
        self.assertIsNotNone(memory_defaults)
        if memory_defaults:  # for type checker
            self.assertEqual(memory_defaults.get("enabled"), DEFAULT_MEMORY_ENABLED)
            self.assertEqual(memory_defaults.get("enable_compression"), DEFAULT_MEMORY_ENABLE_COMPRESSION)
            self.assertEqual(memory_defaults.get("compression_threshold_factor"), DEFAULT_MEMORY_COMPRESSION_THRESHOLD_FACTOR)
            self.assertEqual(memory_defaults.get("keep_recent_messages"), DEFAULT_MEMORY_KEEP_RECENT_MESSAGES)


if __name__ == "__main__":
    unittest.main()
from rich.text import Text
from textual import events
from textual.app import ComposeResult
from textual.message import Message
from textual.reactive import reactive
from textual.widgets import Label, OptionList, Static
from textual.widgets.option_list import Option, OptionDoesNotExist

from ....approvals import ApprovalMode


# --- Widget ---
class ApprovalModeOverlay(Static):
    """A component for selecting an approval mode."""

    # --- Messages ---
    class ApprovalModeSelected(Message):
        """Sent when a user selects a new approval mode."""

        def __init__(self, mode: ApprovalMode):
            self.mode: ApprovalMode = mode
            super().__init__()

    class ExitApprovalOverlay(Message):
        """Sent when a user cancels selection, closing the overlay."""

        pass

    DEFAULT_CSS = """
    ApprovalModeOverlay {
        layer: approval_overlay_layer;
        display: none;
        align: center middle;
        width: 80%;
        max-width: 60;
        height: auto;
        max-height: 15;
        border: thick $accent;
        background: $panel;
        padding: 1;
    }
    ApprovalModeOverlay.-active {
        display: block;
    }
    ApprovalModeOverlay #approval-overlay-title {
        width: 100%;
        text-align: center;
        margin-bottom: 1;
        text-style: bold;
    }
    ApprovalModeOverlay
#current-mode-label { 52 | width: 100%; 53 | text-align: center; 54 | color: $text-muted; 55 | margin-bottom: 1; 56 | } 57 | ApprovalModeOverlay OptionList { 58 | border: none; 59 | background: $panel-darken-1; 60 | max-height: 10; 61 | height: auto; 62 | min-height: 1; 63 | } 64 | ApprovalModeOverlay #approval-overlay-footer { 65 | margin-top: 1; 66 | width: 100%; 67 | text-align: center; 68 | color: $text-muted; 69 | } 70 | ApprovalModeOverlay OptionList Option { 71 | padding: 0 1; 72 | height: 1; 73 | } 74 | ApprovalModeOverlay OptionList Option :hover { 75 | background: $accent-darken-1; 76 | } 77 | ApprovalModeOverlay OptionList Option.--highlight { 78 | background: $accent !important; 79 | color: $text !important; 80 | } 81 | ApprovalModeOverlay OptionList Option.--highlight:focus { 82 | background: $accent-darken-1 !important; 83 | } 84 | """ 85 | 86 | # --- Reactives --- 87 | current_mode: reactive[ApprovalMode] = reactive(ApprovalMode.SUGGEST) 88 | _option_list_id = "approval-mode-option-list" 89 | 90 | def compose(self) -> ComposeResult: 91 | """Build the UI elements of the overlay.""" 92 | yield Label("Switch Approval Mode", id="approval-overlay-title") 93 | yield Label(f"Current: {self.current_mode.value}", id="current-mode-label") 94 | # Use OptionList to display options 95 | yield OptionList(id=self._option_list_id) 96 | yield Label("↑/↓ Select, Enter Confirm, Esc Cancel", id="approval-overlay-footer") 97 | 98 | def on_mount(self) -> None: 99 | """Mounted, fill the list and set focus.""" 100 | self.log.info("ApprovalModeOverlay mounted.") 101 | # Use call_later to schedule _populate_list and focus_list 102 | self.call_later(self._populate_list) 103 | self.call_later(self.focus_list) 104 | 105 | def focus_list(self) -> None: 106 | """Safely focus the OptionList.""" 107 | try: 108 | option_list = self.query_one(f"#{self._option_list_id}", OptionList) 109 | if option_list.is_mounted: 110 | self.log.info("Focusing approval mode OptionList.") 111 | 
option_list.focus() 112 | else: 113 | self.log.warning("Attempted to focus OptionList but it was not mounted.") 114 | except Exception as e: 115 | self.log.error(f"Error focusing approval mode list: {e}") 116 | 117 | # --- Watchers --- 118 | def watch_current_mode(self, new_mode: ApprovalMode) -> None: 119 | """When current_mode changes, update the label and repopulate the list to update the highlight.""" 120 | self.log.info(f"Watched current_mode change to {new_mode.value}. Repopulating list.") 121 | try: 122 | label = self.query_one("#current-mode-label", Label) 123 | label.update(f"Current: {new_mode.value}") 124 | # Repopulate the list to ensure the highlight is correct 125 | self._populate_list() 126 | except Exception as e: 127 | # If this happens during unmount, ignore the error 128 | if self.is_mounted: 129 | self.log.warning(f"Could not update current mode label or list: {e}") 130 | 131 | # --- Internal Methods --- 132 | def _populate_list(self): 133 | """Populate the OptionList with approval modes.""" 134 | if not self.is_mounted: 135 | self.log.warning("Attempted to populate list, but overlay is not mounted.") 136 | return 137 | self.log.info(f"Executing _populate_list for mode {self.current_mode.value}") 138 | try: 139 | option_list = self.query_one(f"#{self._option_list_id}", OptionList) 140 | self.log.info(f"Found OptionList widget: {option_list}") 141 | option_list.clear_options() 142 | self.log.info("Cleared existing options.") 143 | 144 | highlighted_index: int | None = None 145 | options_to_add = [] 146 | 147 | # Create Option for each approval mode 148 | for index, mode in enumerate(ApprovalMode): 149 | description = "" 150 | style = "" 151 | if mode == ApprovalMode.SUGGEST: 152 | description = "Ask for edits & commands" 153 | elif mode == ApprovalMode.AUTO_EDIT: 154 | description = "Auto-edit files, ask for commands" 155 | elif mode == ApprovalMode.FULL_AUTO: 156 | description = "Auto-edit & sandboxed commands" 157 | elif mode == 
ApprovalMode.DANGEROUS_AUTO: 158 | description = "Auto-approve all (UNSAFE)" 159 | style = "bold red" 160 | 161 | display_text = Text.assemble( 162 | (f"{mode.value}", "bold" if mode == self.current_mode else ""), 163 | (f" - {description}", f"dim {style}" if style else "dim"), 164 | ) 165 | options_to_add.append(Option(display_text, id=mode.value)) 166 | if mode == self.current_mode: 167 | highlighted_index = index 168 | 169 | self.log.info(f"Prepared {len(options_to_add)} options to add.") 170 | option_list.add_options(options_to_add) 171 | self.log.info(f"Called add_options. OptionList now has {option_list.option_count} options.") 172 | 173 | if highlighted_index is not None and option_list.option_count > 0: 174 | try: 175 | self.log.info(f"Attempting to highlight index: {highlighted_index}") 176 | option_list.highlighted = highlighted_index 177 | self.log.info(f"Highlighted index set to {highlighted_index}") 178 | except OptionDoesNotExist: 179 | # If the index is invalid (should not happen unless the list is empty), record a warning 180 | self.log.warning(f"Could not highlight approval mode index {highlighted_index}") 181 | except Exception as high_e: 182 | self.log.error(f"Error setting highlighted index: {high_e}") 183 | 184 | except Exception as e: 185 | if self.is_mounted: 186 | self.log.error(f"Error populating approval mode list: {e}") 187 | 188 | # --- Event Handlers --- 189 | def on_option_list_option_selected(self, event: OptionList.OptionSelected) -> None: 190 | """Handle the selection event in the OptionList.""" 191 | event.stop() 192 | option_id = event.option.id 193 | if option_id is not None: 194 | try: 195 | selected_mode = ApprovalMode(option_id) 196 | self.log.info(f"Approval mode selected: {selected_mode.value}") 197 | self.post_message(self.ApprovalModeSelected(selected_mode)) 198 | except ValueError: 199 | self.log.error(f"Invalid approval mode ID selected: {option_id}") 200 | self.post_message(self.ExitApprovalOverlay()) 201 | else: 202 | 
# If the option has no ID (should not happen), also exit 203 | self.log.warning("Selected option has no ID.") 204 | self.post_message(self.ExitApprovalOverlay()) 205 | 206 | def on_key(self, event: events.Key) -> None: 207 | """Handle key events, especially Escape key.""" 208 | if event.key == "escape": 209 | event.stop() 210 | self.log.info("Approval overlay exited via Escape.") 211 | self.post_message(self.ExitApprovalOverlay()) 212 | -------------------------------------------------------------------------------- /codexy/tools/apply_diff_tool.py: -------------------------------------------------------------------------------- 1 | import re 2 | from pathlib import Path 3 | 4 | from openai.types.chat import ChatCompletionToolParam 5 | 6 | PROJECT_ROOT = Path.cwd() 7 | 8 | 9 | def parse_diff_blocks(diff_text: str) -> list[tuple[int, str, str]]: 10 | """Parses the multi-block diff string into individual (start_line, search, replace) tuples.""" 11 | blocks = [] 12 | # Regex to find SEARCH blocks with start_line, search content, and replace content 13 | # It handles potential variations in whitespace and the presence of the ------- and ======= markers. 14 | # It uses non-greedy matching (.*?) to avoid consuming subsequent blocks. 
15 | pattern = re.compile( 16 | r"^\s*<<<<<<<\s*SEARCH\s*\n" # Start marker 17 | r":start_line:(\d+)\s*\n" # Capture start line number 18 | r"-{2,}\s*\n" # Separator (at least two hyphens) 19 | r"(.*?)" # Capture search content (non-greedy) 20 | r"={2,}\s*\n" # Separator (at least two equals signs) 21 | r"(.*?)" # Capture replace content (non-greedy) 22 | r">>>>>>>\s*REPLACE\s*$", # End marker 23 | re.MULTILINE | re.DOTALL, # Multiline and Dotall flags 24 | ) 25 | 26 | for match in pattern.finditer(diff_text): 27 | start_line = int(match.group(1)) 28 | search_content = match.group(2) 29 | replace_content = match.group(3) 30 | # Important: Normalize line endings in search/replace content for comparison 31 | search_content = search_content.replace("\r\n", "\n") 32 | replace_content = replace_content.replace("\r\n", "\n") 33 | blocks.append((start_line, search_content, replace_content)) 34 | 35 | if not blocks and diff_text.strip(): # Check if parsing failed but diff wasn't empty 36 | raise ValueError("Diff text provided but could not parse any valid SEARCH/REPLACE blocks.") 37 | 38 | # Sort blocks by start line descending to apply changes from bottom up, 39 | # which avoids messing up line numbers for subsequent changes in the same file. 40 | blocks.sort(key=lambda x: x[0], reverse=True) 41 | return blocks 42 | 43 | 44 | def apply_diff_tool(path: str, diff: str) -> str: 45 | """Applies changes to a file based on a diff string.""" 46 | if not path: 47 | return "Error: 'path' argument is required." 48 | if not diff: 49 | return "Error: 'diff' argument is required and cannot be empty." 50 | 51 | file_path = PROJECT_ROOT / path 52 | 53 | # --- Path Validation --- 54 | try: 55 | resolved_path = file_path.resolve(strict=True) # Must exist for diff 56 | if not str(resolved_path).startswith(str(PROJECT_ROOT)): 57 | return f"Error: Attempted to modify file outside of project root: {path}" 58 | if not resolved_path.is_file(): 59 | return f"Error: Path '{path}' is not a file." 
60 | except FileNotFoundError: 61 | return f"Error: File not found at '{path}' (resolved to '{file_path}')" 62 | except Exception as e: 63 | return f"Error resolving path '{path}': {e}" 64 | 65 | # --- Parse Diff Blocks --- 66 | try: 67 | diff_blocks = parse_diff_blocks(diff) 68 | if not diff_blocks: 69 | return "Error: No valid SEARCH/REPLACE blocks found in the provided diff." 70 | except ValueError as e: 71 | return f"Error parsing diff: {e}" 72 | except Exception as e: 73 | return f"Unexpected error parsing diff: {e}" 74 | 75 | # --- Read File Content --- 76 | try: 77 | with open(resolved_path, encoding="utf-8") as f: 78 | original_lines = f.readlines() # Read lines into a list 79 | except Exception as e: 80 | return f"Error reading file '{path}' for diff application: {e}" 81 | 82 | # --- Apply Changes (Bottom-Up) --- 83 | modified_lines = list(original_lines) # Create a mutable copy 84 | applied_count = 0 85 | errors = [] 86 | 87 | for start_line, search_content, replace_content in diff_blocks: 88 | start_idx = start_line - 1 # Convert to 0-based index 89 | 90 | if "\n" not in search_content: 91 | if start_idx < len(modified_lines): 92 | current_line = modified_lines[start_idx].rstrip("\r\n") 93 | if current_line == search_content: 94 | modified_lines[start_idx] = replace_content + "\n" 95 | applied_count += 1 96 | print(f"Successfully applied single-line diff block at line {start_line} to {path}") 97 | continue 98 | else: 99 | print(f"Single line mismatch - expected: '{search_content}', actual: '{current_line}'") 100 | 101 | errors.append( 102 | f"Error applying block starting at line {start_line}: SEARCH content does not exactly match file content." 
103 | ) 104 | continue 105 | 106 | # Multi-line processing 107 | search_lines = search_content.splitlines() 108 | num_search_lines = len(search_lines) 109 | 110 | # Check bounds 111 | if start_idx < 0 or start_idx + num_search_lines > len(modified_lines): 112 | errors.append( 113 | f"Error applying block starting at line {start_line}: Line range [{start_line}-{start_line + num_search_lines - 1}] is out of bounds (file has {len(modified_lines)} lines)." 114 | ) 115 | continue # Skip this block 116 | 117 | # Extract corresponding lines from file and remove line endings for content comparison 118 | match = True 119 | for i, search_line in enumerate(search_lines): 120 | file_line = modified_lines[start_idx + i].rstrip("\r\n") 121 | if file_line != search_line: 122 | print(f"Line {start_line + i} mismatch: '{file_line}' != '{search_line}'") 123 | match = False 124 | break 125 | 126 | if match: 127 | # Apply replacement 128 | replace_lines = [line + "\n" for line in replace_content.splitlines()] 129 | if not replace_lines: # Handle empty replacement content 130 | replace_lines = [""] 131 | 132 | modified_lines[start_idx : start_idx + num_search_lines] = replace_lines 133 | applied_count += 1 134 | print(f"Successfully applied multi-line diff block starting at line {start_line} to {path}") 135 | else: 136 | errors.append( 137 | f"Error applying block starting at line {start_line}: SEARCH content does not exactly match file content." 138 | ) 139 | 140 | # --- Write Modified Content Back --- 141 | if applied_count > 0 and not errors: # Only write if at least one block applied and no errors occurred 142 | try: 143 | with open(resolved_path, "w", encoding="utf-8") as f: 144 | f.writelines(modified_lines) 145 | return f"Successfully applied {applied_count} diff block(s) to '{path}'." 
146 | except Exception as e: 147 | return f"Error writing modified content back to '{path}' after applying diffs: {e}" 148 | elif errors: 149 | error_summary = "\n".join(errors) 150 | return f"Failed to apply diff to '{path}'. {applied_count} block(s) applied before encountering errors:\n{error_summary}" 151 | else: # No blocks applied (e.g., all failed matching) 152 | return f"Failed to apply diff to '{path}'. No matching SEARCH blocks found or all blocks failed." 153 | 154 | 155 | APPLY_DIFF_TOOL_DEF: ChatCompletionToolParam = { 156 | "type": "function", 157 | "function": { 158 | "name": "apply_diff", 159 | "description": "Apply a specific change to a file using a search/replace block format. The SEARCH block must exactly match existing content.", 160 | "parameters": { 161 | "type": "object", 162 | "properties": { 163 | "path": { 164 | "type": "string", 165 | "description": "The relative path of the file to modify.", 166 | }, 167 | "diff": { 168 | "type": "string", 169 | "description": """A string defining the changes in SEARCH/REPLACE block format. 170 | 171 | **EXACT FORMAT REQUIRED:** 172 | ``` 173 | <<<<<<< SEARCH 174 | :start_line:LINE_NUMBER 175 | ------- 176 | EXACT_CONTENT_TO_FIND 177 | ======= 178 | NEW_CONTENT_TO_REPLACE_WITH 179 | >>>>>>> REPLACE 180 | ``` 181 | 182 | **CRITICAL RULES:** 183 | 1. Must start with `<<<<<<< SEARCH` (exactly 7 < symbols + space + SEARCH) 184 | 2. Next line: `:start_line:NUMBER` where NUMBER is the 1-based line number where SEARCH content starts 185 | 3. Separator: At least 2 hyphens `--` or more `-------` on their own line 186 | 4. EXACT_CONTENT_TO_FIND: Must match the file content character-for-character (including whitespace) 187 | 5. Separator: At least 2 equals `==` or more `=======` on their own line 188 | 6. NEW_CONTENT_TO_REPLACE_WITH: The replacement content 189 | 7. 
Must end with `>>>>>>> REPLACE` (exactly 7 > symbols + space + REPLACE) 190 | 191 | **EXAMPLE - Single line change:** 192 | ``` 193 | <<<<<<< SEARCH 194 | :start_line:5 195 | ------- 196 | import os 197 | ======= 198 | import os 199 | from pathlib import Path 200 | >>>>>>> REPLACE 201 | ``` 202 | 203 | **EXAMPLE - Multi-line change:** 204 | ``` 205 | <<<<<<< SEARCH 206 | :start_line:10 207 | ------- 208 | def old_function(): 209 | return "old" 210 | ======= 211 | def new_function(): 212 | return "new" 213 | # Added comment 214 | >>>>>>> REPLACE 215 | ``` 216 | 217 | **IMPORTANT NOTES:** 218 | - The SEARCH content must match the file EXACTLY (same indentation, spaces, etc.) 219 | - Line numbers are 1-based (first line = 1, not 0) 220 | - Multiple blocks can be concatenated in one diff string 221 | - Each block is processed independently""", 222 | }, 223 | }, 224 | "required": ["path", "diff"], 225 | }, 226 | }, 227 | } 228 | -------------------------------------------------------------------------------- /README_ZH.md: -------------------------------------------------------------------------------- 1 |

2 | Codexy Logo 3 |

4 | 5 |

Codexy

6 |

一个在终端中运行的轻量级编码助手(OpenAI Codex CLI Python 版本)

7 | 8 |

9 | 中文 | English 10 |

11 | 12 |

pip install -U codexy

13 | 14 | ![Codexy demo GIF](./assets/codexy-demo.gif) 15 | 16 | ![Codexy demo 2 GIF](./assets/codexy-demo-2.gif) 17 | 18 | --- 19 | 20 |
21 | 目录 22 | 23 | - [原始 TypeScript 版本](#原始-typescript-版本) 24 | - [实验性技术免责声明](#实验性技术免責声明) 25 | - [快速开始](#快速开始) 26 | - [为何选择 Codexy?](#为何选择-codexy) 27 | - [安全模型与权限](#安全模型与权限) 28 | - [系统要求](#系统要求) 29 | - [CLI 参考](#cli-参考) 30 | - [配置](#配置) 31 | - [项目文档 (Project Docs)](#项目文档-project-docs) 32 | - [贡献](#贡献) 33 | - [许可证](#许可证) 34 | - [零数据保留 (ZDR) 组织限制](#零数据保留-zdr-组织限制) 35 | 36 |
37 | 38 | --- 39 | 40 | ## 原始 TypeScript 版本 41 | 42 | 本项目是原始 OpenAI Codex CLI 的 Python 重新实现,原版使用 TypeScript 编写。您可以在这里找到原始仓库: 43 | 44 | [openai/codex (TypeScript)](https://github.com/openai/codex) 45 | 46 | 这个 Python 版本旨在利用 Python 工具和库提供类似的功能。 47 | 48 | ## 实验性技术免責声明 49 | 50 | Codexy (Codex CLI 的 Python 实现) 是一个正在积极开发中的实验性项目。它尚未稳定,可能包含错误、未完成的功能,或可能发生破坏性更改。我们正在与社区一起公开构建它,并欢迎: 51 | 52 | - 错误报告 53 | - 功能请求 54 | - 拉取请求 (Pull Requests) 55 | - 积极的反馈 56 | 57 | 请通过提交 Issues 或 PRs 帮助我们改进(参见贡献部分)! 58 | 59 | ## 快速开始 60 | 61 | 使用 pip 全局安装: 62 | 63 | ```shell 64 | pip install -U codexy 65 | ``` 66 | 67 | 接下来,设置您的 OpenAI API 密钥作为环境变量: 68 | 69 | ```shell 70 | export OPENAI_API_KEY="你的-API-密钥" 71 | # 可选:为 OpenAI API 设置自定义基础 URL(例如,用于代理或自托管服务) 72 | # export OPENAI_BASE_URL="你的自定义基础URL" 73 | # 可选:设置 API 请求超时时间(毫秒) 74 | # export OPENAI_TIMEOUT_MS="300000" # 例如:300000 代表 5 分钟 75 | ``` 76 | 77 | > **注意:** 此命令仅为当前终端会话设置密钥。要使其永久生效,请将 `export` 行添加到您的 shell 配置文件中(例如 `~/.zshrc`、`~/.bashrc`)。 78 | > 79 | > **提示:** 您也可以将 API 密钥和其他环境变量放在项目根目录下的 `.env` 文件中: 80 | > 81 | > ```env 82 | > OPENAI_API_KEY=你的-API-密钥 83 | > # 可选: 84 | > # OPENAI_BASE_URL=你的自定义基础URL 85 | > # OPENAI_TIMEOUT_MS=300000 86 | > ``` 87 | > 88 | > CLI 将使用 `python-dotenv` 自动加载 `.env` 文件中的变量。 89 | 90 | 以交互模式运行: 91 | 92 | ```shell 93 | codexy 94 | ``` 95 | 96 | 或者,将提示作为输入运行(并可选择 `Full Auto` 模式): 97 | 98 | ```shell 99 | codexy "请解释一下这个代码库" 100 | ``` 101 | 102 | ```shell 103 | # 请谨慎使用自动批准模式 104 | codexy --approval-mode full-auto "创建一个最炫酷的待办事项列表应用" 105 | ``` 106 | 107 | 就是这样 – Codexy 将与 OpenAI API 交互,建议文件更改或命令,并(根据您的批准模式)执行它们。 108 | 109 | --- 110 | 111 | ## 为何选择 Codexy? 
112 | 113 | Codexy 旨在将原始 Codex CLI 的强大功能带入 Python 生态系统。它专为偏好 Python 工具或希望将智能编码代理能力集成到 Python 工作流中的开发人员而构建。 114 | 115 | - **熟悉的 Python 技术栈:** 使用常见的 Python 库,如 `click`、`textual`、`openai`、`httpx`。 116 | - **终端原生:** 为习惯在终端中工作的开发人员设计。 117 | - **智能代理能力:** 理解提示,与代码交互,建议文件编辑,并能执行命令。 118 | - **可配置的批准:** 控制代理拥有的自主权级别。 119 | - **开源:** 贡献其发展并了解其工作原理。 120 | 121 | --- 122 | 123 | ## 安全模型与权限 124 | 125 | Codexy 允许您通过 `--approval-mode` 标志(或配置文件)决定代理的自主程度。这些模式决定了哪些操作需要您的明确确认: 126 | 127 | | 模式 | 代理无需询问即可执行的操作 | 仍需批准的操作 | 备注 | 128 | | ------------------------- | ------------------------------------------------------ | ------------------------------------------- | ------------------------------------------------------------------------ | 129 | | **Suggest**
(默认) | • 读取文件
• 运行已知的安全只读命令¹ | • **所有** 文件编辑/写入
• Shell 命令 | 最安全的模式,大多数操作都需要确认。 | 130 | | **Auto Edit** | • 读取文件
• 应用文件编辑/写入
• 安全的只读操作¹ | • Shell 命令 | 自动修改文件,但在运行命令前会询问。 | 131 | | **Full Auto** | • 读/写文件
• 运行 Shell 命令²
• 安全的只读操作¹ | – | 尝试自动批准,**但沙盒功能尚未实现**。 | 132 | | **Dangerous Auto** | • 读/写文件
• 运行 Shell 命令 | – | **不安全**。无需沙盒自动批准所有操作。请极其谨慎使用。 | 133 | 134 | ¹ *已知的安全只读命令包括 `ls`、`cat`、`pwd`、`git status` 等。可通过配置文件中的 `safe_commands` 进行用户配置。* 135 | ² *虽然 `full-auto` 旨在实现沙盒执行,但此 Python 版本**尚未实现沙盒功能**。命令将直接运行。* 136 | 137 | **⚠️ 重要提示:** Python 版本 (`codexy`) 目前**缺少**原始 TypeScript 版本中的平台特定沙盒机制(如 macOS Seatbelt 或 Docker/iptables)。在 `full-auto` 模式下,命令会在您的系统上直接执行,工具本身不会施加网络或文件系统限制。`dangerous-auto` 模式明确地在非沙盒环境中运行所有操作。请极其谨慎地使用自动批准模式,尤其是在不受信任的环境或未受版本控制的仓库中。 138 | 139 | 如果您在 `auto-edit`、`full-auto` 或 `dangerous-auto` 模式下启动,并且当前目录未被 Git 跟踪,Codexy 会显示警告/确认信息,以便您通过版本控制获得安全保障。 140 | 141 | --- 142 | 143 | ## 系统要求 144 | 145 | | 要求 | 详情 | 146 | | ------------ | ------------------------------------------ | 147 | | 操作系统 | Linux, macOS, Windows (跨平台) | 148 | | Python | **3.10 或更高版本** (见 `pyproject.toml`) | 149 | | Pip | 用于安装 | 150 | | Git (可选) | 推荐用于安全保障 | 151 | | 依赖项 | `click`, `textual`, `openai` 等 | 152 | 153 | --- 154 | 155 | ## CLI 参考 156 | 157 | ``` 158 | 用法: codexy [OPTIONS] [PROMPT] 159 | 160 | Codex 代理的交互式 REPL。 161 | 162 | codexy 交互式 REPL 163 | codexy "..." 
交互式 REPL 的初始提示 164 | 165 | 选项: 166 | --model, -m TEXT 要使用的模型 (例如, o4-mini)。 167 | --image, -i PATH 要包含作为输入的图像文件的路径。 (未完全实现) 168 | --view, -v PATH 检查先前保存的 rollout 而不是开始会话。 (未实现) 169 | --quiet, -q 非交互模式,仅打印助手最终输出。(未实现) 170 | --config, -c 在编辑器中打开指令文件。 171 | --writable-root, -w PATH full-auto 模式下的可写文件夹 (未来用于沙盒)。 172 | --approval-mode, -a [suggest|auto-edit|full-auto|dangerous-auto] 173 | 覆盖批准策略。 174 | --auto-edit --approval-mode=auto-edit 的别名。 175 | --full-auto --approval-mode=full-auto 的别名。 176 | --no-project-doc 不自动包含仓库的 codex.md。 177 | --project-doc PATH 包含一个额外的 markdown 文件作为上下文。 178 | --full-stdout 不截断命令的 stdout/stderr 输出。 179 | --notify 启用桌面通知。(未实现) 180 | --flex-mode 启用 "flex-mode" 服务层级。(未实现) 181 | --dangerously-auto-approve-everything 182 | --approval-mode=dangerous-auto 的别名。 183 | --full-context, -f 以 "full-context" 模式启动。(未实现) 184 | --version 显示版本信息并退出。 185 | -h, --help 显示此帮助消息并退出。 186 | 187 | 命令: 188 | completion 生成 shell 自动补全脚本。 189 | ``` 190 | 191 | **应用内命令 (TUI 中):** 192 | 193 | | 命令 | 描述 | 194 | | --------------- | --------------------------------------- | 195 | | `/help` | 显示命令和快捷键帮助 | 196 | | `/model` | 切换 LLM 模型 (若在首次响应前) | 197 | | `/approval` | 切换自动批准模式 | 198 | | `/history` | 显示当前会话的命令历史 | 199 | | `/clear` | 清除屏幕和当前对话上下文 | 200 | | `/clearhistory` | 清除磁盘上的命令历史文件 | 201 | | `/bug` | 在浏览器中打开预填信息的 Bug 报告 (未实现) | 202 | | `/compact` | 将上下文压缩成摘要 (未实现) | 203 | | `q` / `exit` | 退出应用 | 204 | 205 | --- 206 | 207 | ## 配置 208 | 209 | Codexy 在 `~/.codexy/` 目录中查找配置文件 (注意目录名是 `codexy`)。 210 | 211 | - **`~/.codexy/config.yaml`** (或 `.yml`, `.json`): 主要配置文件。 212 | - **`~/.codexy/instructions.md`**: 全局自定义指令文件。 213 | - **`~/.codexy/history.json`**: 存储命令历史。 214 | 215 | **`config.yaml` 示例:** 216 | 217 | ```yaml 218 | # ~/.codexy/config.yaml 219 | model: o4-mini # 默认使用的模型 220 | approval_mode: suggest # suggest | auto-edit | full-auto | dangerous-auto 221 | full_auto_error_mode: ask-user # ask-user | ignore-and-continue 222 | notify: false # 启用桌面通知 (未完全实现) 223 | history: 224 | 
max_size: 1000 225 | save_history: true 226 | safe_commands: # 在 'suggest' 模式下可安全自动批准的命令 227 | - git status 228 | - ls -la 229 | ``` 230 | 231 | **`instructions.md` 示例:** 232 | 233 | ```markdown 234 | - Python 变量始终使用 snake_case。 235 | - 为所有函数定义添加类型提示。 236 | - 优先使用 f-string 进行格式化。 237 | ``` 238 | 239 | ### 记忆压缩 240 | 241 | 为了帮助管理对话历史并防止超过模型的上下文长度限制,Codexy 包含了记忆压缩功能。启用后,它会自动压缩对话历史中较旧的部分。 242 | 243 | **工作原理:** 244 | 245 | * 初始系统提示(来自 `instructions.md` 或项目文档)始终保留(如果存在)。 246 | * 对话中可配置数量的最新消息保持未压缩状态。 247 | * 初始系统提示和最新消息之间的消息将被替换为单个系统通知,表明历史的一部分已被汇总(例如:`[System: X previous message(s) were summarized due to context length constraints.]`)。 248 | 249 | **配置:** 250 | 251 | 这些设置在您的 `~/.codexy/config.json` 或 `~/.codexy/config.yaml` 文件的 `memory` 对象中配置。 252 | 253 | * `enable_compression` (布尔值): 254 | * 设置为 `true` 以启用记忆压缩功能。 255 | * 默认值: `false`。 256 | * `compression_threshold_factor` (浮点数): 257 | * 介于 0.0 和 1.0 之间的值。当当前对话历史的估计令牌数超过此因子乘以模型的最大上下文窗口大小时,将触发压缩。 258 | * 例如,如果模型的最大令牌数为 4096,此因子为 `0.8`,则当历史超过大约 3277 个令牌时将尝试压缩。 259 | * 默认值: `0.8`。 260 | * `keep_recent_messages` (整数): 261 | * 在对话末尾始终保持未压缩的最新消息数量。 262 | * 默认值: `5`。 263 | 264 | **`config.json` 示例:** 265 | 266 | ```json 267 | { 268 | "model": "o4-mini", 269 | "memory": { 270 | "enable_compression": true, 271 | "compression_threshold_factor": 0.75, 272 | "keep_recent_messages": 10 273 | } 274 | // ... 其他设置 275 | } 276 | ``` 277 | 278 | **`config.yaml` 示例:** 279 | 280 | ```yaml 281 | model: o4-mini 282 | memory: 283 | enable_compression: true 284 | compression_threshold_factor: 0.75 285 | keep_recent_messages: 10 286 | # ... 
其他设置 287 | ``` 288 | 289 | --- 290 | 291 | ## 项目文档 (Project Docs) 292 | 293 | 与原始 Codex CLI 类似,Codexy 可以从 `codex.md` (或 `.codex.md`, `CODEX.md`) 文件加载项目特定的上下文。 294 | 295 | 它首先搜索当前目录,然后向上遍历直到找到 Git 根目录 (`.git` 目录)。如果找到该文件,其内容将附加到您的全局 `instructions.md` 文件之后。 296 | 297 | 使用 `--no-project-doc` 或设置环境变量 `CODEXY_DISABLE_PROJECT_DOC=1` 可禁用此行为。 298 | 299 | --- 300 | 301 | ## 贡献 302 | 303 | 欢迎贡献!请参考主项目的 [CONTRIBUTING 指南](https://github.com/openai/codex/blob/main/README.md#contributing)。 304 | 305 | 针对 Python 开发: 306 | 307 | - 本项目使用 [PDM](https://pdm-project.org/) 进行依赖管理。 308 | - 安装依赖: `pdm install -G:dev` 309 | - 运行测试: `pdm run pytest` 310 | - 格式化代码: `pdm run ruff format .` 311 | - 代码检查: `pdm run ruff check .` 312 | 313 | --- 314 | 315 | ## 许可证 316 | 317 | 本项目使用 Apache-2.0 许可证。请参阅 [LICENSE](./LICENSE) 文件。 318 | 319 | --- 320 | 321 | ## 零数据保留 (ZDR) 组织限制 322 | 323 | > **注意:** Codexy (Python) 目前继承了与原始 Codex CLI 相同的限制,并且**不支持**启用了[零数据保留 (ZDR)](https://platform.openai.com/docs/guides/your-data#zero-data-retention) 的 OpenAI 组织,因为它依赖于与 ZDR 不兼容的 API 功能。如果您的组织使用 ZDR,您可能会遇到 400 错误。 324 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2025 OpenAI 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /tests/tools/test_apply_diff_tool.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from pathlib import Path 3 | 4 | import pytest 5 | 6 | sys.path.insert(0, str(Path(__file__).parent.parent.parent)) 7 | 8 | from codexy.tools.apply_diff_tool import apply_diff_tool, parse_diff_blocks 9 | 10 | # --- Test Fixtures --- 11 | 12 | 13 | @pytest.fixture 14 | def test_dir(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> Path: 15 | """Creates a temporary directory for the test and sets PROJECT_ROOT within the tool module.""" 16 | monkeypatch.chdir(tmp_path) 17 | # Explicitly get the module from sys.modules to avoid ambiguity 18 | module_to_patch = sys.modules["codexy.tools.apply_diff_tool"] 19 | monkeypatch.setattr(module_to_patch, "PROJECT_ROOT", tmp_path) 20 | print(f"PROJECT_ROOT set to: {tmp_path}") 21 | return tmp_path 22 | 23 | 24 | # --- parse_diff_blocks Tests --- 25 | 26 | 27 | def test_parse_diff_blocks_single_block(): 28 | """Test parsing a diff string with a single block.""" 29 | diff_text = """<<<<<<< 
SEARCH 30 | :start_line:10 31 | ------- 32 | original line 33 | ======= 34 | modified line 35 | >>>>>>> REPLACE""" 36 | 37 | blocks = parse_diff_blocks(diff_text) 38 | 39 | assert len(blocks) == 1 40 | assert blocks[0][0] == 10 41 | assert blocks[0][1].rstrip("\n") == "original line" 42 | assert blocks[0][2].rstrip("\n") == "modified line" 43 | 44 | 45 | def test_parse_diff_blocks_multiple_blocks(): 46 | """Test parsing a diff string with multiple blocks.""" 47 | diff_text = """<<<<<<< SEARCH 48 | :start_line:5 49 | ------- 50 | first block original 51 | ======= 52 | first block modified 53 | >>>>>>> REPLACE 54 | <<<<<<< SEARCH 55 | :start_line:10 56 | ------- 57 | second block original 58 | ======= 59 | second block modified 60 | >>>>>>> REPLACE""" 61 | 62 | blocks = parse_diff_blocks(diff_text) 63 | 64 | # Blocks should be sorted by line number in descending order 65 | assert len(blocks) == 2 66 | assert blocks[0][0] == 10 # Higher line number first 67 | assert blocks[0][1].rstrip("\n") == "second block original" 68 | assert blocks[0][2].rstrip("\n") == "second block modified" 69 | assert blocks[1][0] == 5 70 | assert blocks[1][1].rstrip("\n") == "first block original" 71 | assert blocks[1][2].rstrip("\n") == "first block modified" 72 | 73 | 74 | def test_parse_diff_blocks_multiline(): 75 | """Test parsing a diff string with multi-line blocks.""" 76 | diff_text = """<<<<<<< SEARCH 77 | :start_line:3 78 | ------- 79 | line 1 80 | line 2 81 | line 3 82 | ======= 83 | new line 1 84 | new line 2 85 | >>>>>>> REPLACE""" 86 | 87 | blocks = parse_diff_blocks(diff_text) 88 | 89 | assert len(blocks) == 1 90 | assert blocks[0][0] == 3 91 | assert blocks[0][1].rstrip("\n").split("\n") == ["line 1", "line 2", "line 3"] 92 | assert blocks[0][2].rstrip("\n").split("\n") == ["new line 1", "new line 2"] 93 | 94 | 95 | def test_parse_diff_blocks_empty_search(): 96 | """Test parsing a diff with empty search content.""" 97 | diff_text = """<<<<<<< SEARCH 98 | :start_line:8 99 | 
------- 100 | ======= 101 | added content 102 | >>>>>>> REPLACE""" 103 | 104 | blocks = parse_diff_blocks(diff_text) 105 | 106 | assert len(blocks) == 1 107 | assert blocks[0][0] == 8 108 | assert blocks[0][1].rstrip("\n") == "" 109 | assert blocks[0][2].rstrip("\n") == "added content" 110 | 111 | 112 | def test_parse_diff_blocks_empty_replace(): 113 | """Test parsing a diff with empty replace content.""" 114 | diff_text = """<<<<<<< SEARCH 115 | :start_line:8 116 | ------- 117 | content to delete 118 | ======= 119 | >>>>>>> REPLACE""" 120 | 121 | blocks = parse_diff_blocks(diff_text) 122 | 123 | assert len(blocks) == 1 124 | assert blocks[0][0] == 8 125 | assert blocks[0][1].rstrip("\n") == "content to delete" 126 | assert blocks[0][2].rstrip("\n") == "" 127 | 128 | 129 | def test_parse_diff_blocks_whitespace_variations(): 130 | """Test parsing a diff with whitespace variations in markers.""" 131 | diff_text = """ <<<<<<< SEARCH 132 | :start_line:15 133 | ------- 134 | text 135 | ======= 136 | new text 137 | >>>>>>> REPLACE """ 138 | 139 | blocks = parse_diff_blocks(diff_text) 140 | 141 | assert len(blocks) == 1 142 | assert blocks[0][0] == 15 143 | assert blocks[0][1].rstrip("\n") == "text" 144 | assert blocks[0][2].rstrip("\n") == "new text" 145 | 146 | 147 | def test_parse_diff_blocks_crlf_normalization(): 148 | """Test normalization of CRLF line endings.""" 149 | diff_text = """<<<<<<< SEARCH 150 | :start_line:20 151 | ------- 152 | line 1\r\nline 2 153 | ======= 154 | new line 1\r\nnew line 2 155 | >>>>>>> REPLACE""" 156 | 157 | blocks = parse_diff_blocks(diff_text) 158 | 159 | assert len(blocks) == 1 160 | assert blocks[0][0] == 20 161 | # The \r\n sequence is escaped but should be normalized to \n in the output 162 | assert blocks[0][1].rstrip("\n") == "line 1\nline 2" 163 | assert blocks[0][2].rstrip("\n") == "new line 1\nnew line 2" 164 | 165 | 166 | def test_parse_diff_blocks_empty_diff(): 167 | """Test parsing an empty diff string.""" 168 | diff_text = 
"" 169 | 170 | blocks = parse_diff_blocks(diff_text) 171 | 172 | assert len(blocks) == 0 173 | 174 | 175 | def test_parse_diff_blocks_invalid_diff(): 176 | """Test parsing an invalid diff string raises ValueError.""" 177 | diff_text = "This is not a valid diff format" 178 | 179 | with pytest.raises(ValueError): 180 | parse_diff_blocks(diff_text) 181 | 182 | 183 | # --- apply_diff_tool Tests --- 184 | 185 | 186 | def test_apply_diff_tool_single_line(test_dir: Path): 187 | """Test applying a single-line diff.""" 188 | file_path = test_dir / "test_file.txt" 189 | file_path.write_text("Line 1\nLine 2\nLine 3\n", encoding="utf-8") 190 | 191 | diff = """<<<<<<< SEARCH 192 | :start_line:2 193 | ------- 194 | Line 2 195 | ======= 196 | Modified Line 2 197 | >>>>>>> REPLACE""" 198 | 199 | result = apply_diff_tool("test_file.txt", diff) 200 | 201 | assert "Successfully" in result 202 | assert "1 diff block(s)" in result 203 | assert file_path.read_text(encoding="utf-8") == "Line 1\nModified Line 2\nLine 3\n" 204 | 205 | 206 | def test_apply_diff_tool_multiple_blocks(test_dir: Path): 207 | """Test applying multiple diff blocks to a file.""" 208 | file_path = test_dir / "multi_block.txt" 209 | file_path.write_text("Line 1\nLine 2\nLine 3\nLine 4\nLine 5\n", encoding="utf-8") 210 | 211 | diff = """<<<<<<< SEARCH 212 | :start_line:2 213 | ------- 214 | Line 2 215 | ======= 216 | Modified Line 2 217 | >>>>>>> REPLACE 218 | <<<<<<< SEARCH 219 | :start_line:4 220 | ------- 221 | Line 4 222 | ======= 223 | Modified Line 4 224 | >>>>>>> REPLACE""" 225 | 226 | result = apply_diff_tool("multi_block.txt", diff) 227 | 228 | assert "Successfully" in result 229 | assert "2 diff block(s)" in result 230 | assert file_path.read_text(encoding="utf-8") == "Line 1\nModified Line 2\nLine 3\nModified Line 4\nLine 5\n" 231 | 232 | 233 | def test_apply_diff_tool_multiline_block(test_dir: Path): 234 | """Test applying a multi-line diff block.""" 235 | file_path = test_dir / "multiline.txt" 236 | 
file_path.write_text("Title\nParagraph 1\nParagraph 2\nParagraph 3\nFooter\n", encoding="utf-8") 237 | 238 | diff = """<<<<<<< SEARCH 239 | :start_line:2 240 | ------- 241 | Paragraph 1 242 | Paragraph 2 243 | Paragraph 3 244 | ======= 245 | New Paragraph A 246 | New Paragraph B 247 | >>>>>>> REPLACE""" 248 | 249 | result = apply_diff_tool("multiline.txt", diff) 250 | 251 | assert "Successfully" in result 252 | assert file_path.read_text(encoding="utf-8") == "Title\nNew Paragraph A\nNew Paragraph B\nFooter\n" 253 | 254 | 255 | def test_apply_diff_tool_no_match(test_dir: Path): 256 | """Test applying a diff that doesn't match the file content.""" 257 | file_path = test_dir / "no_match.txt" 258 | file_path.write_text("Line 1\nLine X\nLine 3\n", encoding="utf-8") 259 | 260 | diff = """<<<<<<< SEARCH 261 | :start_line:2 262 | ------- 263 | Line 2 264 | ======= 265 | Modified Line 2 266 | >>>>>>> REPLACE""" 267 | 268 | result = apply_diff_tool("no_match.txt", diff) 269 | 270 | assert "Failed" in result 271 | assert "SEARCH content does not exactly match" in result 272 | # File should remain unchanged 273 | assert file_path.read_text(encoding="utf-8") == "Line 1\nLine X\nLine 3\n" 274 | 275 | 276 | def test_apply_diff_tool_out_of_bounds(test_dir: Path): 277 | """Test applying a diff with out-of-bounds line number.""" 278 | file_path = test_dir / "short.txt" 279 | file_path.write_text("Line 1\nLine 2\n", encoding="utf-8") 280 | 281 | diff = """<<<<<<< SEARCH 282 | :start_line:10 283 | ------- 284 | Non-existent line 285 | ======= 286 | New line 287 | >>>>>>> REPLACE""" 288 | 289 | result = apply_diff_tool("short.txt", diff) 290 | 291 | assert "Failed" in result 292 | assert "out of bounds" in result 293 | # File should remain unchanged 294 | assert file_path.read_text(encoding="utf-8") == "Line 1\nLine 2\n" 295 | 296 | 297 | def test_apply_diff_tool_nonexistent_file(test_dir: Path): 298 | """Test applying a diff to a non-existent file.""" 299 | diff = """<<<<<<< SEARCH 
300 | :start_line:1 301 | ------- 302 | Some content 303 | ======= 304 | New content 305 | >>>>>>> REPLACE""" 306 | 307 | result = apply_diff_tool("nonexistent.txt", diff) 308 | 309 | assert "Error" in result 310 | assert "File not found" in result 311 | 312 | 313 | def test_apply_diff_tool_file_outside_project(test_dir: Path, monkeypatch: pytest.MonkeyPatch): 314 | """Test applying a diff to a file outside the project root.""" 315 | # Create a file in the parent directory 316 | outside_file = test_dir.parent / "outside.txt" 317 | outside_file.write_text("Content", encoding="utf-8") 318 | 319 | diff = """<<<<<<< SEARCH 320 | :start_line:1 321 | ------- 322 | Content 323 | ======= 324 | New content 325 | >>>>>>> REPLACE""" 326 | 327 | # Use '../outside.txt' to attempt to access file outside project 328 | result = apply_diff_tool("../outside.txt", diff) 329 | 330 | assert "Error" in result 331 | assert "outside of project root" in result 332 | 333 | 334 | def test_apply_diff_tool_empty_path(): 335 | """Test applying a diff with an empty path.""" 336 | diff = """<<<<<<< SEARCH 337 | :start_line:1 338 | ------- 339 | Content 340 | ======= 341 | New content 342 | >>>>>>> REPLACE""" 343 | 344 | result = apply_diff_tool("", diff) 345 | 346 | assert "Error" in result 347 | assert "'path' argument is required" in result 348 | 349 | 350 | def test_apply_diff_tool_empty_diff(test_dir: Path): 351 | """Test applying an empty diff.""" 352 | file_path = test_dir / "empty_diff.txt" 353 | file_path.write_text("Content", encoding="utf-8") 354 | 355 | result = apply_diff_tool("empty_diff.txt", "") 356 | 357 | assert "Error" in result 358 | assert "'diff' argument is required" in result 359 | 360 | 361 | def test_apply_diff_tool_partial_success(test_dir: Path): 362 | """Test applying a diff with one successful block and one failed block.""" 363 | file_path = test_dir / "partial.txt" 364 | file_path.write_text("Line 1\nLine 2\nLine 3\n", encoding="utf-8") 365 | 366 | diff = """<<<<<<< 
SEARCH 367 | :start_line:1 368 | ------- 369 | Line 1 370 | ======= 371 | Modified Line 1 372 | >>>>>>> REPLACE 373 | <<<<<<< SEARCH 374 | :start_line:3 375 | ------- 376 | Wrong Line 3 377 | ======= 378 | Modified Line 3 379 | >>>>>>> REPLACE""" 380 | 381 | result = apply_diff_tool("partial.txt", diff) 382 | 383 | # Should report failures but still apply good changes 384 | assert "Failed" in result 385 | assert "SEARCH content does not exactly match" in result 386 | # File should remain unchanged since we have errors 387 | assert file_path.read_text(encoding="utf-8") == "Line 1\nLine 2\nLine 3\n" 388 | -------------------------------------------------------------------------------- /codexy/cli/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | import sys 4 | import traceback 5 | from pathlib import Path 6 | 7 | import click 8 | from rich.console import Console 9 | 10 | from ..approvals import ApprovalMode 11 | from ..config import DEFAULT_FULL_STDOUT, INSTRUCTIONS_FILEPATH, AppConfig, load_config 12 | from ..tui import CodexTuiApp 13 | from .completion_scripts import _COMPLETION_SCRIPTS 14 | 15 | stderr_console = Console(stderr=True) 16 | 17 | 18 | @click.command(context_settings={"help_option_names": ["-h", "--help"]}) 19 | @click.version_option(package_name="codexy") 20 | @click.option("--model", "-m", help="Model to use for completions (e.g., o4-mini).") 21 | @click.option( 22 | "--image", 23 | "-i", 24 | multiple=True, 25 | type=click.Path(exists=True, dir_okay=False), 26 | help="Path(s) to image files to include as input.", 27 | ) 28 | @click.option( 29 | "--view", 30 | "-v", 31 | type=click.Path(exists=True, dir_okay=False), 32 | help="Inspect a previously saved rollout instead of starting a session.", 33 | ) 34 | @click.option("--quiet", "-q", is_flag=True, help="Non-interactive mode that only prints the assistant's final output.") 35 | @click.option("--config", "-c", 
is_flag=True, help="Open the instructions file in your editor.") 36 | @click.option( 37 | "--writable-root", 38 | "-w", 39 | multiple=True, 40 | type=click.Path(file_okay=False), 41 | help="Writable folder for sandbox in full-auto mode (can be specified multiple times).", 42 | ) 43 | @click.option( 44 | "--approval-mode", 45 | "-a", 46 | type=click.Choice([mode.value for mode in ApprovalMode]), # Use enum values for choices 47 | default=None, 48 | help="Override the approval policy.", 49 | ) 50 | @click.option("--auto-edit", is_flag=True, help="Automatically approve file edits; still prompt for commands.") 51 | @click.option("--full-auto", is_flag=True, help="Automatically approve edits and commands when executed in the sandbox.") 52 | @click.option("--no-project-doc", is_flag=True, help="Do not automatically include the repository's 'codex.md'.") 53 | @click.option( 54 | "--project-doc", 55 | type=click.Path(exists=True, dir_okay=False), 56 | help="Include an additional markdown file as context.", 57 | ) 58 | @click.option( 59 | "--full-stdout", 60 | is_flag=True, 61 | default=DEFAULT_FULL_STDOUT, 62 | help="Do not truncate stdout/stderr from command outputs.", # Updated help text 63 | ) 64 | @click.option( 65 | "--notify", 66 | is_flag=True, 67 | default=None, # Default to None to distinguish between not set and set to False 68 | help="Enable desktop notifications for responses.", 69 | ) 70 | @click.option( 71 | "--flex-mode", 72 | is_flag=True, 73 | help='Enable "flex-mode" service tier (only supported by o3, o4-mini).', 74 | ) 75 | @click.option( 76 | "--dangerously-auto-approve-everything", 77 | is_flag=True, 78 | help="Skip all confirmation prompts and execute commands without sandboxing (DANGEROUS).", 79 | ) 80 | @click.option("--full-context", "-f", is_flag=True, help='Launch in "full-context" mode.') 81 | @click.argument("prompt", required=False) 82 | def codexy( 83 | prompt: str | None, 84 | model: str | None, 85 | image: tuple[str, ...], 86 | view: str | 
None, 87 | quiet: bool, 88 | config: bool, 89 | writable_root: tuple[str, ...], 90 | approval_mode: str | None, 91 | auto_edit: bool, 92 | full_auto: bool, 93 | no_project_doc: bool, 94 | project_doc: str | None, 95 | full_stdout: bool, 96 | notify: bool | None, 97 | flex_mode: bool, 98 | dangerously_auto_approve_everything: bool, 99 | full_context: bool, 100 | ): 101 | """Interactive REPL for Codex agent. 102 | 103 | codexy Interactive REPL 104 | codexy "..." Initial prompt for interactive REPL 105 | """ 106 | # --- Completion script generation --- 107 | if len(sys.argv) > 1 and sys.argv[1] == "completion": 108 | if len(sys.argv) > 2: 109 | shell = sys.argv[2] 110 | generate_completion(shell) 111 | else: 112 | generate_completion(None) # Ask user or default 113 | return 114 | 115 | # --- Execute main application logic --- 116 | run_repl( 117 | prompt=prompt, 118 | model=model, 119 | image=image, # Pass tuple directly 120 | view=view, 121 | quiet=quiet, 122 | handle_config_flag=config, # Renamed to avoid conflict with config dict 123 | writable_root=writable_root, # Pass tuple directly 124 | cli_approval_mode=approval_mode, # Renamed to avoid conflict 125 | auto_edit=auto_edit, 126 | full_auto=full_auto, 127 | no_project_doc=no_project_doc, 128 | project_doc=project_doc, 129 | full_stdout=full_stdout, # Pass the flag value 130 | notify=notify, 131 | flex_mode=flex_mode, # Pass the flag value 132 | dangerously_auto_approve_everything=dangerously_auto_approve_everything, 133 | full_context=full_context, 134 | ) 135 | 136 | 137 | def generate_completion(shell: str | None): # Added type hint 138 | """Generate shell completion script.""" 139 | if shell is None: 140 | # Simple prompt if shell not provided 141 | shell = click.prompt("Which shell? 
(bash, zsh, fish)", type=str) 142 | 143 | assert isinstance(shell, str) 144 | 145 | script = _COMPLETION_SCRIPTS.get(shell) 146 | if script: 147 | click.echo(script) 148 | click.echo(f"\n# To enable completion, add the above to your shell's config file (e.g., ~/.{shell}rc)") 149 | click.echo("# Or follow instructions specific to your shell completion setup.") 150 | else: 151 | click.echo(f"Error: Unsupported shell '{shell}'. Choose from bash, zsh, fish.") 152 | 153 | 154 | # Renamed kwargs keys to avoid conflicts and be more explicit 155 | def run_repl( 156 | prompt: str | None, 157 | model: str | None, 158 | image: tuple[str, ...], 159 | view: str | None, 160 | quiet: bool, 161 | handle_config_flag: bool, # Renamed from config 162 | writable_root: tuple[str, ...], 163 | cli_approval_mode: str | None, # Renamed from approval_mode 164 | auto_edit: bool, 165 | full_auto: bool, 166 | no_project_doc: bool, 167 | project_doc: str | None, 168 | full_stdout: bool, # Added parameter 169 | notify: bool | None, 170 | flex_mode: bool, # Added parameter 171 | dangerously_auto_approve_everything: bool, 172 | full_context: bool, 173 | ): 174 | """Run the interactive REPL.""" 175 | 176 | # --- Handle Action Flags (like --config) FIRST --- 177 | if handle_config_flag: 178 | editor = os.environ.get("EDITOR", "notepad" if os.name == "nt" else "vim") 179 | try: 180 | if not INSTRUCTIONS_FILEPATH.exists(): 181 | INSTRUCTIONS_FILEPATH.parent.mkdir(parents=True, exist_ok=True) 182 | INSTRUCTIONS_FILEPATH.touch() 183 | stderr_console.print(f"Created instructions file: {INSTRUCTIONS_FILEPATH}") 184 | else: 185 | stderr_console.print(f"Opening instructions file: {INSTRUCTIONS_FILEPATH}") 186 | # Use subprocess.call for potentially better editor handling 187 | return_code = subprocess.call([editor, str(INSTRUCTIONS_FILEPATH)]) 188 | if return_code != 0: 189 | stderr_console.print(f"[yellow]Editor exited with code {return_code}.[/yellow]") 190 | except FileNotFoundError: 191 | 
stderr_console.print(f"[bold red]Error: Editor '{editor}' not found. Set the EDITOR environment variable.[/bold red]") 192 | except Exception as e: 193 | stderr_console.print(f"[bold red]Error opening file '{INSTRUCTIONS_FILEPATH}': {e}[/bold red]") 194 | sys.exit(0) # Exit after handling config flag 195 | 196 | # --- Configuration Loading --- 197 | config_options = { 198 | "disable_project_doc": no_project_doc, 199 | "project_doc_path": Path(project_doc) if project_doc else None, 200 | "is_full_context": full_context, 201 | "flex_mode": flex_mode, # Pass to load_config 202 | "full_stdout": full_stdout, # Pass to load_config 203 | } 204 | try: 205 | # Pass runtime flags to load_config 206 | app_config: AppConfig = load_config(cwd=Path.cwd(), **config_options) 207 | 208 | # Override loaded config with CLI flags if provided 209 | if model: 210 | app_config["model"] = model 211 | if notify is not None: # Check for None explicitly 212 | app_config["notify"] = notify 213 | 214 | app_config["full_stdout"] = full_stdout 215 | 216 | # Determine the final approval mode based on CLI flags and config 217 | effective_mode = ApprovalMode(app_config["effective_approval_mode"]) 218 | 219 | # CLI flags override config 220 | if cli_approval_mode: 221 | effective_mode = ApprovalMode(cli_approval_mode) 222 | if auto_edit and effective_mode == ApprovalMode.SUGGEST: 223 | effective_mode = ApprovalMode.AUTO_EDIT 224 | if full_auto and effective_mode in [ApprovalMode.SUGGEST, ApprovalMode.AUTO_EDIT]: 225 | effective_mode = ApprovalMode.FULL_AUTO 226 | if dangerously_auto_approve_everything: # Highest priority override 227 | effective_mode = ApprovalMode.DANGEROUS_AUTO 228 | 229 | # Update the effective mode in the config dict 230 | app_config["effective_approval_mode"] = effective_mode.value 231 | 232 | # Resolve writable roots here instead of in AppConfig 233 | resolved_writable_roots = [str(Path(p).resolve()) for p in writable_root] 234 | app_config["writable_roots"] = 
resolved_writable_roots 235 | 236 | except Exception as e: 237 | stderr_console.print(f"[bold red]Error loading configuration:[/bold red] {e}") 238 | traceback.print_exc(file=sys.stderr) # Print traceback for debugging 239 | sys.exit(1) 240 | 241 | # --- Handle Other Flags/Modes (like --view, --quiet) --- 242 | if view: 243 | # TODO: Implement viewing rollout file 244 | stderr_console.print(f"Viewing rollout file '{view}' is not implemented yet.") 245 | sys.exit(0) 246 | 247 | if quiet: 248 | # TODO: Implement quiet mode (non-TUI) 249 | stderr_console.print("Quiet mode is not implemented yet.") 250 | sys.exit(1) 251 | 252 | if full_context: 253 | # TODO: Implement full context mode (non-TUI) 254 | stderr_console.print("Full context mode is not implemented yet.") 255 | sys.exit(1) 256 | 257 | # --- API Key Check --- 258 | # Check API key *after* loading config, as it might be set there 259 | # (although current load_config doesn't load it from file, only env) 260 | if not app_config.get("api_key"): 261 | stderr_console.print( 262 | "\n[bold red]Missing OpenAI API key.[/bold red]\n\n" 263 | "Set the environment variable [bold]OPENAI_API_KEY[/bold] or place it in a [bold].env[/bold] file.\n" 264 | "Create a key at: [link=https://platform.openai.com/account/api-keys]https://platform.openai.com/account/api-keys[/link]\n" 265 | ) 266 | sys.exit(1) 267 | 268 | # --- Start Textual TUI --- 269 | try: 270 | # Pass resolved_writable_roots directly to TuiApp if needed, or Agent 271 | tui_app = CodexTuiApp( 272 | config=app_config, 273 | initial_prompt=prompt, 274 | initial_images=list(image), 275 | ) 276 | tui_app.run() 277 | # Exit with TUI's return code if available, otherwise 0 278 | sys.exit(tui_app.return_code or 0) 279 | except Exception as e: 280 | # Catch potential errors during TUI setup or run 281 | stderr_console.print(f"[bold red]Error running Textual TUI:[/bold red] {e}") 282 | traceback.print_exc(file=sys.stderr) 283 | sys.exit(1) 284 | 285 | 286 | def main(): 
class ChatInputArea(Container):
    """Bottom-of-screen input region for the chat TUI.

    Hosts the multi-line TextArea the user types into, a ThinkingIndicator
    shown while the agent is generating, and a one-line help bar that also
    displays remaining context-token percentage.  Implements shell-style
    Up/Down navigation over previously submitted commands.
    """

    DEFAULT_CSS = """
    ChatInputArea {
        height: auto;
        max-height: 50%;
        border-top: thick $accent;
        padding: 0;
        background: $panel;
        margin-bottom: 1;
    }
    ChatInputArea > TextArea {
        height: auto;
        min-height: 3;
        max-height: 20;
        border: none;
        margin: 0;
        background: $surface;
        scrollbar-gutter: stable;
    }
    ChatInputArea > TextArea:focus {
        border: none;
    }
    ChatInputArea > ThinkingIndicator {
        border: none;
        padding: 1;
        color: $text-muted;
        height: 3; /* Match help bar height */
        background: $surface;
    }
    /* Style the container for help text and tokens */
    ChatInputArea > #input-help-container {
        height: 1;
        background: $surface;
        padding: 0 1; /* Add padding */
        width: 1fr;
        /* We don't need layout: horizontal here, Container default is vertical, */
        /* but the items inside will determine layout */
    }
    ChatInputArea > #input-help-container Horizontal { /* Style the inner Horizontal */
        align: left middle; /* Vertically align items */
        height: 1;
    }
    ChatInputArea > #input-help-container #input-help-text { /* Style the help text Static */
        width: 1fr; /* Allow it to take up remaining space */
        color: $text-muted;
        text-overflow: ellipsis;
        overflow: hidden;
        height: 1;
    }
    ChatInputArea > #input-help-container #input-help-tokens { /* Style the token count Static */
        width: auto; /* Take only needed space */
        color: $accent; /* Use accent color for visibility */
        text-style: bold;
        margin-left: 1; /* Space between help and tokens */
        height: 1;
        text-align: right; /* Align token count to the right */
    }
    """

    # priority=True so history navigation wins over TextArea's own Up/Down
    # handling; the actions themselves yield back to the TextArea when the
    # cursor is not on the first/last line.
    BINDINGS = [
        Binding("up", "history_prev", "Prev History", show=False, priority=True),
        Binding("down", "history_next", "Next History", show=False, priority=True),
    ]

    # --- Reactives ---
    is_loading: reactive[bool] = reactive(False)  # True while the agent is generating a reply
    thinking_seconds: reactive[int] = reactive(0)  # elapsed seconds shown by the indicator
    token_usage_percent: reactive[float] = reactive(100.0)  # remaining context budget, in percent

    # --- State ---
    # NOTE(review): these are class-level defaults; set_history/clear_input
    # rebind them on the instance, and the shared list is never mutated in
    # place, so instances do not actually leak state into each other.
    _command_history: list[HistoryEntry] = []  # entries browsed with Up/Down
    _history_index: int | None = None  # None = editing a fresh draft, not browsing history
    _draft_input: str = ""  # stashes the in-progress draft while browsing history
    _thinking_timer: Timer | None = None  # 1-second interval timer driving thinking_seconds

    # --- Messages ---
    class Submit(Message):
        """Sent when user submits input.

        Carries the stripped, non-empty text of the TextArea; clearing the
        input afterwards is the App's responsibility.
        """

        def __init__(self, value: str):
            self.value = value
            super().__init__()

    # --- UI Composition ---
    def compose(self) -> ComposeResult:
        """Yield the TextArea, the thinking indicator, and the help bar."""
        yield TextArea(language=None, theme="css", soft_wrap=True, show_line_numbers=False, id="input-textarea")
        yield ThinkingIndicator(id="thinking")
        # -- Updated help section --
        with Container(id="input-help-container"):
            with Horizontal():
                yield Static(
                    r"\[Ctrl+J] Submit | \[Up/Down] History | \[ESC] Cancel/Close",
                    classes="input-help-text",  # Use class for easier targeting
                    id="input-help-text",
                )
                yield Static("", id="input-help-tokens")  # Placeholder for token count

    def on_mount(self) -> None:
        """Set initial state and focus when mounted."""
        # Ensure TextArea exists before focusing
        try:
            self.query_one("#input-textarea", TextArea).focus()
        except Exception as e:
            self.log.warning(f"Could not focus input textarea on mount: {e}")

    def on_unmount(self) -> None:
        """Ensure timer is stopped when component unmounts."""
        if self._thinking_timer:
            self._thinking_timer.stop()
            self._thinking_timer = None

    def watch_token_usage_percent(self, new_value: float) -> None:
        """Update the token usage display.

        Color-codes the percentage: red below 10%, bold yellow below 25%,
        yellow below 50%, default otherwise.
        """
        try:
            token_widget = self.query_one("#input-help-tokens", Static)
            if token_widget.is_mounted:
                percent_str = f"{new_value:.0f}%"
                # Add color coding based on percentage
                style = ""
                if new_value < 10:
                    style = "bold red"
                elif new_value < 25:
                    style = "bold yellow"
                elif new_value < 50:
                    style = "yellow"
                # Update with Rich Text for styling
                token_widget.update(Text(f"Ctx: {percent_str}", style=style))
        except Exception as e:
            if self.is_mounted:
                self.log.error(f"Error updating token usage display: {e}")

    # --- Public API ---
    def set_loading(self, loading: bool):
        """Set loading state (drives watch_is_loading)."""
        self.is_loading = loading

    def set_history(self, history: list[HistoryEntry]):
        """Set command history and reset the browse position."""
        self._command_history = history
        self._history_index = None

    def get_input_value(self) -> str:
        """Get current input value; returns "" if the widget is unavailable."""
        try:
            textarea = cast(TextArea, self.query_one("#input-textarea"))
            if textarea.is_mounted:
                return textarea.text
        except Exception:
            pass
        return ""

    def set_input_value(self, value: str):
        """Set input value and move cursor to end."""
        try:
            textarea = cast(TextArea, self.query_one("#input-textarea"))
            if textarea.is_mounted:
                textarea.load_text(value)
                # Delay moving cursor to ensure text is loaded
                self.call_after_refresh(lambda: textarea.move_cursor(textarea.document.end))
        except Exception:
            pass

    def focus_input(self):
        """Focus on input field (best-effort; ignores query/mount races)."""
        try:
            if self.is_mounted:
                textarea = self.query_one("#input-textarea", TextArea)
                if textarea.is_mounted:
                    textarea.focus()
        except Exception:
            pass

    def clear_input(self):
        """Clear input field and discard any history-browsing state."""
        self.set_input_value("")
        self._draft_input = ""
        self._history_index = None

    # --- Watchers ---
    def watch_is_loading(self, loading: bool) -> None:
        """Switch between input field and indicator based on loading state.

        Entering loading: hide input/help, show the indicator, and start a
        1-second interval timer.  Leaving loading: stop the timer and return
        focus to the TextArea after the next refresh.
        """
        try:
            thinking_indicator = cast(ThinkingIndicator, self.query_one("#thinking"))
            textarea = cast(TextArea, self.query_one("#input-textarea"))
            help_container = self.query_one("#input-help-container")

            thinking_indicator.display = loading
            textarea.display = not loading
            help_container.display = not loading

            if loading:
                self.thinking_seconds = 0
                thinking_indicator.set_thinking_seconds(0)
                if self._thinking_timer:
                    self._thinking_timer.stop()
                self._thinking_timer = self.set_interval(1.0, self._update_thinking_timer)
            else:
                if self._thinking_timer:
                    self._thinking_timer.stop()
                    self._thinking_timer = None
                if textarea.is_mounted:
                    self.call_after_refresh(textarea.focus)

        except Exception as e:
            if self.is_mounted:
                self.log.error(f"Error in watch_is_loading: {e}")

    def _update_thinking_timer(self):
        """Tick handler: advance the elapsed-seconds display once per second."""
        if self.is_loading:
            self.thinking_seconds += 1
            try:
                thinking_indicator = cast(ThinkingIndicator, self.query_one("#thinking"))
                if thinking_indicator.is_mounted:
                    thinking_indicator.set_thinking_seconds(self.thinking_seconds)
            except Exception:
                # If component is unmounted, stop timer
                if self._thinking_timer:
                    self._thinking_timer.stop()
                    self._thinking_timer = None
        else:
            if self._thinking_timer:
                self._thinking_timer.stop()
                self._thinking_timer = None

    # --- Actions and Event Handlers ---
    def action_submit(self):
        """Handle submit action (e.g. Ctrl+J): post Submit if non-empty."""
        if not self.is_loading:
            try:
                textarea = cast(TextArea, self.query_one("#input-textarea"))
                value = textarea.text.strip()
                if value:
                    self.log(f"Submitting value: {value!r}")
                    self.post_message(self.Submit(value))
                    # Clear operation moved to App level
                else:
                    self.log("Submit ignored, value is empty.")
            except Exception as e:
                self.log.error(f"Error during submit action: {e}")

    def on_key(self, event: events.Key) -> None:
        """Handle key events, especially Ctrl+J.

        Ctrl+J is intercepted here (not via BINDINGS) so it fires before the
        TextArea inserts a newline.
        """
        if event.key == "ctrl+j":
            if not self.is_loading:
                self.log.info("Ctrl+J detected, stopping event and calling action_submit.")
                event.stop()
                self.action_submit()

    def action_history_prev(self):
        """Handle navigating up through the command history.

        Only triggers when the cursor is on the first line of the TextArea;
        on first entry, the current draft is saved so it can be restored.
        """
        if self.is_loading:
            return

        try:
            textarea = cast(TextArea, self.query_one("#input-textarea"))
            # Only trigger history when cursor is on the first line
            if textarea.cursor_location[0] != 0:
                return  # Cursor is not on the first line, let TextArea handle the up arrow

            if not self._command_history:
                return

            if self._history_index is None:
                self._draft_input = textarea.text
                new_index = len(self._command_history) - 1
            else:
                new_index = max(0, self._history_index - 1)

            if new_index != self._history_index:
                self._history_index = new_index
                self.set_input_value(self._command_history[self._history_index]["command"])
        except Exception as e:
            self.log.error(f"Error in action_history_prev: {e}")

    def action_history_next(self):
        """Handle navigating down through the command history.

        Only triggers when the cursor is on the last line; stepping past the
        newest entry restores the stashed draft.
        """
        if self.is_loading:
            return

        try:
            textarea = cast(TextArea, self.query_one("#input-textarea"))
            # Only trigger history when cursor is on the last line
            if textarea.cursor_location[0] != textarea.document.line_count - 1:
                return  # Cursor is not on the last line, let TextArea handle the down arrow

            if self._history_index is None:
                return

            new_index = self._history_index + 1

            if new_index >= len(self._command_history):
                self._history_index = None
                self.set_input_value(self._draft_input)
            else:
                self._history_index = new_index
                self.set_input_value(self._command_history[self._history_index]["command"])
        except Exception as e:
            self.log.error(f"Error in action_history_next: {e}")
class CommandReviewResult(TypedDict):
    """Outcome of a command review: approval flag, session-wide approval, and optional feedback."""

    approved: bool
    always_approve: bool
    feedback: str | None


class CommandReviewWidget(Static):
    """Display command to be approved and get user's decision.

    A modal-style panel with three modes:
      * "select"      - radio options (approve / always / edit / deny / stop),
      * "input"       - free-text feedback for a denial,
      * "explanation" - shows a model-generated explanation of the command.
    Decisions are delivered to the app as a ReviewResult message.
    """

    DEFAULT_CSS = """
    CommandReviewWidget {
        height: auto;
        max-height: 70%;
        width: 80%;
        background: $panel;
        padding: 1;
        overflow-y: auto;
        border: round $accent;
    }
    CommandReviewWidget > Vertical {
        height: auto;
    }
    CommandReviewWidget #command-display {
        margin-bottom: 1;
        height: auto;
        max-height: 15;
        overflow-y: auto;
        border: round $accent-lighten-2;
        background: $surface;
        padding: 1;
        border-title-align: left;
        border-title-style: bold;
        border-title-color: $text;
    }
    CommandReviewWidget #explanation-container {
        height: auto;
    }
    CommandReviewWidget #explanation-display {
        margin-top: 1;
        margin-bottom: 1;
        padding: 1;
        border: round $primary-lighten-1;
        height: auto;
        max-height: 15;
        overflow-y: auto;
        background: $boost;
    }
    CommandReviewWidget #explanation-display .explanation-title {
        margin-bottom: 1;
        text-style: bold;
    }
    CommandReviewWidget #approval-options {
        height: auto;
        border: none;
        margin-top: 1;
    }
    CommandReviewWidget RadioSet {
        width: 1fr;
        height: auto;
    }
    CommandReviewWidget RadioButton {
        height: 1;
        margin-bottom: 1;
    }
    CommandReviewWidget #feedback-container {
        height: auto;
    }
    CommandReviewWidget #feedback-input-label {
        margin-bottom: 1;
    }
    CommandReviewWidget #feedback-input {
        margin-top: 0;
        height: 3;
    }
    CommandReviewWidget #feedback-input Input {
        width: 1fr;
    }
    CommandReviewWidget #return-button {
        margin-top: 1;
    }
    """

    # Base decision names used by both radio-button ids (with a unique numeric
    # suffix appended) and keyboard shortcuts (bare, no suffix).
    _BASE_DECISIONS = ("yes", "always", "edit", "no_continue", "no_stop")

    # --- State ---
    _tool_name: reactive[str] = reactive("")
    _command_display: reactive[str] = reactive("")
    _tool_id: reactive[str | None] = reactive(None)
    _mode: reactive[str] = reactive("select")  # "select" | "input" | "explanation"
    _explanation: reactive[str | None] = reactive(None)
    _feedback: reactive[str] = reactive("")
    _approval_mode: ApprovalMode = ApprovalMode.SUGGEST

    # --- Messages ---
    class ReviewResult(Message):
        """Sent when the user makes an approval decision."""

        def __init__(self, approved: bool, tool_id: str | None, always_approve: bool = False, feedback: str | None = None):
            self.approved = approved
            self.tool_id = tool_id
            self.always_approve = always_approve
            self.feedback = feedback
            super().__init__()

    # --- UI Composition & Updates ---
    def compose(self) -> ComposeResult:
        with Vertical():
            # Use Static to display the command, and set the border title
            yield Static("", id="command-display")
            with Vertical(id="explanation-container", classes="-hidden"):
                yield Label("Explanation:", classes="explanation-title")
                yield Static(id="explanation-display")
                yield Button("Back to Options", id="return-button", variant="default")
            yield RadioSet(id="approval-options")
            with Vertical(id="feedback-container", classes="-hidden"):
                yield Label("Provide feedback (optional, press Enter to deny):", id="feedback-input-label")
                yield Input(placeholder="Reason for denial...", id="feedback-input")

    def set_tool_info(
        self,
        tool_name: str,
        command_display: str,
        tool_id: str,
        approval_mode: ApprovalMode = ApprovalMode.SUGGEST,
    ):
        """Set the tool information to be reviewed and reset to select mode."""
        self._tool_name = tool_name
        self._command_display = command_display
        self._tool_id = tool_id
        self._approval_mode = approval_mode
        self.update_command_display()
        self.set_mode("select")

    def set_explanation(self, explanation: str):
        """Set and display command explanation."""
        self._explanation = explanation
        self.set_mode("explanation")

    def set_mode(self, mode: str):
        """Switch the review component mode and focus the relevant control."""
        self._mode = mode
        is_select = mode == "select"
        is_input = mode == "input"
        is_explanation = mode == "explanation"

        self.query_one("#approval-options").display = is_select
        self.query_one("#feedback-container").display = is_input
        self.query_one("#explanation-container").display = is_explanation

        if is_select:
            self.build_radio_options()
            # Ensure the RadioSet is visible before focusing it
            self.call_later(lambda: self.query_one("#approval-options", RadioSet).focus())
        elif is_input:
            input_widget = self.query_one("#feedback-input", Input)
            input_widget.value = ""
            self.call_later(input_widget.focus)
        elif is_explanation:
            self.update_explanation_display()
            self.call_later(lambda: self.query_one("#return-button", Button).focus())

    def update_command_display(self):
        """Update command display area, with formatting.

        JSON payloads are pretty-printed with syntax highlighting; patch/diff
        text gets +/-/@@ line coloring; anything else is shown as folded text.
        """
        display_content: str | Syntax | Text
        try:
            parsed = json.loads(self._command_display)
            pretty_json = json.dumps(parsed, indent=2)
            # If successful, use Syntax to highlight JSON
            display_content = Syntax(
                pretty_json,
                "json",
                theme="github-dark",  # or another Syntax theme
                line_numbers=False,
                word_wrap=True,
            )
        except json.JSONDecodeError:
            # If not JSON or contains special markers (like patch), handle it specially
            if "*** Begin Patch" in self._command_display or "<<<<<<< SEARCH" in self._command_display:
                # Colorize patch/diff text
                lines = []
                for line in self._command_display.splitlines():
                    if line.startswith("+") and not line.startswith("+++"):
                        lines.append(Text(line, style="green"))
                    elif line.startswith("-") and not line.startswith("---"):
                        lines.append(Text(line, style="red"))
                    elif line.startswith("@@"):
                        lines.append(Text(line, style="cyan"))
                    else:
                        lines.append(Text(line))
                display_content = Text("\n").join(lines)
            else:
                # For normal text, use Text and allow folding
                display_content = Text(self._command_display, overflow="fold")

        # Update Static component's content and border title
        command_static = self.query_one("#command-display", Static)
        # Set border title to tool name
        command_static.border_title = f"Tool: {self._tool_name}" if self._tool_name else "Command / Operation"
        command_static.update(display_content)

    def update_explanation_display(self):
        """Update explanation display area."""
        explanation_text = self._explanation or "Loading explanation..."
        self.query_one("#explanation-display", Static).update(explanation_text)

    def build_radio_options(self):
        """Build approval options RadioButton."""
        radioset = self.query_one("#approval-options", RadioSet)
        radioset.remove_children()

        # Use a unique timestamp or counter to ensure unique IDs
        unique_suffix = str(int(time.time() * 1000000) % 1000000)  # Use microseconds for uniqueness

        options = [RadioButton("Yes (y)", id=f"yes_{unique_suffix}", value=True)]
        if self._approval_mode != ApprovalMode.SUGGEST and self._tool_name == "execute_command":
            options.append(RadioButton("Yes, always approve this command for this session (a)", id=f"always_{unique_suffix}"))
        options.extend(
            [
                RadioButton("Edit / Provide Feedback (e)", id=f"edit_{unique_suffix}"),
                RadioButton("No, continue generation (n)", id=f"no_continue_{unique_suffix}"),
                RadioButton("No, stop generation (ESC)", id=f"no_stop_{unique_suffix}"),
            ]
        )

        radioset.mount_all(options)

    # --- Event Handlers ---
    def on_radio_set_changed(self, event: RadioSet.Changed) -> None:
        """Handle the change in the RadioSet."""
        event.stop()
        selected_button = event.pressed
        if not selected_button:
            return

        decision_value = selected_button.id
        if decision_value:
            self.handle_decision(decision_value)

    def on_input_submitted(self, event: Input.Submitted) -> None:
        """Handle the submission event of the feedback input box (i.e. denial)."""
        if self._mode == "input":
            event.stop()
            feedback = event.value.strip() or "Denied by user via feedback input."
            self.post_message(self.ReviewResult(approved=False, tool_id=self._tool_id, feedback=feedback))

    def on_button_pressed(self, event: Button.Pressed) -> None:
        """Handle the "Return to Options" button."""
        if event.button.id == "return-button":
            event.stop()
            self.set_mode("select")

    def on_key(self, event: events.Key) -> None:
        """Handle keyboard shortcuts for the current mode."""
        key = event.key
        should_stop = False
        if self._mode == "select":
            if key == "y":
                self.handle_decision("yes")
                should_stop = True
            elif key == "a" and self._approval_mode != ApprovalMode.SUGGEST and self._tool_name == "execute_command":
                self.handle_decision("always")
                should_stop = True
            elif key == "e":
                self.handle_decision("edit")
                should_stop = True
            elif key == "n":
                self.handle_decision("no_continue")
                should_stop = True
            elif key == "escape":
                self.handle_decision("no_stop")
                should_stop = True
        elif self._mode == "explanation":
            # Allow ESC or Enter to go back
            if key == "escape" or key == "enter":
                self.set_mode("select")
                should_stop = True
        elif self._mode == "input":
            if key == "escape":
                # Get value from input box, use default denial message if empty
                feedback_input = self.query_one("#feedback-input", Input)
                feedback = feedback_input.value.strip() or "Denied by user via ESC."
                self.post_message(self.ReviewResult(approved=False, tool_id=self._tool_id, feedback=feedback))
                should_stop = True

        if should_stop:
            event.stop()

    def handle_decision(self, decision_id: str):
        """Handle user's decision (bare decision name or suffixed button ID).

        Radio-button ids carry a unique numeric suffix ("no_continue_123456")
        while keyboard shortcuts pass the bare name ("no_continue").

        BUGFIX: the previous implementation unconditionally stripped the last
        "_"-segment from any id containing "_", which truncated the bare
        keyboard ids "no_continue"/"no_stop" to "no" — matching no branch, so
        pressing 'n' or ESC in select mode silently did nothing.  Match known
        base names first; only strip a suffix from unrecognized ids.
        """
        if decision_id in self._BASE_DECISIONS:
            base_decision = decision_id
        elif "_" in decision_id:
            # Suffixed radio-button id: drop the trailing unique suffix.
            base_decision = decision_id.rsplit("_", 1)[0]
        else:
            base_decision = decision_id

        if base_decision == "yes":
            self.post_message(self.ReviewResult(approved=True, tool_id=self._tool_id))
        elif base_decision == "always":
            self.post_message(self.ReviewResult(approved=True, tool_id=self._tool_id, always_approve=True))
        elif base_decision == "edit":
            self.set_mode("input")
        elif base_decision == "no_continue":
            self.post_message(self.ReviewResult(approved=False, tool_id=self._tool_id, feedback="Denied by user (continue)."))
        elif base_decision == "no_stop":
            self.post_message(self.ReviewResult(approved=False, tool_id=self._tool_id, feedback="Denied by user (stop)."))
class AssistantMessageDisplay(BaseMessageDisplay):
    """Display assistant messages, with Markdown support and a copy button.

    Streaming deltas are buffered in ``_full_text`` and flushed to the
    Markdown widget at most every THROTTLE_INTERVAL seconds; ``finalize_text``
    forces a last render and reveals the copy button.
    """

    DEFAULT_CSS = """
    AssistantMessageDisplay {
        padding: 0 1;
        border-left: thick $accent;
        width: auto;
        max-width: 85%;
        height: auto;
    }
    AssistantMessageDisplay > Vertical { /* Use Vertical to stack Markdown and Button */
        height: auto;
    }
    AssistantMessageDisplay > Vertical > Markdown {
        margin: 0;
        padding: 0;
        height: auto;
    }
    AssistantMessageDisplay > Vertical > Button.copy-button {
        display: none; /* Start hidden */
        width: auto;
        height: 1;
        min-width: 8; /* "Copy" + padding */
        margin-top: 1;
        padding: 0 1;
        border: none; /* Minimalist button style */
        background: $primary-background;
        color: $text;
    }
    AssistantMessageDisplay > Vertical > Button.copy-button:hover {
        background: $primary;
    }
    AssistantMessageDisplay > Vertical > Button.copy-button.copied {
        background: $success;
    }
    """

    # Minimum seconds between consecutive Markdown re-renders while streaming.
    THROTTLE_INTERVAL: float = 0.1
    _last_update_time: float = 0.0

    def __init__(self, initial_text: str = "", **kwargs):
        super().__init__(**kwargs)  # Initialize parent Static class
        self._full_text: str = initial_text
        self.styles.height = "auto"
        self._copy_button_text_timer: Timer | None = None

    def compose(self) -> ComposeResult:
        with Vertical():
            yield Markdown(self._full_text, id="assistant-markdown-content")
            yield Button("Copy", id="copy-text-button", classes="copy-button")

    def _update_markdown_widget(self):
        """Actual method to update the Markdown component content."""
        try:
            md_widget = self.query_one("#assistant-markdown-content", Markdown)
            # Use the latest _full_text to update
            md_widget.update(self._full_text)
            self.styles.height = "auto"  # Ensure parent container height adapts
        except Exception as e:
            # If an error occurs during lookup or update, record it
            # Check if self is mounted to prevent logging after unmount
            if self.is_mounted:
                self.log.error(f"Error updating Markdown widget: {e}")

    def append_text(self, delta: str):
        """
        Append text to internal state and update Markdown component
        based on throttling strategy.
        """
        # 1. Always update internal text buffer
        self._full_text += delta

        # 2. Check if UI should be updated (throttling)
        now = time.monotonic()
        if now - self._last_update_time >= self.THROTTLE_INTERVAL:
            # Use call_later to schedule UI update back to main thread
            self.app.call_later(self._update_markdown_widget)
            self._last_update_time = now

    def update_text(self, new_text: str):
        """
        Replace the content of the Markdown component completely
        and update the internal state.
        This method may be called on the main thread, directly updating.
        """
        # 1. Update internal state
        self._full_text = new_text
        # 2. Directly call the update method (since it's on the main thread)
        self._update_markdown_widget()

    def finalize_text(self):
        """Force final UI update and show the copy button."""
        self.app.call_later(self._show_final_content_and_button)

    def _show_final_content_and_button(self):
        """Helper to update UI on the main thread."""
        self._update_markdown_widget()
        try:
            copy_button = self.query_one("#copy-text-button", Button)
            copy_button.display = True  # Show the button
            copy_button.label = "Copy"  # Reset label
        except Exception as e:
            if self.is_mounted:
                self.log.error(f"Error showing copy button: {e}")

    async def on_button_pressed(self, event: Button.Pressed) -> None:
        """Copy the full message to the clipboard and flash button feedback.

        FIX: both branches now cancel any pending revert timer and store the
        new one in ``_copy_button_text_timer``.  Previously the "Nothing to
        copy" branch started an untracked timer, so rapid clicks could stack
        timers and leave the label in a stale state.
        """
        if event.button.id == "copy-text-button":
            event.stop()
            if self._copy_button_text_timer:
                self._copy_button_text_timer.stop()
            if self._full_text:
                pyperclip.copy(self._full_text)
                event.button.label = "Copied!"
                event.button.add_class("copied")
            else:
                event.button.label = "Nothing to copy"
            self._copy_button_text_timer = self.set_timer(2.0, lambda: self._revert_copy_button_text(event.button))

    def _revert_copy_button_text(self, button: Button):
        """Revert the copy button to its idle label after the flash period."""
        if button.is_mounted:
            button.label = "Copy"
            button.remove_class("copied")
        self._copy_button_text_timer = None
class ToolCallDisplay(BaseMessageDisplay):
    """Display tool call information.

    Shows the tool name, the call id, and the (possibly streaming) JSON
    arguments.  Argument deltas arrive via append_arguments and are rendered
    incrementally; finalize_arguments locks the buffer and renders the final
    form (highlighted JSON, or an error for invalid JSON).
    """

    DEFAULT_CSS = """
    ToolCallDisplay {
        padding: 1;
        border: round yellow;
        height: auto;
        width: auto;
        max-width: 85%;
        background: $panel-lighten-1;
    }
    ToolCallDisplay Horizontal { height: 1; }
    ToolCallDisplay .tool-name { text-style: bold; color: yellow; width: auto; padding: 0; margin: 0; }
    ToolCallDisplay .tool-id { color: $text-muted; text-align: right; width: 1fr; padding: 0; margin: 0; }
    ToolCallDisplay #args-display { margin-top: 1; padding: 1; background: $surface; border: solid $accent; height: auto; max-height: 15; overflow-y: auto; width: 1fr; }
    ToolCallDisplay #args-display Syntax { width: 100%; height: auto; }
    ToolCallDisplay #args-display .placeholder { color: $text-muted; text-style: italic; }
    """

    def __init__(self, function_name: str, tool_id: str, **kwargs):
        """Store the tool name/id; arguments start empty and stream in later."""
        super().__init__(**kwargs)
        self.function_name = function_name
        self.tool_id = tool_id
        self._arguments = ""  # accumulated raw argument text (usually JSON)
        self._finalized = False  # True once finalize_arguments() is called
        self.styles.height = "auto"

    def compose(self) -> ComposeResult:
        # Use Horizontal container, it will automatically handle horizontal layout
        with Horizontal():
            yield Static(f"Tool Call: {self.function_name}", classes="tool-name")
            yield Static(f"ID: {self.tool_id}", classes="tool-id")

        yield Static(
            Text("(Receiving arguments...)"),
            id="args-display",
            expand=False,
            classes="placeholder",
        )

    def on_mount(self) -> None:
        if self._finalized:
            # It is necessary to manually update once here, because some model tool call parameters return too quickly
            # and complete within a few deltas, causing ToolCallDisplay to not be mounted yet,
            # which leads to an inability to correctly update args-display during finalize_arguments
            self._update_args_display(True)

    def append_arguments(self, delta: str):
        """Append a streamed argument fragment and refresh the display."""
        if not self._finalized:
            self._arguments += delta
            self._update_args_display()

    def finalize_arguments(self):
        """Mark the argument stream complete and render the final content."""
        self._finalized = True
        self._update_args_display(final=True)

    def _update_args_display(self, final: bool = False):
        """Render the current argument buffer into the #args-display Static.

        Valid JSON is pretty-printed with syntax highlighting; while still
        streaming, partial/empty content is shown as a muted placeholder;
        a final, non-empty, non-JSON buffer is flagged as invalid JSON.
        """
        try:
            args_static = self.query_one("#args-display", Static)
            args_static.styles.height = "auto"  # Ensure height recalculates
            display_content: Syntax | Text

            try:
                parsed_args = json.loads(self._arguments)
                pretty_json = json.dumps(parsed_args, indent=2)
                display_content = Syntax(
                    pretty_json,
                    "json",
                    theme="github-dark",  # or another theme
                    line_numbers=False,
                    word_wrap=True,
                    # background_color="transparent",  # <<< removed this line
                )
                args_static.remove_class("placeholder")  # <<< toggle the class on the Static itself
            except json.JSONDecodeError:
                if final and self._arguments.strip():  # Only show error if final and not empty
                    display_content = Text(f"Invalid JSON:\n{self._arguments}", style="red", overflow="fold")
                    args_static.remove_class("placeholder")
                elif not self._arguments.strip() and not final:  # Still receiving
                    display_content = Text("(Receiving arguments...)")
                    args_static.add_class("placeholder")
                else:  # Empty or non-final invalid string
                    display_content = Text(self._arguments, overflow="fold")
                    if not self._arguments.strip():
                        args_static.add_class("placeholder")
                    else:
                        args_static.remove_class("placeholder")

            args_static.update(display_content)
            self.styles.height = "auto"  # Trigger parent resize

        except Exception as e:
            if self.is_mounted:
                try:
                    args_static = self.query_one("#args-display", Static)
                    if args_static.is_mounted:
                        self.log.error(f"Error updating args display for {self.tool_id}: {e}")
                except Exception:  # Guard against query failing too
                    self.log.error(f"Error updating args display (query failed) for {self.tool_id}: {e}")
markup=False)
326 |         if self.is_error:
327 |             output_content.add_class("tool-output-error")
328 |         yield output_content  # mount the (possibly error-styled) content widget
329 | 
330 | 
331 | class SystemMessageDisplay(BaseMessageDisplay):
332 |     """Display system messages (e.g. errors, notifications)."""
333 | 
334 |     DEFAULT_CSS = """
335 |     SystemMessageDisplay {
336 |         color: $text-muted;
337 |         text-style: italic;
338 |         border: wide white; /* Ensure border color is explicitly not muted */
339 |         padding: 0 1;
340 |         height: auto;
341 |         margin-bottom: 1;
342 |     }
343 |     """
344 | 
345 |     def __init__(self, text: str, style: str = "dim", **kwargs):  # *style* is applied to the whole message text
346 |         # Use Text object for potential styling
347 |         super().__init__(Text(text, style=style), **kwargs)
348 |         self.styles.height = "auto"
349 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | <div align="center">

2 | Codexy Logo 3 |

4 | 5 |

Codexy

6 |

Lightweight coding agent that runs in your terminal (OpenAI Codex CLI Python version)

7 | 8 |

9 | 中文文档 | English 10 |

11 | 12 |

pip install -U codexy

13 | 14 | ![Codexy demo GIF](./assets/codexy-demo.gif) 15 | 16 | ![Codexy demo 2 GIF](./assets/codexy-demo-2.gif) 17 | 18 | --- 19 | 20 |
21 | <summary>Table of Contents</summary>
22 | 
23 | - [Original TypeScript Version](#original-typescript-version)
24 | - [Experimental Technology Disclaimer](#experimental-technology-disclaimer)
25 | - [Quickstart](#quickstart)
26 | - [Why Codexy?](#why-codexy)
27 | - [Security Model & Permissions](#security-model--permissions)
28 | - [System Requirements](#system-requirements)
29 | - [CLI Reference](#cli-reference)
30 | - [Configuration](#configuration)
31 | - [Project Docs](#project-docs)
32 | - [Contributing](#contributing)
33 | - [License](#license)
34 | - [Zero Data Retention (ZDR) Organization Limitation](#zero-data-retention-zdr-organization-limitation)
35 | 
36 | </details>
37 | 38 | --- 39 | 40 | ## Original TypeScript Version 41 | 42 | This project is a Python reimplementation of the original OpenAI Codex CLI, written in TypeScript. You can find the original repository here: 43 | 44 | [openai/codex (TypeScript)](https://github.com/openai/codex) 45 | 46 | This Python version aims to provide similar functionality using Python tools and libraries. 47 | 48 | ## Experimental Technology Disclaimer 49 | 50 | Codexy (the Python implementation of Codex CLI) is an experimental project under active development. It is not yet stable, may contain bugs, incomplete features, or undergo breaking changes. We're building it in the open with the community and welcome: 51 | 52 | - Bug reports 53 | - Feature requests 54 | - Pull requests 55 | - Good vibes 56 | 57 | Help us improve by filing issues or submitting PRs (see the Contributing section)! 58 | 59 | ## Quickstart 60 | 61 | Install globally using pip: 62 | 63 | ```shell 64 | pip install -U codexy 65 | ``` 66 | 67 | Next, set your OpenAI API key as an environment variable: 68 | 69 | ```shell 70 | export OPENAI_API_KEY="your-api-key-here" 71 | # Optional: Set a custom base URL for the OpenAI API (e.g., for a proxy or self-hosted service) 72 | # export OPENAI_BASE_URL="your-custom-base-url" 73 | # Optional: Set the API request timeout in milliseconds 74 | # export OPENAI_TIMEOUT_MS="300000" # e.g., 300000 for 5 minutes 75 | ``` 76 | 77 | > **Note:** This command sets the key only for your current terminal session. To make it permanent, add the `export` line to your shell's configuration file (e.g., `~/.zshrc`, `~/.bashrc`). 
78 | > 79 | > **Tip:** You can also place your API key and other environment variables into a `.env` file at the root of your project: 80 | > 81 | > ```env 82 | > OPENAI_API_KEY=your-api-key-here 83 | > # Optional: 84 | > # OPENAI_BASE_URL=your-custom-base-url 85 | > # OPENAI_TIMEOUT_MS=300000 86 | > ``` 87 | > 88 | > The CLI will automatically load variables from `.env` using `python-dotenv`. 89 | 90 | Run interactively: 91 | 92 | ```shell 93 | codexy 94 | ``` 95 | 96 | Or, run with a prompt as input (and optionally in `Full Auto` mode): 97 | 98 | ```shell 99 | codexy "explain this codebase to me" 100 | ``` 101 | 102 | ```shell 103 | # Be cautious with auto-approval modes 104 | codexy --approval-mode full-auto "create the fanciest todo-list app" 105 | ``` 106 | 107 | That's it – Codexy will interact with the OpenAI API, suggest file changes or commands, and (depending on your approval mode) execute them. 108 | 109 | --- 110 | 111 | ## Why Codexy? 112 | 113 | Codexy aims to bring the power of the original Codex CLI to the Python ecosystem. It's built for developers who prefer Python tooling or want to integrate agentic coding capabilities into Python workflows. 114 | 115 | - **Familiar Python Stack:** Uses common Python libraries like `click`, `textual`, `openai`, `httpx`. 116 | - **Terminal-Native:** Designed for developers who live in the terminal. 117 | - **Agentic Capabilities:** Understands prompts, interacts with code, suggests file edits, and can execute commands. 118 | - **Configurable Approvals:** Control the level of autonomy the agent has. 119 | - **Open Source:** Contribute to its development and see how it works. 120 | 121 | --- 122 | 123 | ## Security Model & Permissions 124 | 125 | Codexy lets you decide the level of autonomy the agent has via the `--approval-mode` flag (or configuration file). 
The modes determine what actions require your explicit confirmation: 126 | 127 | | Mode | What the agent may do without asking | Still requires approval | Notes | 128 | | ------------------------- | ------------------------------------------------------- | ------------------------------------------- | ------------------------------------------------------------------------ | 129 | | **Suggest**
(default) | • Read files
• Run known safe read-only commands¹ | • **All** file edits/writes
• Shell commands | Safest mode, requires confirmation for most actions. | 130 | | **Auto Edit** | • Read files
• Apply file edits/writes
• Safe reads¹ | • Shell commands | Automatically modifies files, but asks before running commands. | 131 | | **Full Auto** | • Read/write files
• Run shell commands²
• Safe reads¹ | – | Attempts auto-approval, **BUT sandboxing is NOT YET IMPLEMENTED**. | 132 | | **Dangerous Auto** | • Read/write files
• Run shell commands | – | **UNSAFE**. Auto-approves everything without sandboxing. Use with caution. | 133 | 134 | ¹ *Known safe read-only commands include `ls`, `cat`, `pwd`, `git status`, etc. User-configurable via `safe_commands` in config.* 135 | ² *While `full-auto` aims for sandboxed execution, **sandboxing is NOT YET IMPLEMENTED** in this Python version. Commands will run directly.* 136 | 137 | **⚠️ Important:** The Python version (`codexy`) currently **lacks the platform-specific sandboxing** (like macOS Seatbelt or Docker/iptables) found in the original TypeScript version. In `full-auto` mode, commands are executed directly on your system without network or filesystem restrictions imposed by the tool itself. The `dangerous-auto` mode explicitly runs everything unsandboxed. Use auto-approval modes with extreme caution, especially outside of trusted environments or version-controlled repositories. 138 | 139 | Codexy will show a warning/confirmation if you start in `auto-edit`, `full-auto`, or `dangerous-auto` while the directory is _not_ tracked by Git, so you have a safety net via version control. 140 | 141 | --- 142 | 143 | ## System Requirements 144 | 145 | | Requirement | Details | 146 | | ------------- | ------------------------------------------ | 147 | | Operating Sys | Linux, macOS, Windows (cross-platform) | 148 | | Python | **3.10 or newer** (see `pyproject.toml`) | 149 | | Pip | For installation | 150 | | Git (optional)| Recommended for safety | 151 | | Dependencies | `click`, `textual`, `openai`, etc. | 152 | 153 | --- 154 | 155 | ## CLI Reference 156 | 157 | ``` 158 | Usage: codexy [OPTIONS] [PROMPT] 159 | 160 | Interactive REPL for Codex agent. 161 | 162 | codexy Interactive REPL 163 | codexy "..." Initial prompt for interactive REPL 164 | 165 | Options: 166 | --model, -m TEXT Model to use (e.g., o4-mini). 167 | --image, -i PATH Path(s) to image files. (Not fully implemented) 168 | --view, -v PATH Inspect a saved rollout. 
(Not implemented) 169 | --quiet, -q Non-interactive mode. (Not implemented) 170 | --config, -c Open instructions file in editor. 171 | --writable-root, -w PATH Writable folder (for future sandboxing). 172 | --approval-mode, -a [suggest|auto-edit|full-auto|dangerous-auto] 173 | Override approval policy. 174 | --auto-edit Alias for --approval-mode=auto-edit. 175 | --full-auto Alias for --approval-mode=full-auto. 176 | --no-project-doc Do not include codex.md. 177 | --project-doc PATH Include additional markdown file. 178 | --full-stdout Do not truncate command output. 179 | --notify Enable desktop notifications. (Not implemented) 180 | --flex-mode Enable "flex-mode" tier. (Not implemented) 181 | --dangerously-auto-approve-everything 182 | Alias for --approval-mode=dangerous-auto. 183 | --full-context, -f Full-context mode. (Not implemented) 184 | --version Show the version and exit. 185 | -h, --help Show this message and exit. 186 | 187 | Commands: 188 | completion Generate shell completion script. 189 | ``` 190 | 191 | **In-App Commands (within the TUI):** 192 | 193 | | Command | Description | 194 | | --------------- | ----------------------------------------------- | 195 | | `/help` | Show commands and shortcuts | 196 | | `/model` | Switch LLM model (if before first response) | 197 | | `/approval` | Switch auto-approval mode | 198 | | `/history` | Show command history overlay | 199 | | `/clear` | Clear screen and current conversation context | 200 | | `/clearhistory` | Clear command history file | 201 | | `/bug` | Open browser to file a bug report (Not Implemented) | 202 | | `/compact` | Condense context summary (Not Implemented) | 203 | | `q` / `exit` | Quit the application | 204 | 205 | --- 206 | 207 | ## Configuration 208 | 209 | Codexy looks for configuration files in `~/.codexy/` (note the `codexy` directory name). 210 | 211 | - **`~/.codexy/config.yaml`** (or `.yml`, `.json`): Main configuration. 
212 | - **`~/.codexy/instructions.md`**: Global custom instructions for the agent. 213 | - **`~/.codexy/history.json`**: Stores command history. 214 | 215 | **Example `config.yaml`:** 216 | 217 | ```yaml 218 | # ~/.codexy/config.yaml 219 | model: o4-mini # Default model to use 220 | approval_mode: suggest # suggest | auto-edit | full-auto | dangerous-auto 221 | full_auto_error_mode: ask-user # ask-user | ignore-and-continue 222 | notify: false # Enable desktop notifications (Not fully implemented) 223 | history: 224 | max_size: 1000 225 | save_history: true 226 | safe_commands: # Commands safe to auto-approve in 'suggest' mode 227 | - git status 228 | - ls -la 229 | ``` 230 | 231 | **Example `instructions.md`:** 232 | 233 | ```markdown 234 | - Always use snake_case for Python variables. 235 | - Add type hints to all function definitions. 236 | - Prefer f-strings for formatting. 237 | ``` 238 | 239 | ### Memory Compression 240 | 241 | To help manage the conversation history and prevent exceeding the model's context length limit, Codexy includes a memory compression feature. When enabled, it automatically compresses older parts of the conversation history. 242 | 243 | **How it Works:** 244 | 245 | * The initial system prompt (from `instructions.md` or project docs) is always kept, if present. 246 | * A configurable number of the most recent messages in the conversation are kept uncompressed. 247 | * Messages between the initial system prompt and the recent messages are replaced with a single system notification indicating that a portion of the history has been summarized (e.g., `[System: X previous message(s) were summarized due to context length constraints.]`). 248 | 249 | **Configuration:** 250 | 251 | These settings are configured within the `memory` object in your `~/.codexy/config.json` or `~/.codexy/config.yaml` file. 252 | 253 | * `enable_compression` (boolean): 254 | * Set to `true` to enable the memory compression feature. 255 | * Default: `false`. 
256 | * `compression_threshold_factor` (float): 257 | * A value between 0.0 and 1.0. Compression is triggered when the estimated token count of the current conversation history exceeds this factor multiplied by the model's maximum context window size. 258 | * For example, if the model's max tokens is 4096 and this factor is `0.8`, compression will be attempted when the history exceeds approximately 3277 tokens. 259 | * Default: `0.8`. 260 | * `keep_recent_messages` (integer): 261 | * The number of most recent messages to always keep uncompressed at the end of the conversation. 262 | * Default: `5`. 263 | 264 | **Example `config.json`:** 265 | 266 | ```json 267 | { 268 | "model": "o4-mini", 269 | "memory": { 270 | "enable_compression": true, 271 | "compression_threshold_factor": 0.75, 272 | "keep_recent_messages": 10 273 | } 274 | // ... other settings 275 | } 276 | ``` 277 | 278 | **Example `config.yaml`:** 279 | 280 | ```yaml 281 | model: o4-mini 282 | memory: 283 | enable_compression: true 284 | compression_threshold_factor: 0.75 285 | keep_recent_messages: 10 286 | # ... other settings 287 | ``` 288 | 289 | --- 290 | 291 | ## Project Docs 292 | 293 | Similar to the original Codex CLI, Codexy can load project-specific context from a `codex.md` (or `.codex.md`, `CODEX.md`) file. 294 | 295 | It searches the current directory first, then walks up to the Git root (`.git` directory). If found, its content is appended to your global `instructions.md`. 296 | 297 | Disable this behavior with `--no-project-doc` or by setting the environment variable `CODEXY_DISABLE_PROJECT_DOC=1`. 298 | 299 | --- 300 | 301 | ## Contributing 302 | 303 | Contributions are welcome! Please refer to the main project [CONTRIBUTING guidelines](https://github.com/openai/codex/blob/main/README.md#contributing). 304 | 305 | For Python-specific development: 306 | 307 | - This project uses [PDM](https://pdm-project.org/) for dependency management. 
308 | - Install dependencies: `pdm install -G:dev` 309 | - Run tests: `pdm run pytest` 310 | - Format code: `pdm run ruff format .` 311 | - Lint code: `pdm run ruff check .` 312 | 313 | --- 314 | 315 | ## License 316 | 317 | This project is licensed under the Apache-2.0 License. See the [LICENSE](./LICENSE) file. 318 | 319 | --- 320 | 321 | ## Zero Data Retention (ZDR) Organization Limitation 322 | 323 | > **Note:** Codexy (Python) currently inherits the same limitation as the original Codex CLI and does **not** support OpenAI organizations with [Zero Data Retention (ZDR)](https://platform.openai.com/docs/guides/your-data#zero-data-retention) enabled due to its reliance on API features incompatible with ZDR. You may encounter 400 errors if your organization uses ZDR. 324 | --------------------------------------------------------------------------------