├── tests ├── core │ └── __init__.py ├── utils │ ├── __init__.py │ └── test_model_info.py ├── test_config.py └── tools │ └── test_apply_diff_tool.py ├── codexy ├── tui │ ├── widgets │ │ ├── __init__.py │ │ ├── overlays │ │ │ ├── __init__.py │ │ │ ├── history_overlay.py │ │ │ ├── help_overlay.py │ │ │ ├── model_overlay.py │ │ │ └── approval_overlay.py │ │ └── chat │ │ │ ├── __init__.py │ │ │ ├── thinking_indicator.py │ │ │ ├── history_view.py │ │ │ ├── header.py │ │ │ ├── input_area.py │ │ │ ├── command_review.py │ │ │ └── message_display.py │ └── __init__.py ├── __init__.py ├── exceptions.py ├── __main__.py ├── utils │ ├── __init__.py │ ├── model_utils.py │ ├── token_utils.py │ ├── storage.py │ ├── update_checker.py │ ├── model_info.py │ ├── security_check.py │ └── filesystem.py ├── tools │ ├── __init__.py │ ├── execute_command_tool.py │ └── apply_diff_tool.py └── cli │ ├── completion_scripts.py │ └── main.py ├── assets ├── logo.png ├── codexy-demo.gif └── codexy-demo-2.gif ├── .pre-commit-config.yaml ├── docs ├── pre-commit_ZH.md └── pre-commit.md ├── pyproject.toml ├── .gitignore ├── README_ZH.md ├── LICENSE └── README.md /tests/core/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /codexy/tui/widgets/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /codexy/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "0.0.10" 2 | PACKAGE_NAME = "codexy" 3 | -------------------------------------------------------------------------------- /assets/logo.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/AndersonBY/codexy/HEAD/assets/logo.png -------------------------------------------------------------------------------- /codexy/tui/__init__.py: -------------------------------------------------------------------------------- 1 | from .app import CodexTuiApp 2 | 3 | __all__ = ["CodexTuiApp"] 4 | -------------------------------------------------------------------------------- /assets/codexy-demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AndersonBY/codexy/HEAD/assets/codexy-demo.gif -------------------------------------------------------------------------------- /assets/codexy-demo-2.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AndersonBY/codexy/HEAD/assets/codexy-demo-2.gif -------------------------------------------------------------------------------- /codexy/tui/widgets/overlays/__init__.py: -------------------------------------------------------------------------------- 1 | from .approval_overlay import ApprovalModeOverlay 2 | from .help_overlay import HelpOverlay 3 | from .history_overlay import HistoryOverlay 4 | from .model_overlay import ModelOverlay 5 | 6 | __all__ = [ 7 | "HistoryOverlay", 8 | "HelpOverlay", 9 | "ModelOverlay", 10 | "ApprovalModeOverlay", 11 | ] 12 | -------------------------------------------------------------------------------- /codexy/exceptions.py: -------------------------------------------------------------------------------- 1 | """Custom exceptions for the codexy project.""" 2 | 3 | 4 | class codexyError(Exception): 5 | """Base exception for codexy errors.""" 6 | 7 | pass 8 | 9 | 10 | class ToolError(codexyError): 11 | """Exception related to tool execution.""" 12 | 13 | pass 14 | 15 | 16 | class ConfigError(codexyError): 17 | """Exception related to configuration 
issues.""" 18 | 19 | pass 20 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/astral-sh/ruff-pre-commit 3 | # Ruff version. 4 | rev: v0.8.4 5 | hooks: 6 | # Run the linter. 7 | - id: ruff 8 | args: [--fix] 9 | # Run the formatter. 10 | - id: ruff-format 11 | 12 | - repo: https://github.com/pre-commit/pre-commit-hooks 13 | rev: v5.0.0 14 | hooks: 15 | - id: trailing-whitespace 16 | - id: end-of-file-fixer 17 | - id: check-yaml 18 | - id: check-added-large-files 19 | -------------------------------------------------------------------------------- /codexy/__main__.py: -------------------------------------------------------------------------------- 1 | """Allows running the CLI via 'python -m codexy'.""" 2 | 3 | # Ensure that the main CLI function is called when running as a module. 4 | # Import from the 'cli' module located at the root level relative to the package directory. 5 | # This assumes the parent directory containing 'cli.py' is accessible when running the module. 
6 | from .cli.main import codexy 7 | 8 | if __name__ == "__main__": 9 | # Pass an empty object for context, similar to the original entry point check 10 | codexy(obj={}) 11 | -------------------------------------------------------------------------------- /codexy/tui/widgets/chat/__init__.py: -------------------------------------------------------------------------------- 1 | from .command_review import CommandReviewWidget 2 | from .header import ChatHeader 3 | from .history_view import ChatHistoryView 4 | from .input_area import ChatInputArea 5 | from .message_display import ( 6 | AssistantMessageDisplay, 7 | SystemMessageDisplay, 8 | ToolCallDisplay, 9 | ToolOutputDisplay, 10 | UserMessageDisplay, 11 | ) 12 | from .thinking_indicator import ThinkingIndicator 13 | 14 | __all__ = [ 15 | "ChatHeader", 16 | "ChatHistoryView", 17 | "ChatInputArea", 18 | "UserMessageDisplay", 19 | "AssistantMessageDisplay", 20 | "ToolCallDisplay", 21 | "ToolOutputDisplay", 22 | "SystemMessageDisplay", 23 | "CommandReviewWidget", 24 | "ThinkingIndicator", 25 | ] 26 | -------------------------------------------------------------------------------- /docs/pre-commit_ZH.md: -------------------------------------------------------------------------------- 1 | # Pre-commit 配置说明 2 | 3 | ## 概述 4 | 5 | 本项目已配置 pre-commit hooks,在每次 git commit 前自动执行代码检查和格式化。 6 | 7 | ## 包含的 hooks 8 | 9 | 1. **ruff lint** - 代码质量检查并自动修复 10 | 2. **ruff format** - 代码格式化 11 | 3. **trailing-whitespace** - 移除行尾空白 12 | 4. **end-of-file-fixer** - 确保文件以换行符结尾 13 | 5. **check-yaml** - YAML 文件语法检查 14 | 6. 
**check-added-large-files** - 防止提交大文件 15 | 16 | ## 使用方法 17 | 18 | ### 安装依赖 19 | ```bash 20 | pdm install 21 | ``` 22 | 23 | ### 安装 pre-commit hooks 24 | ```bash 25 | pdm run pre-commit install 26 | ``` 27 | 28 | ### 手动运行所有检查 29 | ```bash 30 | pdm run pre-commit run --all-files 31 | ``` 32 | 33 | ### 手动运行特定 hook 34 | ```bash 35 | pdm run pre-commit run ruff-format 36 | ``` 37 | 38 | ## 注意事项 39 | 40 | - 如果 pre-commit 检查失败,提交会被阻止 41 | - 如果代码被自动修复,需要重新 `git add` 并提交 42 | - 可以使用 `git commit --no-verify` 跳过 pre-commit 检查(不推荐) 43 | -------------------------------------------------------------------------------- /docs/pre-commit.md: -------------------------------------------------------------------------------- 1 | # Pre-commit Configuration Guide 2 | 3 | ## Overview 4 | 5 | This project has pre-commit hooks configured to automatically run code checks and formatting before each git commit. 6 | 7 | ## Included hooks 8 | 9 | 1. **ruff lint** - Code quality check with automatic fixes 10 | 2. **ruff format** - Code formatting 11 | 3. **trailing-whitespace** - Remove trailing whitespace 12 | 4. **end-of-file-fixer** - Ensure files end with a newline 13 | 5. **check-yaml** - YAML file syntax check 14 | 6. 
**check-added-large-files** - Prevent committing large files 15 | 16 | ## Usage 17 | 18 | ### Install dependencies 19 | ```bash 20 | pdm install 21 | ``` 22 | 23 | ### Install pre-commit hooks 24 | ```bash 25 | pdm run pre-commit install 26 | ``` 27 | 28 | ### Run all checks manually 29 | ```bash 30 | pdm run pre-commit run --all-files 31 | ``` 32 | 33 | ### Run specific hook manually 34 | ```bash 35 | pdm run pre-commit run ruff-format 36 | ``` 37 | 38 | ## Notes 39 | 40 | - If pre-commit checks fail, the commit will be blocked 41 | - If code is automatically fixed, you need to `git add` again and commit 42 | - You can use `git commit --no-verify` to skip pre-commit checks (not recommended) 43 | -------------------------------------------------------------------------------- /codexy/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Expose necessary functions from submodules 2 | 3 | from .filesystem import check_in_git, short_cwd, shorten_path 4 | from .model_info import get_max_tokens_for_model, get_model_max_tokens 5 | from .model_utils import ( 6 | format_model_for_display, 7 | get_available_models, 8 | is_model_supported, 9 | preload_models, 10 | sort_models_for_display, 11 | ) 12 | from .storage import ( 13 | DEFAULT_HISTORY_CONFIG, 14 | HistoryConfig, 15 | HistoryEntry, 16 | add_to_history, 17 | clear_command_history, 18 | load_command_history, 19 | save_command_history, 20 | ) 21 | from .token_utils import approximate_tokens_used 22 | from .update_checker import UpdateInfo, check_for_updates 23 | 24 | __all__ = [ 25 | "check_in_git", 26 | "shorten_path", 27 | "short_cwd", 28 | "load_command_history", 29 | "save_command_history", 30 | "add_to_history", 31 | "clear_command_history", 32 | "HistoryEntry", 33 | "DEFAULT_HISTORY_CONFIG", 34 | "HistoryConfig", 35 | "check_for_updates", 36 | "UpdateInfo", 37 | "get_available_models", 38 | "is_model_supported", 39 | "preload_models", 40 | 
"sort_models_for_display", 41 | "format_model_for_display", 42 | "get_max_tokens_for_model", 43 | "get_model_max_tokens", 44 | "approximate_tokens_used", 45 | ] 46 | -------------------------------------------------------------------------------- /codexy/tools/__init__.py: -------------------------------------------------------------------------------- 1 | """Implementations for the tools callable by the agent.""" 2 | 3 | from .apply_diff_tool import APPLY_DIFF_TOOL_DEF, apply_diff_tool 4 | from .apply_patch_tool import APPLY_PATCH_TOOL_DEF, apply_patch 5 | from .execute_command_tool import EXECUTE_COMMAND_TOOL_DEF, execute_command_tool 6 | from .file_tools import ( 7 | LIST_FILES_TOOL_DEF, 8 | READ_FILE_TOOL_DEF, 9 | WRITE_TO_FILE_TOOL_DEF, 10 | list_files_tool, 11 | read_file_tool, 12 | write_to_file_tool, 13 | ) 14 | 15 | # --- Tool Registration --- 16 | # Map tool names (used by the LLM) to their Python functions 17 | TOOL_REGISTRY = { 18 | "execute_command": execute_command_tool, 19 | "read_file": read_file_tool, 20 | "write_to_file": write_to_file_tool, 21 | "list_files": list_files_tool, 22 | "apply_diff": apply_diff_tool, 23 | "apply_patch": apply_patch, 24 | } 25 | 26 | # Combine all tool definitions 27 | AVAILABLE_TOOL_DEFS = [ 28 | EXECUTE_COMMAND_TOOL_DEF, 29 | READ_FILE_TOOL_DEF, 30 | WRITE_TO_FILE_TOOL_DEF, 31 | LIST_FILES_TOOL_DEF, 32 | APPLY_DIFF_TOOL_DEF, 33 | APPLY_PATCH_TOOL_DEF, 34 | ] 35 | 36 | __all__ = [ 37 | "read_file_tool", 38 | "write_to_file_tool", 39 | "list_files_tool", 40 | "apply_patch", 41 | "apply_diff_tool", 42 | "execute_command_tool", 43 | "AVAILABLE_TOOL_DEFS", 44 | "TOOL_REGISTRY", 45 | ] 46 | -------------------------------------------------------------------------------- /codexy/tui/widgets/chat/thinking_indicator.py: -------------------------------------------------------------------------------- 1 | from textual.reactive import reactive 2 | from textual.timer import Timer 3 | from textual.widgets import Static 4 | 5 | 6 | 
class ThinkingIndicator(Static):
    """Display a "Thinking..." dot animation plus an elapsed-seconds counter."""

    DEFAULT_CSS = """
    ThinkingIndicator {
        height: auto;
        padding: 1;
    }
    """

    # Label shown before the animated dots.
    message: reactive[str] = reactive("Thinking")
    # Elapsed seconds; updated externally via set_thinking_seconds().
    thinking_seconds: reactive[int] = reactive(0)
    # Current animation dots: ".", "..", or "...".
    _dots: reactive[str] = reactive(".")
    # Interval timer driving the dot animation; None until mounted.
    _timer: Timer | None = None

    def on_mount(self) -> None:
        """Render the initial text and start the 0.5s animation timer."""
        self.update_display()
        self._timer = self.set_interval(0.5, self.update_dots)

    def on_unmount(self) -> None:
        """Stop the animation timer when the widget is removed."""
        if self._timer:
            self._timer.stop()

    def update_dots(self) -> None:
        """Advance the dot animation (".", "..", "...", then wrap around)."""
        self._dots = self._dots + "." if len(self._dots) < 3 else "."
        self.update_display()

    def watch_message(self, message: str) -> None:
        """Refresh immediately when the label text changes.

        Fix: previously only `thinking_seconds` had a watcher, so changing the
        reactive `message` was not reflected until the next 0.5s dot tick.
        """
        self.update_display()

    def watch_thinking_seconds(self, seconds: int) -> None:
        """Refresh when the elapsed-seconds counter changes."""
        self.update_display()

    def update_display(self) -> None:
        """Compose and render the full display text."""
        self.update(f"{self.message}{self._dots} ({self.thinking_seconds}s)")

    def set_thinking_seconds(self, seconds: int) -> None:
        """External entry point to update the elapsed seconds."""
        self.thinking_seconds = seconds
= [ 17 | "click>=8.0", 18 | "PyYAML>=6.0", 19 | "openai>=1.0", 20 | "rich>=13.0", 21 | "detect-secrets>=1.5.0", 22 | "textual", 23 | "httpx>=0.28.1", 24 | "packaging>=25.0", 25 | "python-dotenv", 26 | "pyperclip>=1.9.0", 27 | ] 28 | description = "A Python implementation of the Codex CLI tool." 29 | name = "codexy" 30 | readme = "README.md" 31 | requires-python = ">=3.10" 32 | version = "0.0.10" 33 | 34 | [project.urls] 35 | "Bug Tracker" = "https://github.com/andersonby/codexy/issues" 36 | "Homepage" = "https://github.com/andersonby/codexy" 37 | 38 | [project.scripts] 39 | codexy = "codexy.cli.main:codexy" 40 | 41 | [tool.pdm] 42 | distribution = true 43 | 44 | [tool.pdm.build] 45 | excludes = ["tests"] 46 | 47 | [dependency-groups] 48 | dev = ["textual-dev", "pytest>=8.3.5", "pre-commit", "ruff"] 49 | 50 | [tool.ruff] 51 | line-length = 130 52 | target-version = "py310" 53 | 54 | [tool.ruff.lint] 55 | select = [ 56 | "E", # pycodestyle errors 57 | "W", # pycodestyle warnings 58 | "F", # pyflakes 59 | "I", # isort 60 | "B", # flake8-bugbear 61 | "C4", # flake8-comprehensions 62 | "UP", # pyupgrade 63 | ] 64 | 65 | ignore = [ 66 | "E501", # Line too long (controlled by line-length) 67 | ] 68 | 69 | [tool.ruff.format] 70 | quote-style = "double" 71 | indent-style = "space" 72 | line-ending = "auto" 73 | -------------------------------------------------------------------------------- /tests/utils/test_model_info.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import unittest 3 | from io import StringIO 4 | 5 | from codexy.utils.model_info import DEFAULT_MAX_TOKENS, MODEL_MAX_TOKENS, get_model_max_tokens 6 | 7 | 8 | class TestGetModelMaxTokens(unittest.TestCase): 9 | def test_known_model_names(self): 10 | self.assertEqual(get_model_max_tokens("gpt-4"), MODEL_MAX_TOKENS["gpt-4"]) 11 | self.assertEqual(get_model_max_tokens("gpt-3.5-turbo-16k"), MODEL_MAX_TOKENS["gpt-3.5-turbo-16k"]) 12 | 
self.assertEqual(get_model_max_tokens("o4-mini"), MODEL_MAX_TOKENS["o4-mini"]) 13 | self.assertEqual(get_model_max_tokens("gpt-4-turbo"), MODEL_MAX_TOKENS["gpt-4-turbo"]) 14 | self.assertEqual(get_model_max_tokens("gpt-4-32k"), MODEL_MAX_TOKENS["gpt-4-32k"]) 15 | 16 | def test_model_name_with_known_key_prefix(self): 17 | # Test variants that should match a more general key due to prefix matching logic 18 | self.assertEqual(get_model_max_tokens("gpt-4-turbo-preview"), MODEL_MAX_TOKENS["gpt-4-turbo"]) 19 | self.assertEqual(get_model_max_tokens("gpt-4-0125-preview"), MODEL_MAX_TOKENS["gpt-4-turbo"]) 20 | self.assertEqual(get_model_max_tokens("gpt-4-1106-preview"), MODEL_MAX_TOKENS["gpt-4-turbo"]) 21 | self.assertEqual(get_model_max_tokens("custom-gpt-4-model"), MODEL_MAX_TOKENS["gpt-4"]) 22 | self.assertEqual(get_model_max_tokens("gpt-3.5-turbo-instruct"), 4096) 23 | self.assertEqual(get_model_max_tokens("my-o4-mini-variant"), MODEL_MAX_TOKENS["o4-mini"]) 24 | 25 | def test_unknown_model_name(self): 26 | original_stderr = sys.stderr 27 | sys.stderr = captured_stderr = StringIO() 28 | try: 29 | self.assertEqual(get_model_max_tokens("unknown-model-xyz"), DEFAULT_MAX_TOKENS) 30 | self.assertIn("Warning: Unknown model name 'unknown-model-xyz'", captured_stderr.getvalue()) 31 | finally: 32 | sys.stderr = original_stderr 33 | 34 | def test_order_of_checking(self): 35 | # Ensure that "gpt-4-turbo" is checked before "gpt-4" 36 | self.assertEqual(get_model_max_tokens("gpt-4-turbo-specific-variant"), MODEL_MAX_TOKENS["gpt-4-turbo"]) 37 | # Ensure "gpt-4-32k" is checked before "gpt-4" 38 | self.assertEqual(get_model_max_tokens("gpt-4-32k-specific-variant"), MODEL_MAX_TOKENS["gpt-4-32k"]) 39 | # Ensure "gpt-3.5-turbo-16k" is checked before "gpt-3.5-turbo" 40 | self.assertEqual(get_model_max_tokens("gpt-3.5-turbo-16k-variant"), MODEL_MAX_TOKENS["gpt-3.5-turbo-16k"]) 41 | 42 | 43 | if __name__ == "__main__": 44 | unittest.main() 45 | 
-------------------------------------------------------------------------------- /codexy/tui/widgets/chat/history_view.py: -------------------------------------------------------------------------------- 1 | from textual.containers import Container, VerticalScroll 2 | 3 | from .message_display import ( 4 | AssistantMessageDisplay, 5 | BaseMessageDisplay, 6 | SystemMessageDisplay, 7 | ToolCallDisplay, 8 | ToolOutputDisplay, 9 | UserMessageDisplay, 10 | ) 11 | 12 | 13 | class ChatHistoryView(VerticalScroll): 14 | """Display the scrollable area for chat message history.""" 15 | 16 | DEFAULT_CSS = """ 17 | ChatHistoryView { 18 | border: none; 19 | padding: 0 1; 20 | } 21 | ChatHistoryView > Container { 22 | /* Ensure containers take full width for alignment */ 23 | width: 100%; 24 | height: auto; 25 | /* Add some spacing between message containers */ 26 | margin-bottom: 1; 27 | } 28 | ChatHistoryView > .user-message-container { 29 | align-horizontal: right; /* Align user messages to the right */ 30 | } 31 | ChatHistoryView > .assistant-message-container, 32 | ChatHistoryView > .tool-call-container, 33 | ChatHistoryView > .tool-output-container, 34 | ChatHistoryView > .system-message-container { 35 | align-horizontal: left; /* Align others to the left */ 36 | } 37 | """ 38 | 39 | def add_message(self, message_widget: BaseMessageDisplay): 40 | """ 41 | Add a new message component to the history view. 42 | Now wraps the message component in a Container to control alignment. 
43 | """ 44 | # Create a container to wrap the message component 45 | container = Container(message_widget) 46 | container.styles.height = "auto" # Ensure container height adapts 47 | 48 | # Add CSS class based on message type 49 | if isinstance(message_widget, UserMessageDisplay): 50 | container.add_class("user-message-container") 51 | elif isinstance(message_widget, AssistantMessageDisplay): 52 | container.add_class("assistant-message-container") 53 | elif isinstance(message_widget, ToolCallDisplay): 54 | container.add_class("tool-call-container") 55 | elif isinstance(message_widget, ToolOutputDisplay): 56 | container.add_class("tool-output-container") 57 | elif isinstance(message_widget, SystemMessageDisplay): 58 | container.add_class("system-message-container") 59 | else: 60 | container.add_class("other-message-container") 61 | 62 | # Mount the wrapped container, not the message component directly 63 | self.mount(container) 64 | # Scroll to the bottom, ensuring new messages are visible 65 | self.call_after_refresh(self.scroll_end, animate=True) 66 | 67 | def clear(self): 68 | """Clear all history messages.""" 69 | self.remove_children() 70 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # Pipfile.lock 90 | 91 | # poetry 92 | # Poetry pyproject.toml: https://python-poetry.org/docs/pyproject/ 93 | # It is generally recommended to include poetry.lock in version control. 94 | # poetry.lock 95 | 96 | # pdm 97 | # Similar to Pipfile.lock and poetry.lock, it is generally recommended to include pdm.lock in version control. 98 | # pdm.lock 99 | # pdm stores its cache in the .pdm directory, which should be ignored. 100 | .pdm/ 101 | 102 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 103 | __pypackages__/ 104 | 105 | # Celery stuff 106 | celerybeat-schedule 107 | celerybeat.pid 108 | 109 | # SageMath parsed files 110 | *.sage.py 111 | 112 | # Environments 113 | .env 114 | .venv 115 | env/ 116 | venv/ 117 | ENV/ 118 | env.bak/ 119 | venv.bak/ 120 | 121 | # Spyder project settings 122 | .spyderproject 123 | .spyproject 124 | 125 | # Rope project settings 126 | .ropeproject 127 | 128 | # mkdocs documentation 129 | /site 130 | 131 | # mypy 132 | .mypy_cache/ 133 | .dmypy.json 134 | dmypy.json 135 | 136 | # Pyre type checker 137 | .pyre/ 138 | 139 | # pytype static analysis results 140 | .pytype/ 141 | 142 | # Cython debug symbols 143 | cython_debug/ 144 | 145 | # Editor directories and files 146 | .vscode/ 147 | .idea/ 148 | .history/ 149 | *.swp 150 | *~ 151 | .project 152 | .pydevproject 153 | .settings/ 154 | 155 | # OS generated files 156 | .DS_Store 157 | Thumbs.db 158 | Icon? 159 | .Spotlight-V100 160 | 161 | # Logs 162 | *.log 163 | 164 | # Environment variables 165 | .env* 166 | !.env.example 167 | 168 | # Coverage data 169 | coverage/ 170 | .nyc_output/ # Keep if using JS tools alongside Python? Unlikely for now. 
171 | 172 | # Other Caches 173 | .cache/ 174 | .pytest_cache/ 175 | 176 | .pdm-python 177 | pdm.lock 178 | -------------------------------------------------------------------------------- /codexy/tui/widgets/overlays/history_overlay.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from typing import cast 3 | 4 | from rich.text import Text 5 | from textual import events 6 | from textual.app import ComposeResult 7 | from textual.message import Message 8 | from textual.widgets import Label, ListItem, ListView, Static 9 | 10 | from ....utils.storage import HistoryEntry 11 | 12 | 13 | class HistoryOverlay(Static): 14 | """A floating layer for displaying and selecting command history.""" 15 | 16 | DEFAULT_CSS = """ 17 | HistoryOverlay ListView { 18 | border: none; 19 | background: $panel-darken-1; 20 | } 21 | HistoryOverlay Label { 22 | padding: 0 1; 23 | color: $text-muted; 24 | height: 1; 25 | } 26 | HistoryOverlay ListItem { 27 | padding: 0 1; 28 | height: 1; 29 | } 30 | HistoryOverlay ListItem > Static { 31 | height: 1; 32 | } 33 | HistoryOverlay ListItem :hover { 34 | background: $accent-darken-1; 35 | } 36 | HistoryOverlay ListItem.--highlight { 37 | background: $accent !important; 38 | color: $text !important; 39 | } 40 | HistoryOverlay ListItem.--highlight:focus { 41 | background: $accent-darken-1 !important; 42 | } 43 | """ 44 | 45 | # --- Messages --- 46 | class SelectHistory(Message): 47 | """Sent when a user selects a history entry.""" 48 | 49 | def __init__(self, command: str): 50 | self.command = command 51 | super().__init__() 52 | 53 | class ExitHistory(Message): 54 | """Sent when a user exits history view (e.g. 
by pressing ESC).""" 55 | 56 | pass 57 | 58 | # --- UI Composition & Updates --- 59 | def compose(self) -> ComposeResult: 60 | yield Label("Command History (↑/↓ Select, Enter Use, Esc Close)") 61 | yield ListView(id="history-list") 62 | 63 | def set_history(self, history_entries: list[HistoryEntry]): 64 | """Fill the list with history entries.""" 65 | list_view = self.query_one("#history-list", ListView) 66 | list_view.clear() # Clear old entries 67 | # Iterate in reverse to show latest at the top 68 | for entry in reversed(history_entries): 69 | # Format timestamp 70 | dt = datetime.fromtimestamp(entry["timestamp"]) 71 | time_str = dt.strftime("%Y-%m-%d %H:%M:%S") 72 | # Create display text 73 | display_text = Text.assemble((f"{time_str} ", "dim"), (entry["command"], "")) 74 | # Create ListItem, storing original command as its value 75 | # Note: ListItem itself doesn't have a value property, we might need to subclass 76 | # Or use another way to store the original command. A simple method is to use ID. 77 | # Alternatively, when selected, extract from Label. 78 | # For simplicity, we will extract in on_list_view_selected. 
79 | list_view.append(ListItem(Static(display_text))) # Use Static to display Rich Text 80 | # If list is not empty, highlight first (latest) 81 | if len(list_view): 82 | list_view.index = 0 83 | 84 | # --- Event Handlers --- 85 | def on_list_view_selected(self, event: ListView.Selected) -> None: 86 | """Handle list item selection event.""" 87 | event.stop() 88 | selected_item = event.item 89 | if selected_item: 90 | # Extract original command text from Static component 91 | static_widget = selected_item.query_one(Static) 92 | rich_text = cast(Text, static_widget.renderable) # Assuming it's Text 93 | # Extract command part (assuming timestamp followed by command) 94 | command_text = rich_text.plain.split(" ", 2)[-1] # Simple split logic 95 | self.post_message(self.SelectHistory(command_text)) 96 | 97 | # Allow closing via ESC 98 | def on_key(self, event: events.Key) -> None: 99 | if event.key == "escape": 100 | event.stop() 101 | self.post_message(self.ExitHistory()) 102 | -------------------------------------------------------------------------------- /codexy/tui/widgets/overlays/help_overlay.py: -------------------------------------------------------------------------------- 1 | from rich.text import Text 2 | from textual import events 3 | from textual.app import ComposeResult 4 | from textual.containers import VerticalScroll 5 | from textual.message import Message 6 | from textual.widgets import Label, Static 7 | 8 | 9 | class HelpOverlay(Static): 10 | """An overlay that displays help information about commands and shortcuts.""" 11 | 12 | DEFAULT_CSS = """ 13 | HelpOverlay { 14 | layer: help_layer; 15 | display: none; 16 | align: center middle; 17 | width: 80%; 18 | max-width: 80; 19 | height: 80%; 20 | max-height: 25; 21 | border: thick $accent; 22 | background: $panel; 23 | padding: 1 2; 24 | overflow-y: auto; 25 | } 26 | HelpOverlay.-active { 27 | display: block; 28 | } 29 | HelpOverlay #help-title { 30 | width: 100%; 31 | text-align: center; 32 | 
margin-bottom: 1; 33 | text-style: bold; 34 | } 35 | HelpOverlay .help-section-title { 36 | margin-top: 1; 37 | text-style: bold underline; 38 | } 39 | HelpOverlay Static.help-command .command { 40 | color: $secondary; 41 | text-style: bold; 42 | width: 15; 43 | } 44 | HelpOverlay Static.help-command .description { 45 | width: 1fr; 46 | } 47 | HelpOverlay Static.help-key .key { 48 | color: $accent; 49 | text-style: bold; 50 | width: 10; 51 | } 52 | HelpOverlay Static.help-key .description { 53 | width: 1fr; 54 | } 55 | HelpOverlay Static.help-line { 56 | height: 1; 57 | width: 100%; 58 | margin-bottom: 1; 59 | } 60 | HelpOverlay #help-footer { 61 | margin-top: 1; 62 | width: 100%; 63 | text-align: center; 64 | color: $text-muted; 65 | } 66 | """ 67 | 68 | COMMANDS: list[tuple[str, str]] = [ 69 | ("/help", "Show this help overlay"), 70 | ("/model", "Switch the LLM model in-session"), 71 | ("/approval", "Switch auto-approval mode"), 72 | ("/history", "Show command & file history for this session"), 73 | ("/clear", "Clear screen & context"), 74 | ("/clearhistory", "Clear command history from disk"), 75 | ("/bug", "File a bug report with session log"), 76 | ("/compact", "Condense context into a summary (not implemented)"), 77 | ("q | exit | :q", "Exit codexy"), 78 | ] 79 | 80 | KEYBINDINGS: list[tuple[str, str]] = [ 81 | ("Ctrl+J/Ctrl+Enter", "Submit message / Approve command"), 82 | ("Up/Down", "Navigate history / options"), 83 | ("ESC", "Cancel input / Deny command / Close overlay"), 84 | ("Ctrl+Q", "Quit Application"), 85 | ("F1", "Show this help overlay"), 86 | ("F2", "Change Model (not implemented)"), 87 | ("F3", "Change Approval Mode (not implemented)"), 88 | ("F4", "Show Command History"), 89 | # ("Ctrl+X", "Open External Editor (not implemented)"), 90 | ] 91 | 92 | class ExitHelp(Message): 93 | """Message to signal exiting the help overlay.""" 94 | 95 | pass 96 | 97 | def compose(self) -> ComposeResult: 98 | yield Label("Available Commands & Shortcuts", 
id="help-title") 99 | with VerticalScroll(): 100 | yield Label("Slash Commands", classes="help-section-title") 101 | 102 | for command, description in self.COMMANDS: 103 | line_text = Text.assemble( 104 | (f"{command:<15}", "bold"), 105 | f" - {description}", 106 | ) 107 | yield Static(line_text, classes="help-line help-command") 108 | 109 | yield Label("Keyboard Shortcuts", classes="help-section-title") 110 | 111 | for key, description in self.KEYBINDINGS: 112 | line_text = Text.assemble( 113 | (f"{key:<10}", "bold"), 114 | f" - {description}", 115 | ) 116 | yield Static(line_text, classes="help-line help-key") 117 | 118 | yield Label("Press ESC to close", id="help-footer") 119 | 120 | def on_key(self, event: events.Key) -> None: 121 | """Handle key press to close the overlay.""" 122 | if event.key == "escape": 123 | event.stop() 124 | # Post message to the App to handle closing 125 | self.post_message(self.ExitHelp()) 126 | -------------------------------------------------------------------------------- /codexy/cli/completion_scripts.py: -------------------------------------------------------------------------------- 1 | _COMPLETION_SCRIPTS = { 2 | "bash": """ 3 | _codexy_completion() { 4 | local cur prev words cword 5 | _get_comp_words_by_ref -n : cur prev words cword 6 | 7 | # Basic file/directory completion for options that take paths 8 | if [[ "$prev" == "--image" || "$prev" == "-i" || "$prev" == "--view" || "$prev" == "-v" || "$prev" == "--writable-root" || "$prev" == "-w" || "$prev" == "--project-doc" ]]; then 9 | _filedir 10 | return 0 11 | fi 12 | 13 | # Completion for the approval-mode option 14 | if [[ "$prev" == "--approval-mode" || "$prev" == "-a" ]]; then 15 | COMPREPLY=( $(compgen -W "suggest auto-edit full-auto dangerous-auto" -- "$cur") ) 16 | return 0 17 | fi 18 | 19 | # Completion for the model option (can add common models here if desired) 20 | if [[ "$prev" == "--model" || "$prev" == "-m" ]]; then 21 | COMPREPLY=( $(compgen -W "o4-mini o3 
gpt-4.1 gpt-4o" -- "$cur") ) 22 | return 0 23 | fi 24 | 25 | # General argument completion (e.g., main prompt) or option names 26 | if [[ "$cur" == -* ]]; then 27 | COMPREPLY=( $(compgen -W "-h --help --version --model -m --image -i --view -v --quiet -q --config -c --writable-root -w --approval-mode -a --auto-edit --full-auto --no-project-doc --project-doc --full-stdout --notify --dangerously-auto-approve-everything --full-context -f" -- "$cur") ) 28 | else 29 | # Default to file/directory completion for arguments if not an option 30 | _filedir 31 | fi 32 | 33 | return 0 34 | } 35 | complete -F _codexy_completion codexy 36 | """, 37 | "zsh": """ 38 | #compdef codexy 39 | 40 | _codexy() { 41 | local -a options 42 | options=( 43 | '(-h --help)'{-h,--help}'[Show help message]' 44 | '--version[Show version information]' 45 | '(-m --model)'{-m,--model=}'[Model to use]: :(o4-mini o3 gpt-4.1 gpt-4o)' 46 | '(-i --image)'{-i,--image=}'[Path to image file]:_files' 47 | '(-v --view)'{-v,--view=}'[Path to rollout file]:_files' 48 | '(-q --quiet)'{-q,--quiet}'[Non-interactive mode]' 49 | '(-c --config)'{-c,--config}'[Open instructions file]' 50 | '(-w --writable-root)'{-w,--writable-root=}'[Writable root for full-auto]:_files -/' 51 | '(-a --approval-mode)'{-a,--approval-mode=}'[Approval policy]: :(suggest auto-edit full-auto dangerous-auto)' 52 | '--auto-edit[Auto-approve file edits]' 53 | '--full-auto[Auto-approve edits and sandboxed commands]' 54 | '--no-project-doc[Do not include codex.md]' 55 | '--project-doc=[Path to project doc]:_files' 56 | '--full-stdout[Do not truncate stdout/stderr]' 57 | '--notify[Enable desktop notifications]' 58 | '--dangerously-auto-approve-everything[Auto-approve everything unsandboxed (DANGEROUS)]' 59 | '(-f --full-context)'{-f,--full-context}'[Full-context mode]' 60 | '*:prompt:_files' 61 | ) 62 | _arguments $options 63 | } 64 | _codexy 65 | """, 66 | "fish": """ 67 | # fish completion for codexy 68 | complete -c codexy -f -a "completion" -d 
"Generate shell completion script" 69 | 70 | # Options for main command 71 | complete -c codexy -s h -l help -d 'Show help message' 72 | complete -c codexy -l version -d 'Show version information' 73 | complete -c codexy -s m -l model -d 'Model to use' -xa "o4-mini o3 gpt-4.1 gpt-4o" 74 | complete -c codexy -s i -l image -d 'Path to image file' -r -F 75 | complete -c codexy -s v -l view -d 'Path to rollout file' -r -F 76 | complete -c codexy -s q -l quiet -d 'Non-interactive mode' 77 | complete -c codexy -s c -l config -d 'Open instructions file' 78 | complete -c codexy -s w -l writable-root -d 'Writable root for full-auto' -r -F 79 | complete -c codexy -s a -l approval-mode -d 'Approval policy' -xa "suggest auto-edit full-auto dangerous-auto" 80 | complete -c codexy -l auto-edit -d 'Auto-approve file edits' 81 | complete -c codexy -l full-auto -d 'Auto-approve edits and sandboxed commands' 82 | complete -c codexy -l no-project-doc -d 'Do not include codex.md' 83 | complete -c codexy -l project-doc -d 'Path to project doc' -r -F 84 | complete -c codexy -l full-stdout -d 'Do not truncate stdout/stderr' 85 | complete -c codexy -l notify -d 'Enable desktop notifications' 86 | complete -c codexy -l dangerously-auto-approve-everything -d 'Auto-approve everything unsandboxed (DANGEROUS)' 87 | complete -c codexy -l full-context -d 'Full-context mode' 88 | 89 | # Options for 'completion' command 90 | complete -c codexy -n "__fish_seen_subcommand_from completion" -f -a "bash zsh fish" -d "Shell type" 91 | 92 | # Default argument completion (likely file paths or prompt text) 93 | complete -c codexy -f -a "(__fish_complete_path)" 94 | """, 95 | } 96 | -------------------------------------------------------------------------------- /codexy/tui/widgets/chat/header.py: -------------------------------------------------------------------------------- 1 | from typing import cast 2 | 3 | from textual.app import ComposeResult 4 | from textual.containers import Horizontal 5 | from 
textual.reactive import reactive 6 | from textual.widgets import Label, Static 7 | 8 | from ....config import AppConfig 9 | from ....utils.filesystem import short_cwd 10 | 11 | 12 | class ChatHeader(Static): 13 | """Display the title bar for chat session information.""" 14 | 15 | DEFAULT_CSS = """ 16 | ChatHeader { 17 | dock: top; 18 | width: 100%; 19 | background: $accent-darken-2; /* Use theme color */ 20 | color: $text; 21 | height: auto; 22 | padding: 0 1; 23 | border-bottom: thick $accent; /* Add bottom border */ 24 | } 25 | ChatHeader Horizontal { 26 | width: 1fr; 27 | height: 1; /* Force height to 1 for single line */ 28 | align: left middle; 29 | overflow: hidden; /* Hide overflow if content is too long */ 30 | } 31 | ChatHeader Label { 32 | margin-right: 2; 33 | height: 1; 34 | text-style: bold; 35 | content-align: left middle; 36 | overflow: hidden; /* Prevent label content itself from wrapping */ 37 | text-overflow: ellipsis; /* Add ellipsis if label content is too long */ 38 | } 39 | ChatHeader .info { 40 | color: $text-muted; 41 | text-style: none; 42 | width: auto; /* Let info labels take their needed width */ 43 | } 44 | ChatHeader .value { 45 | color: $text; 46 | text-style: bold; 47 | width: auto; /* Let value labels take their needed width */ 48 | max-width: 25%; /* Limit max width of value labels */ 49 | } 50 | ChatHeader #session-label { 51 | /* Allow session ID to take more space if needed, but still limit */ 52 | max-width: 35%; 53 | width: 1fr; /* Allow it to shrink if needed */ 54 | text-align: right; /* Align session ID to the right */ 55 | } 56 | """ 57 | 58 | # Keep its own reactives to display the data 59 | session_id: reactive[str] = reactive("N/A") 60 | cwd: reactive[str] = reactive("N/A") 61 | model: reactive[str] = reactive("N/A") 62 | approval_mode: reactive[str] = reactive("N/A") 63 | 64 | # Store config for reference if needed, but maybe not necessary 65 | _app_config: AppConfig | None = None 66 | 67 | def compose(self) -> 
ComposeResult: 68 | with Horizontal(): 69 | yield Label("Dir:", classes="info") 70 | yield Label(self.cwd, classes="value") 71 | yield Label("Model:", classes="info") 72 | yield Label(self.model, classes="value") 73 | yield Label("Approval:", classes="info") 74 | yield Label(self.approval_mode, classes="value", id="approval-label") 75 | # Use remaining space for session ID, aligned right 76 | yield Label("Session:", classes="info", shrink=True) # Allow info label to shrink 77 | yield Label(self.session_id, classes="value", id="session-label") 78 | 79 | def update_info(self, config: AppConfig, session_id: str | None = None): 80 | """Update Header display info (called once on mount usually).""" 81 | self._app_config = config 82 | self.session_id = session_id or "N/A" 83 | self.cwd = short_cwd() 84 | self.model = config.get("model", "N/A") 85 | self.approval_mode = config.get("effective_approval_mode", "N/A") 86 | 87 | # Watchers update the specific Label widgets 88 | def watch_cwd(self, new_cwd: str) -> None: 89 | try: 90 | label = cast(Label, self.query("Label").filter(".value").first()) 91 | label.update(new_cwd) 92 | except Exception: 93 | pass 94 | 95 | def watch_model(self, new_model: str) -> None: 96 | try: 97 | model_label = cast(Label, self.query("Label").filter(".value")[1]) # Assume Model is the second value 98 | model_label.update(new_model) 99 | except Exception: 100 | pass 101 | 102 | def watch_approval_mode(self, new_mode: str) -> None: 103 | try: 104 | approval_label = cast(Label, self.query("Label").filter(".value")[2]) # Assume Approval is the third value 105 | approval_label.update(new_mode) 106 | except Exception: 107 | pass 108 | 109 | def watch_session_id(self, new_id: str) -> None: 110 | try: 111 | session_label = cast(Label, self.query_one("#session-label", Label)) 112 | session_label.update(new_id) 113 | except Exception: 114 | pass 115 | -------------------------------------------------------------------------------- 
/codexy/utils/model_utils.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import sys 3 | 4 | from openai import APIError, AsyncOpenAI 5 | 6 | # --- Constants --- 7 | # Define recommended models (adjust as needed) 8 | RECOMMENDED_MODELS: list[str] = ["o4-mini", "o3", "gpt-4o", "gpt-4.1"] 9 | MODEL_LIST_TIMEOUT_SECONDS = 5.0 # Timeout for fetching model list 10 | 11 | # --- Caching --- 12 | # Simple in-memory cache for the model list 13 | _cached_models: list[str] | None = None 14 | _cache_lock = asyncio.Lock() 15 | _is_fetching = False 16 | 17 | # --- Functions --- 18 | 19 | 20 | async def _fetch_models_from_api(client: AsyncOpenAI) -> list[str]: 21 | """Fetches the list of models from the OpenAI API.""" 22 | global _is_fetching 23 | if _is_fetching: 24 | # Avoid concurrent fetches if one is already in progress 25 | print("Model fetch already in progress, waiting...", file=sys.stderr) 26 | while _is_fetching: 27 | await asyncio.sleep(0.1) 28 | return _cached_models or [] # Return potentially updated cache 29 | 30 | _is_fetching = True 31 | try: 32 | print("Fetching available models from OpenAI API...", file=sys.stderr) 33 | models_response = await client.models.list() 34 | # Extract model IDs, filter out older models if desired, and sort 35 | # Example filtering: models starting with 'gpt-', 'ft:', 'o3', 'o4' 36 | models = sorted( 37 | m.id 38 | for m in models_response.data 39 | if m.id and (m.id.startswith("gpt-") or m.id.startswith("ft:") or m.id.startswith("o3") or m.id.startswith("o4")) 40 | ) 41 | print(f"Fetched {len(models)} models.", file=sys.stderr) 42 | return models 43 | except APIError as e: 44 | print(f"Warning: API Error fetching models: {e.code} - {e.message}", file=sys.stderr) 45 | return [] # Return empty on API error 46 | except Exception as e: 47 | print(f"Warning: Unexpected error fetching models: {e}", file=sys.stderr) 48 | return [] # Return empty on other errors 49 | finally: 50 | 
_is_fetching = False 51 | 52 | 53 | async def get_available_models(client: AsyncOpenAI, force_refresh: bool = False) -> list[str]: 54 | """ 55 | Gets the list of available models, using cache if available and not forced. 56 | Adds recommended models even if the API call fails. 57 | """ 58 | global _cached_models 59 | async with _cache_lock: 60 | if _cached_models is None or force_refresh: 61 | fetched_models = await _fetch_models_from_api(client) 62 | # Combine fetched models with recommended models, ensuring uniqueness and sorting 63 | combined_models = set(fetched_models) | set(RECOMMENDED_MODELS) 64 | _cached_models = sorted(combined_models) 65 | if not fetched_models: 66 | print("Warning: Using only recommended models due to fetch failure.", file=sys.stderr) 67 | 68 | return _cached_models if _cached_models is not None else list(RECOMMENDED_MODELS) # Fallback 69 | 70 | 71 | async def preload_models(client: AsyncOpenAI): 72 | """Initiates the model fetching process in the background.""" 73 | async with _cache_lock: 74 | if _cached_models is None and not _is_fetching: 75 | asyncio.create_task(_fetch_models_from_api(client)) 76 | 77 | 78 | async def is_model_supported(model_id: str, client: AsyncOpenAI) -> bool: 79 | """Checks if a given model ID is likely supported.""" 80 | if not model_id: 81 | return False 82 | # Assume recommended models are always supported initially 83 | if model_id in RECOMMENDED_MODELS: 84 | return True 85 | try: 86 | available = await get_available_models(client) 87 | return model_id in available 88 | except Exception: 89 | # If check fails, conservatively assume it might be supported 90 | return True 91 | 92 | 93 | def sort_models_for_display(models: list[str], current_model: str) -> list[str]: 94 | """Sorts models, putting recommended and current at the top.""" 95 | recommended_set = set(RECOMMENDED_MODELS) 96 | current_list = [m for m in models if m == current_model] 97 | recommended_list = sorted([m for m in models if m in 
recommended_set and m != current_model]) 98 | other_list = sorted([m for m in models if m not in recommended_set and m != current_model]) 99 | return current_list + recommended_list + other_list 100 | 101 | 102 | def format_model_for_display(model_id: str, current_model: str) -> str: 103 | """Formats the model ID for display, adding markers.""" 104 | prefix = "" 105 | if model_id == current_model: 106 | prefix += "✓ " # Checkmark for current 107 | if model_id in RECOMMENDED_MODELS: 108 | prefix += "⭐ " # Star for recommended 109 | return f"{prefix}{model_id}" 110 | -------------------------------------------------------------------------------- /codexy/utils/token_utils.py: -------------------------------------------------------------------------------- 1 | """Utilities for estimating token usage.""" 2 | 3 | import json 4 | import math 5 | from collections.abc import Sequence 6 | 7 | from openai.types.chat import ChatCompletionContentPartParam, ChatCompletionMessageParam, ChatCompletionMessageToolCall 8 | 9 | # Simple approximation: 4 characters per token on average 10 | CHARS_PER_TOKEN_ESTIMATE = 4 11 | 12 | 13 | def _count_chars_in_content(content: str | Sequence[ChatCompletionContentPartParam] | None) -> int: 14 | """Counts characters in message content, handling different formats.""" 15 | if content is None: 16 | return 0 17 | if isinstance(content, str): 18 | return len(content) 19 | if isinstance(content, list): 20 | count = 0 21 | for part in content: 22 | if isinstance(part, dict): 23 | part_type = part.get("type") 24 | if part_type == "text" or part_type == "input_text" or part_type == "output_text": 25 | text_part = part.get("text") 26 | if isinstance(text_part, str): 27 | count += len(text_part) 28 | elif part_type == "input_file": # As in codex-cli 29 | filename_part = part.get("filename") 30 | if isinstance(filename_part, str): 31 | count += len(filename_part) 32 | # Ignore image URLs for token count approximation 33 | # elif part_type == "image_url": 
def _count_chars_in_content(content: str | Sequence[ChatCompletionContentPartParam] | None) -> int:
    """Count characters in message content, handling the different content shapes.

    Plain strings count their full length. Structured part lists count the
    text-bearing field of each part: "text"/"input_text"/"output_text" text,
    "input_file" filenames and "refusal" messages (mirroring the TS
    implementation). Unrecognised parts (e.g. image URLs) count as zero.
    """
    if content is None:
        return 0
    if isinstance(content, str):
        return len(content)
    if isinstance(content, list):
        count = 0
        for part in content:
            if not isinstance(part, dict):
                continue
            part_type = part.get("type")
            if part_type in ("text", "input_text", "output_text"):
                text_part = part.get("text")
                if isinstance(text_part, str):
                    count += len(text_part)
            elif part_type == "input_file":  # As in codex-cli
                filename_part = part.get("filename")
                if isinstance(filename_part, str):
                    count += len(filename_part)
            elif part_type == "refusal":
                refusal_part = part.get("refusal")
                if isinstance(refusal_part, str):
                    count += len(refusal_part)
            # Image URLs are deliberately ignored for this approximation.
        return count
    return 0


def _count_chars_in_tool_calls(tool_calls: list[ChatCompletionMessageToolCall] | None) -> int:
    """Count characters in tool-call function names and their arguments.

    Arguments may arrive either as a JSON string or as an already-decoded
    dict; dicts are re-serialised so both forms count comparably.
    """
    count = 0
    if tool_calls and isinstance(tool_calls, list):
        for tool_call in tool_calls:
            if not isinstance(tool_call, dict):
                continue
            function_data = tool_call.get("function")
            if not isinstance(function_data, dict):
                continue
            count += len(function_data.get("name", ""))
            args = function_data.get("arguments", "")
            if isinstance(args, str):
                count += len(args)
            elif isinstance(args, dict):
                try:
                    count += len(json.dumps(args))
                except TypeError:
                    count += len(str(args))  # Fallback for non-serialisable values
    return count


def approximate_tokens_used(history: list[ChatCompletionMessageParam], chars_per_token: int | None = None) -> int:
    """
    Roughly estimate the number of tokens used by the message history.

    Counts user/assistant content plus their tool calls, and tool-role
    responses; system messages are excluded. Counting is delegated to
    `_count_chars_in_content` / `_count_chars_in_tool_calls` so the logic
    stays consistent — the previous inline copy missed dict-typed tool
    arguments and "input_file"/"refusal" content parts.

    Args:
        history: Chat messages in OpenAI chat-completions format.
        chars_per_token: Optional override of the chars-per-token ratio;
            defaults to CHARS_PER_TOKEN_ESTIMATE.

    Returns:
        Estimated token count, rounded up.
    """
    divisor = CHARS_PER_TOKEN_ESTIMATE if chars_per_token is None else chars_per_token
    char_count = 0
    for message in history:
        # Ensure the message is a dictionary before proceeding.
        if not isinstance(message, dict):
            continue
        role = message.get("role")
        if role in ("user", "assistant"):
            # Content and any tool calls both consume context.
            char_count += _count_chars_in_content(message.get("content"))
            char_count += _count_chars_in_tool_calls(message.get("tool_calls"))
        elif role == "tool":
            # Tool outputs are echoed back into the context as well.
            char_count += _count_chars_in_content(message.get("content"))
    return math.ceil(char_count / divisor)
history.""" 2 | 3 | import json 4 | import sys 5 | import time 6 | from typing import TypedDict 7 | 8 | from ..config import CONFIG_DIR 9 | from ..utils.security_check import SecurityChecker 10 | 11 | security_checker = SecurityChecker() 12 | 13 | 14 | # Assuming config types might be shared or defined elsewhere, 15 | # but defining locally for clarity if not. 16 | # If AppConfig/HistoryConfig are defined in config.py, import them instead. 17 | class HistoryConfig(TypedDict, total=False): 18 | max_size: int 19 | save_history: bool 20 | 21 | 22 | class HistoryEntry(TypedDict): 23 | command: str 24 | timestamp: float 25 | 26 | 27 | # Default history config 28 | DEFAULT_HISTORY_CONFIG: HistoryConfig = { 29 | "max_size": 1000, 30 | "save_history": True, 31 | } 32 | 33 | 34 | HISTORY_FILE = CONFIG_DIR / "history.json" 35 | 36 | 37 | def is_sensitive_command(command: str) -> bool: 38 | """Checks if a command contains potential secrets using detect-secrets.""" 39 | messages = security_checker.check_line(command) 40 | if messages: 41 | # Log sensitivity check failure to stderr for debugging/awareness 42 | print( 43 | f"[History] Command '{command[:20]}...' potentially sensitive, skipping save.", 44 | file=sys.stderr, 45 | ) 46 | return True 47 | return False 48 | 49 | 50 | # --- Command History Functions --- 51 | 52 | 53 | def load_command_history() -> list[HistoryEntry]: 54 | """Loads command history from the history file.""" 55 | if not HISTORY_FILE.exists(): 56 | return [] 57 | try: 58 | with open(HISTORY_FILE, encoding="utf-8") as f: 59 | history_data = json.load(f) 60 | # Basic validation: check if it's a list 61 | if isinstance(history_data, list): 62 | # Further validation could be added here to check structure of entries 63 | # For now, assume the structure is correct if it's a list 64 | return history_data 65 | else: 66 | print(f"Warning: History file {HISTORY_FILE} does not contain a valid list. 
Starting fresh.", file=sys.stderr) 67 | return [] 68 | except (OSError, json.JSONDecodeError) as e: 69 | # Use stderr for warnings/errors that shouldn't pollute normal output 70 | print(f"Warning: Failed to load command history from {HISTORY_FILE}. Starting fresh. Error: {e}", file=sys.stderr) 71 | return [] 72 | except Exception as e: # Catch unexpected errors 73 | print( 74 | f"Warning: An unexpected error occurred loading history {HISTORY_FILE}. Starting fresh. Error: {e}", 75 | file=sys.stderr, 76 | ) 77 | return [] 78 | 79 | 80 | def save_command_history(history: list[HistoryEntry], config: HistoryConfig | None = None): 81 | """Saves command history to the history file.""" 82 | cfg_to_use = config if config else DEFAULT_HISTORY_CONFIG 83 | max_size = cfg_to_use.get("max_size", DEFAULT_HISTORY_CONFIG.get("max_size", 1000)) 84 | 85 | try: 86 | HISTORY_FILE.parent.mkdir(parents=True, exist_ok=True) 87 | trimmed_history = history[-max_size:] 88 | with open(HISTORY_FILE, "w", encoding="utf-8") as f: 89 | json.dump(trimmed_history, f, indent=2, ensure_ascii=False) 90 | except OSError as e: 91 | print(f"Error: Failed to save command history to {HISTORY_FILE}: {e}", file=sys.stderr) 92 | except Exception as e: 93 | print(f"Error: An unexpected error occurred saving history to {HISTORY_FILE}: {e}", file=sys.stderr) 94 | 95 | 96 | def add_to_history( 97 | command: str, 98 | history: list[HistoryEntry], 99 | config: HistoryConfig | None = None, 100 | ) -> list[HistoryEntry]: 101 | """ 102 | Adds a command to the history list if configured to save, it's not sensitive (using detect-secrets), 103 | and it's not an immediate duplicate. Saves the updated history to disk. 104 | 105 | Returns: 106 | The potentially updated history list. 
107 | """ 108 | cfg_to_use = config if config else DEFAULT_HISTORY_CONFIG 109 | should_save = cfg_to_use.get("save_history", DEFAULT_HISTORY_CONFIG.get("save_history", True)) 110 | 111 | if not should_save: 112 | return history 113 | 114 | trimmed_command = command.strip() 115 | if not trimmed_command: 116 | return history 117 | 118 | # Check for sensitivity using detect-secrets 119 | if is_sensitive_command(trimmed_command): 120 | return history # Don't save sensitive commands 121 | 122 | # Check for immediate duplicate 123 | if history and history[-1]["command"] == trimmed_command: 124 | return history 125 | 126 | new_entry: HistoryEntry = { 127 | "command": trimmed_command, 128 | "timestamp": time.time(), 129 | } 130 | 131 | new_history = history + [new_entry] 132 | save_command_history(new_history, cfg_to_use) # Save handles trimming 133 | 134 | max_size = cfg_to_use.get("max_size", DEFAULT_HISTORY_CONFIG.get("max_size", 1000)) 135 | return new_history[-max_size:] 136 | 137 | 138 | def clear_command_history(): 139 | """Clears the command history by overwriting the file with an empty list.""" 140 | try: 141 | HISTORY_FILE.parent.mkdir(parents=True, exist_ok=True) 142 | with open(HISTORY_FILE, "w", encoding="utf-8") as f: 143 | json.dump([], f) 144 | # Removed print statement - feedback should be in TUI 145 | # print(f"Command history cleared ({HISTORY_FILE})") 146 | except OSError as e: 147 | print(f"Error: Failed to clear command history file {HISTORY_FILE}: {e}", file=sys.stderr) 148 | # Re-raise or handle more gracefully depending on requirements 149 | raise # Or return False/status code 150 | except Exception as e: 151 | print(f"Error: An unexpected error occurred clearing history {HISTORY_FILE}: {e}", file=sys.stderr) 152 | raise # Or return False/status code 153 | -------------------------------------------------------------------------------- /codexy/tui/widgets/overlays/model_overlay.py: 
--------------------------------------------------------------------------------
from rich.text import Text
from textual import events
from textual.app import ComposeResult
from textual.containers import VerticalScroll
from textual.message import Message
from textual.reactive import reactive
from textual.widgets import Label, ListItem, ListView, Static

from ....utils.model_utils import format_model_for_display, sort_models_for_display


# Type for list items carrying the model ID
class ModelItem(ListItem):
    """A ListView row that remembers which model ID it represents."""

    def __init__(self, model_id: str, display_text: Text):
        super().__init__(Label(display_text))
        self.model_id = model_id


class ModelOverlay(Static):
    """An overlay for selecting an OpenAI model."""

    DEFAULT_CSS = """
    ModelOverlay {
        layer: model_overlay_layer;
        display: none;
        align: center middle;
        width: 80%;
        max-width: 60;
        height: 80%;
        max-height: 25;
        border: thick $accent;
        background: $panel;
        padding: 1;
    }
    ModelOverlay.-active {
        display: block;
    }
    ModelOverlay #model-overlay-title {
        width: 100%;
        text-align: center;
        margin-bottom: 1;
        text-style: bold;
    }
    ModelOverlay #model-list-view {
        border: none;
        background: $panel-darken-1;
    }
    ModelOverlay #model-overlay-footer {
        margin-top: 1;
        width: 100%;
        text-align: center;
        color: $text-muted;
    }
    ModelOverlay #model-overlay-error {
        margin-top: 1;
        text-align: center;
        color: $error;
    }
    ModelOverlay ListItem {
        padding: 0 1;
        height: 1;
    }
    ModelOverlay ListItem Label {
        height: 1;
    }
    ModelOverlay ListItem :hover {
        background: $accent-darken-1;
    }
    ModelOverlay ListItem.--highlight {
        background: $accent !important;
        color: $text !important;
    }
    ModelOverlay ListItem.--highlight:focus {
        background: $accent-darken-1 !important;
    }
    """

    # --- Reactives ---
    # The app sets these from outside; the watch_* methods below re-render.
    available_models: reactive[list[str]] = reactive(list)
    current_model: reactive[str] = reactive("")
    can_switch: reactive[bool] = reactive(True)  # Controls if switching is allowed

    # --- Messages ---
    class Selected(Message):
        """Sent when a model is selected."""

        def __init__(self, model_id: str):
            self.model_id = model_id
            super().__init__()

    class Exit(Message):
        """Sent when the overlay is exited without selection."""

        pass

    def compose(self) -> ComposeResult:
        yield Label("Switch Model", id="model-overlay-title")
        # NOTE(review): the "-hidden" class has no rule in DEFAULT_CSS; the
        # label's visibility is actually driven by watch_can_switch toggling
        # .display and the "-active" class — confirm "-hidden" is styled
        # elsewhere or is vestigial.
        yield Label("Cannot switch model after conversation starts.", id="model-overlay-error", classes="-hidden")
        with VerticalScroll():
            yield ListView(id="model-list-view")
        yield Label("↑/↓ Select, Enter Confirm, Esc Cancel", id="model-overlay-footer")

    def on_mount(self) -> None:
        """Focus the list view when mounted."""
        # Deferred via call_later so the ListView is guaranteed to exist.
        self.call_later(self.focus_list)

    def focus_list(self) -> None:
        """Safely focus the ListView."""
        try:
            list_view = self.query_one(ListView)
            if list_view.is_mounted:
                list_view.focus()
        except Exception as e:
            self.log.error(f"Error focusing model list: {e}")

    def watch_can_switch(self, can_switch: bool) -> None:
        """Update UI based on whether switching is allowed."""
        # Show the list when switching is allowed, the error label otherwise.
        self.query_one("#model-list-view").display = can_switch
        self.query_one("#model-overlay-error").set_class(not can_switch, "-active")
        self.query_one("#model-overlay-error").display = not can_switch  # Ensure it's visible

    def watch_available_models(self, new_models: list[str]) -> None:
        """Update the list view when available models change."""
        self._populate_list()

    def watch_current_model(self, new_current_model: str) -> None:
        """Update the list view when the current model changes."""
        self._populate_list()

    def _populate_list(self):
        """Populate the ListView with models.

        Rebuilds the whole list (clear + append) each time, sorting the
        current model first and highlighting it.
        """
        if not self.is_mounted:  # Don't populate if not mounted
            return

        list_view = self.query_one("#model-list-view", ListView)
        list_view.clear()

        if not self.can_switch:
            return  # Don't populate if switching isn't allowed

        sorted_list = sort_models_for_display(self.available_models, self.current_model)
        highlighted_index: int | None = None

        for index, model_id in enumerate(sorted_list):
            display_text = format_model_for_display(model_id, self.current_model)
            rich_text = Text.from_markup(display_text)  # Convert potentially marked-up string
            list_view.append(ModelItem(model_id, rich_text))
            if model_id == self.current_model:
                highlighted_index = index

        if highlighted_index is not None and len(list_view) > 0:
            list_view.index = highlighted_index

    def on_list_view_selected(self, event: ListView.Selected) -> None:
        """Handle selection from the list view."""
        event.stop()
        if self.can_switch and isinstance(event.item, ModelItem):
            selected_model_id = event.item.model_id
            self.log(f"Model selected: {selected_model_id}")
            # The app performs the actual model switch.
            self.post_message(self.Selected(selected_model_id))

    def on_key(self, event: events.Key) -> None:
        """Handle key presses, specifically Escape."""
        if event.key == "escape":
            event.stop()
            self.log("Model overlay exited via Escape.")
            self.post_message(self.Exit())
--------------------------------------------------------------------------------
/codexy/utils/update_checker.py:
--------------------------------------------------------------------------------
"""Utility for checking for new versions of the codexy package on PyPI."""

import asyncio
import json
import sys
from datetime import datetime,
timezone
from importlib import metadata
from typing import TypedDict, cast

import httpx
from packaging.version import parse as parse_version

from .. import PACKAGE_NAME
from ..config import CONFIG_DIR

# Constants
PYPI_URL_TEMPLATE = f"https://pypi.org/pypi/{PACKAGE_NAME}/json"
UPDATE_CHECK_FREQUENCY_SECONDS = 60 * 60 * 24  # Check once per day
STATE_FILE = CONFIG_DIR / "update_check.json"


class UpdateCheckState(TypedDict, total=False):
    """Structure for storing the last update check timestamp."""

    last_check_ts: float  # Store timestamp as float (seconds since epoch)


class UpdateInfo(TypedDict):
    """Structure for returning update information."""

    current_version: str
    latest_version: str


# --- State Management ---


def _read_state() -> UpdateCheckState | None:
    """Reads the last check state from the JSON file.

    Returns None (never raises) if the file is missing, unreadable or
    malformed, so a broken state file only causes an extra check.
    """
    if not STATE_FILE.exists():
        return None
    try:
        with open(STATE_FILE, encoding="utf-8") as f:
            data = json.load(f)
            if isinstance(data, dict) and "last_check_ts" in data:
                return cast(UpdateCheckState, data)  # Use cast after validation
            else:
                print(f"Warning: Invalid format in {STATE_FILE}. Ignoring.", file=sys.stderr)
                return None
    except (OSError, json.JSONDecodeError) as e:
        print(f"Warning: Could not read update check state from {STATE_FILE}: {e}", file=sys.stderr)
        return None
    except Exception as e:
        print(f"Warning: Unexpected error reading update state {STATE_FILE}: {e}", file=sys.stderr)
        return None


def _write_state(state: UpdateCheckState):
    """Writes the current check state to the JSON file (best-effort)."""
    try:
        STATE_FILE.parent.mkdir(parents=True, exist_ok=True)
        with open(STATE_FILE, "w", encoding="utf-8") as f:
            json.dump(state, f, indent=2)
    except OSError as e:
        print(f"Error: Could not write update check state to {STATE_FILE}: {e}", file=sys.stderr)
    except Exception as e:
        print(f"Error: Unexpected error writing update state {STATE_FILE}: {e}", file=sys.stderr)


# --- Version Information ---


async def _get_current_version() -> str | None:
    """Gets the currently installed version of the package.

    Returns None if the package metadata cannot be found or read.
    """
    try:
        return metadata.version(PACKAGE_NAME)
    except metadata.PackageNotFoundError:
        print(f"Warning: Package '{PACKAGE_NAME}' not found. Cannot determine current version.", file=sys.stderr)
        return None
    except Exception as e:
        print(f"Warning: Error getting current version for '{PACKAGE_NAME}': {e}", file=sys.stderr)
        return None


async def _fetch_latest_version() -> str | None:
    """Fetches the latest version string from PyPI.

    Returns None on any network, HTTP or decoding failure (logged to stderr).
    """
    try:
        async with httpx.AsyncClient(timeout=10.0) as client:  # Add a timeout
            response = await client.get(PYPI_URL_TEMPLATE)
            response.raise_for_status()  # Raise an exception for bad status codes (4xx or 5xx)
            data = response.json()
            return data.get("info", {}).get("version")
    except httpx.RequestError as e:
        # Network-related errors
        print(f"Warning: Network error checking for updates: {e}", file=sys.stderr)
        return None
    except httpx.HTTPStatusError as e:
        # Errors for 4xx/5xx responses
        print(
            f"Warning: HTTP error checking for updates: {e.response.status_code} - {e.response.text[:100]}...",
            file=sys.stderr,
        )
        return None
    except json.JSONDecodeError:
        print(f"Warning: Could not decode JSON response from PyPI for {PACKAGE_NAME}.", file=sys.stderr)
        return None
    except Exception as e:
        print(f"Warning: Unexpected error fetching latest version: {e}", file=sys.stderr)
        return None


# --- Main Check Function ---


async def check_for_updates() -> UpdateInfo | None:
    """
    Checks PyPI for a newer version of the package if enough time has passed.

    Returns:
        An UpdateInfo dictionary if a newer version is found, otherwise None.
    """
    now_ts = datetime.now(timezone.utc).timestamp()
    state = _read_state()
    last_check_ts = state.get("last_check_ts", 0.0) if state else 0.0

    # Check if enough time has passed since the last check
    if (now_ts - last_check_ts) < UPDATE_CHECK_FREQUENCY_SECONDS:
        # print("Debug: Update check skipped, frequency not met.", file=sys.stderr)
        return None

    print("Checking for codexy updates...", file=sys.stderr)  # Indicate check is running

    # Get current and latest versions concurrently
    current_version_str, latest_version_str = await asyncio.gather(
        # NOTE(review): _get_current_version is async but performs the
        # metadata lookup synchronously (no to_thread) — it blocks briefly.
        _get_current_version(),
        _fetch_latest_version(),
    )

    # Update state regardless of whether the check succeeded, to avoid constant checks on failure
    _write_state({"last_check_ts": now_ts})

    if not current_version_str or not latest_version_str:
        print("Debug: Could not determine current or latest version.", file=sys.stderr)
        return None  # Cannot compare if either version is missing

    try:
        current_version = parse_version(current_version_str)
        latest_version = parse_version(latest_version_str)

        if latest_version > current_version:
            print(f"Update found: {current_version_str} -> {latest_version_str}", file=sys.stderr)
            return {
                "current_version": current_version_str,
                "latest_version": latest_version_str,
            }
        else:
            # print(f"Debug: Already on the latest version ({current_version_str}).", file=sys.stderr)
            return None
    except Exception as e:  # Catch errors during version parsing/comparison
        print(
            f"Warning: Error comparing versions ('{current_version_str}', '{latest_version_str}'): {e}",
            file=sys.stderr,
        )
        return None
--------------------------------------------------------------------------------
/codexy/utils/model_info.py:
"""Stores information about supported models, like context length."""

import sys
from typing import TypedDict

# Default from oai/models.go (but this might change)
# Using a common default for unknown models.
DEFAULT_MAX_TOKENS = 4096


class ModelInfo(TypedDict):
    """Display metadata for a single model."""

    label: str
    max_context_length: int  # Using tokens as the unit


# Dictionary mapping model IDs to their information
# Based on codex-cli/src/utils/model-info.ts, but simplified for common models
# We estimate context length in tokens.
MODEL_INFO_REGISTRY: dict[str, ModelInfo] = {
    "o1-pro-2025-03-19": {"label": "o1 Pro (2025-03-19)", "max_context_length": 200000},
    "o3": {"label": "o3", "max_context_length": 200000},
    "o3-2025-04-16": {"label": "o3 (2025-04-16)", "max_context_length": 200000},
    "o4-mini": {"label": "o4 Mini", "max_context_length": 200000},
    "gpt-4.1-nano": {"label": "GPT-4.1 Nano", "max_context_length": 1000000},
    "gpt-4.1-nano-2025-04-14": {"label": "GPT-4.1 Nano (2025-04-14)", "max_context_length": 1000000},
    "o4-mini-2025-04-16": {"label": "o4 Mini (2025-04-16)", "max_context_length": 200000},
    "gpt-4": {"label": "GPT-4", "max_context_length": 8192},
    "o1-preview-2024-09-12": {"label": "o1 Preview (2024-09-12)", "max_context_length": 128000},
    "gpt-4.1-mini": {"label": "GPT-4.1 Mini", "max_context_length": 1000000},
    "gpt-3.5-turbo-instruct-0914": {"label": "GPT-3.5 Turbo Instruct (0914)", "max_context_length": 4096},
    "gpt-4o-mini-search-preview": {"label": "GPT-4o Mini Search Preview", "max_context_length": 128000},
    "gpt-4.1-mini-2025-04-14": {"label": "GPT-4.1 Mini (2025-04-14)", "max_context_length": 1000000},
    "chatgpt-4o-latest": {"label": "ChatGPT-4o Latest", "max_context_length": 128000},
    "gpt-3.5-turbo-1106": {"label": "GPT-3.5 Turbo (1106)", "max_context_length": 16385},
    "gpt-4o-search-preview": {"label": "GPT-4o Search Preview", "max_context_length": 128000},
    "gpt-4-turbo": {"label": "GPT-4 Turbo", "max_context_length": 128000},
    "gpt-4o-realtime-preview-2024-12-17": {
        "label": "GPT-4o Realtime Preview (2024-12-17)",
        "max_context_length": 128000,
    },
    "gpt-3.5-turbo-instruct": {"label": "GPT-3.5 Turbo Instruct", "max_context_length": 4096},
    "gpt-3.5-turbo": {"label": "GPT-3.5 Turbo", "max_context_length": 16385},
    "gpt-4-turbo-preview": {"label": "GPT-4 Turbo Preview", "max_context_length": 128000},
    "gpt-4o-mini-search-preview-2025-03-11": {
        "label": "GPT-4o Mini Search Preview (2025-03-11)",
        "max_context_length": 128000,
    },
    "gpt-4-0125-preview": {"label": "GPT-4 (0125) Preview", "max_context_length": 128000},
    "gpt-4o-2024-11-20": {"label": "GPT-4o (2024-11-20)", "max_context_length": 128000},
    "o3-mini": {"label": "o3 Mini", "max_context_length": 200000},
    "gpt-4o-2024-05-13": {"label": "GPT-4o (2024-05-13)", "max_context_length": 128000},
    "gpt-4-turbo-2024-04-09": {"label": "GPT-4 Turbo (2024-04-09)", "max_context_length": 128000},
    "gpt-3.5-turbo-16k": {"label": "GPT-3.5 Turbo 16k", "max_context_length": 16385},
    "o3-mini-2025-01-31": {"label": "o3 Mini (2025-01-31)", "max_context_length": 200000},
    "o1-preview": {"label": "o1 Preview", "max_context_length": 128000},
    "o1-2024-12-17": {"label": "o1 (2024-12-17)", "max_context_length": 128000},
    "gpt-4-0613": {"label": "GPT-4 (0613)", "max_context_length": 8192},
    "o1": {"label": "o1", "max_context_length": 128000},
    "o1-pro": {"label": "o1 Pro", "max_context_length": 200000},
    "gpt-4.5-preview": {"label": "GPT-4.5 Preview", "max_context_length": 128000},
    "gpt-4.5-preview-2025-02-27": {"label": "GPT-4.5 Preview (2025-02-27)", "max_context_length": 128000},
    "gpt-4o-search-preview-2025-03-11": {"label": "GPT-4o Search Preview (2025-03-11)", "max_context_length": 128000},
    "gpt-4o": {"label": "GPT-4o", "max_context_length": 128000},
    "gpt-4o-mini": {"label": "GPT-4o Mini", "max_context_length": 128000},
    "gpt-4o-2024-08-06": {"label": "GPT-4o (2024-08-06)", "max_context_length": 128000},
    "gpt-4.1": {"label": "GPT-4.1", "max_context_length": 1000000},
    "gpt-4.1-2025-04-14": {"label": "GPT-4.1 (2025-04-14)", "max_context_length": 1000000},
    "gpt-4o-mini-2024-07-18": {"label": "GPT-4o Mini (2024-07-18)", "max_context_length": 128000},
    "o1-mini": {"label": "o1 Mini", "max_context_length": 128000},
    "gpt-3.5-turbo-0125": {"label": "GPT-3.5 Turbo (0125)", "max_context_length": 16385},
    "o1-mini-2024-09-12": {"label": "o1 Mini (2024-09-12)", "max_context_length": 128000},
    "gpt-4-1106-preview": {"label": "GPT-4 (1106) Preview", "max_context_length": 128000},
    "deepseek-chat": {"label": "DeepSeek Chat", "max_context_length": 64000},
    "deepseek-reasoner": {"label": "DeepSeek Reasoner", "max_context_length": 64000},
}

# Legacy mapping for backward compatibility
MODEL_MAX_TOKENS = {
    "gpt-4-turbo": 128000,
    "gpt-4-32k": 32768,
    "gpt-4.1-32k": 32768,
    "gpt-4": 8192,
    "gpt-4.1": 1000000,
    "gpt-3.5-turbo-16k": 16385,
    "gpt-3.5-turbo": 16385,
    "o4-mini": 200000,
    "gpt-4o": 128000,
    "gpt-4o-mini": 128000,
}


def get_max_tokens_for_model(model_name: str) -> int:
    """
    Return the maximum context length (in tokens) for a given model name.

    Lookup order:
      1. Exact match in MODEL_INFO_REGISTRY.
      2. Exact match in the legacy MODEL_MAX_TOKENS mapping.
      3. Substring match on well-known model-family names, most specific
         first (e.g. "gpt-4o" / "gpt-4.1" before plain "gpt-4").
      4. DEFAULT_MAX_TOKENS, with a warning printed to stderr.
    """
    if model_name in MODEL_INFO_REGISTRY:
        return MODEL_INFO_REGISTRY[model_name]["max_context_length"]

    # Fallback to legacy mapping
    if model_name in MODEL_MAX_TOKENS:
        return MODEL_MAX_TOKENS[model_name]

    # Substring fallbacks: most specific family first.
    # BUGFIX: "gpt-4o" and "gpt-4.1" both contain "gpt-4" as a substring, so
    # they must be tested *before* the plain "gpt-4" bucket; previously any
    # unknown "gpt-4o-*" / "gpt-4.1-*" variant incorrectly fell through to
    # the 8k "gpt-4" limit and the dedicated branches were unreachable.
    if "gpt-4-turbo" in model_name:
        return 128000
    if "gpt-4-32k" in model_name:
        return 32768
    if "gpt-3.5-turbo-16k" in model_name:
        return 16385
    if "gpt-3.5-turbo-instruct" in model_name:
        return 4096  # gpt-3.5-turbo-instruct has a smaller limit than regular gpt-3.5-turbo
    if "gpt-3.5-turbo" in model_name:
        return 16385
    if "o4-mini" in model_name:
        return 200000
    if "gpt-4o" in model_name:
        return 128000
    if "gpt-4.1" in model_name:
        return 1000000
    if "gpt-4" in model_name:
        return 8192

    print(f"Warning: Unknown model name '{model_name}'. Using default max tokens: {DEFAULT_MAX_TOKENS}", file=sys.stderr)
    return DEFAULT_MAX_TOKENS


def get_model_max_tokens(model_name: str) -> int:
    """
    Alias for get_max_tokens_for_model for backward compatibility.
    """
    return get_max_tokens_for_model(model_name)
"""
Security Check Module - Responsible for Checking File and Directory Security
"""

import re
from dataclasses import dataclass
from pathlib import Path

from detect_secrets.core.secrets_collection import SecretsCollection
from detect_secrets.settings import default_settings


@dataclass
class SuspiciousFileResult:
    """Result record for one suspicious file.

    Attributes:
        file_path: File path as string
        messages: List of suspicious reasons
    """

    file_path: str  # Keep as string for serialization compatibility
    messages: list[str]


class SecurityChecker:
    """Scans file names and contents for credential-like material."""

    # File names that typically hold secrets (env files, keys, certs, vaults).
    SUSPICIOUS_FILE_PATTERNS = [
        r"\.env($|\..*$)",  # Environment variable files
        r".*_rsa$",  # RSA keys
        r".*\.pem$",  # PEM certificates
        r".*\.key$",  # Key files
        r".*\.pfx$",  # PFX certificates
        r".*\.p12$",  # P12 certificates
        r".*\.pkcs12$",  # PKCS12 certificates
        r".*\.keystore$",  # Keystore
        r".*\.jks$",  # Java keystore
        r".*\.kdbx$",  # KeePass database
        r".*\.psafe3$",  # Password Safe database
    ]

    # Content fragments that look like embedded credentials.
    SUSPICIOUS_CONTENT_PATTERNS = [
        # API keys
        r"api[_-]?key.*['\"][0-9a-zA-Z]{32,}['\"]",
        r"api[_-]?secret.*['\"][0-9a-zA-Z]{32,}['\"]",
        # Access tokens
        r"access[_-]?token.*['\"][0-9a-zA-Z]{32,}['\"]",
        r"auth[_-]?token.*['\"][0-9a-zA-Z]{32,}['\"]",
        # AWS related
        r"AKIA[0-9A-Z]{16}",  # AWS access key ID
        r"aws[_-]?secret.*['\"][0-9a-zA-Z/+=]{32,}['\"]",
        # Database connection strings
        r"jdbc:.*:@.*:\d+:.*",  # JDBC connection string
        r"mongodb(\+srv)?://[^/\s]+:[^/\s]+@[^/\s]+",  # MongoDB connection URI
        r"postgres://[^/\s]+:[^/\s]+@[^/\s]+",  # PostgreSQL connection URI
        # Private keys
        r"-----BEGIN (?:RSA )?PRIVATE KEY-----",
        # Passwords
        r"password.*['\"][^'\"\s]{8,}['\"]",
        r"passwd.*['\"][^'\"\s]{8,}['\"]",
        r"pwd.*['\"][^'\"\s]{8,}['\"]",
    ]

    def __init__(self):
        """Pre-compile all patterns (case-insensitive) and reset the dedup set."""
        self.suspicious_file_patterns = [re.compile(p, re.IGNORECASE) for p in self.SUSPICIOUS_FILE_PATTERNS]
        self.suspicious_content_patterns = [re.compile(p, re.IGNORECASE) for p in self.SUSPICIOUS_CONTENT_PATTERNS]
        # Paths already examined by check_file(); avoids duplicate reports.
        self.checked_paths: set[str] = set()

    def check_line(self, line: str) -> list[str]:
        """Return one message per suspicious content match within a single line.

        Args:
            line: Line content
        """
        return [
            f"Suspicious content pattern: {match.group()}"
            for pattern in self.suspicious_content_patterns
            for match in pattern.finditer(line)
        ]

    def check_file(self, file_path: Path, content: str) -> list[str]:
        """Check one file's name and content; each path is only checked once.

        Args:
            file_path: Path object representing the file path
            content: File content

        Returns:
            List of suspicious reasons (empty on repeat visits).
        """
        str_path = str(file_path)
        if str_path in self.checked_paths:
            return []
        self.checked_paths.add(str_path)

        messages: list[str] = []

        # File-name check: one message even if several patterns would match.
        if any(pattern.match(file_path.name) for pattern in self.suspicious_file_patterns):
            messages.append(f"Suspicious file name pattern: {file_path.name}")

        # Content check: report every match, truncated so a full secret is
        # never echoed back to the user.
        for pattern in self.suspicious_content_patterns:
            for match in pattern.finditer(content):
                matched_text = match.group()
                truncated_text = matched_text[:20] + "..." if len(matched_text) > 20 else matched_text
                messages.append(f"Suspicious content pattern: {truncated_text}")

        return messages

    def check_file_size(self, file_path: Path, max_size_mb: float = 10.0) -> list[str]:
        """Warn when a file exceeds ``max_size_mb`` megabytes.

        Args:
            file_path: Path object representing the file path
            max_size_mb: Maximum allowed size in megabytes

        Returns:
            List of warning messages
        """
        try:
            size_mb = file_path.stat().st_size / (1024 * 1024)
        except Exception as e:
            print(f"Error checking file size: {e}")
            return []
        if size_mb > max_size_mb:
            return [f"File size exceeds {max_size_mb}MB (current size: {size_mb:.2f}MB)"]
        return []

    def check_files_with_secretlint(self, file_path: Path) -> list[str]:
        """Run detect-secrets over one file and report each finding's type."""
        secrets = SecretsCollection()
        with default_settings():
            secrets.scan_file(filename=str(file_path.absolute()))
        # Iterating a SecretsCollection yields (filename, secret) pairs.
        return [f"Secret detected: {secret[1].type}" for secret in secrets]


def check_files(root_dir: str | Path, file_paths: list[str], file_contents: dict[str, str]) -> list[SuspiciousFileResult]:
    """Check security of multiple files relative to ``root_dir``.

    Args:
        root_dir: Root directory path
        file_paths: List of file paths to check
        file_contents: Dictionary mapping file paths to their contents

    Returns:
        List of suspicious file results (only files with at least one finding).
    """
    checker = SecurityChecker()
    results: list[SuspiciousFileResult] = []
    root_path = Path(root_dir)

    for file_path in file_paths:
        # Convert to Path object for path operations
        full_path = root_path / file_path
        content = file_contents.get(file_path, "")

        messages: list[str] = []
        if full_path.exists():
            messages.extend(checker.check_file(full_path, content))
            messages.extend(checker.check_file_size(full_path))
            messages.extend(checker.check_files_with_secretlint(full_path))
        if messages:
            # Keep using relative path string in results
            results.append(SuspiciousFileResult(file_path=file_path, messages=messages))

    return results
175 | messages.extend(checker.check_file_size(full_path)) 176 | messages.extend(checker.check_files_with_secretlint(full_path)) 177 | if messages: 178 | # Keep using relative path string in results 179 | results.append(SuspiciousFileResult(file_path=file_path, messages=messages)) 180 | 181 | return results 182 | -------------------------------------------------------------------------------- /codexy/utils/filesystem.py: -------------------------------------------------------------------------------- 1 | """Filesystem and path related utility functions for codexy.""" 2 | 3 | import os 4 | import subprocess 5 | from pathlib import Path 6 | 7 | 8 | def check_in_git(workdir: str | Path) -> bool: 9 | """ 10 | Checks if the given directory is part of a Git repository. 11 | 12 | Uses `git rev-parse --is-inside-work-tree` command which exits with 0 13 | if inside a work tree, and non-zero otherwise. 14 | 15 | Args: 16 | workdir: The directory path (string or Path object) to check. 17 | 18 | Returns: 19 | True if the directory is inside a Git work tree, False otherwise 20 | (including if git command fails or git is not found). 
21 | """ 22 | workdir_path = Path(workdir).resolve() # Ensure absolute path 23 | cmd = ["git", "rev-parse", "--is-inside-work-tree"] 24 | 25 | try: 26 | # Run the git command in the specified directory 27 | # Suppress stdout and stderr as we only care about the return code 28 | # check=False prevents raising CalledProcessError on non-zero exit 29 | result = subprocess.run( 30 | cmd, 31 | cwd=str(workdir_path), 32 | stdout=subprocess.DEVNULL, 33 | stderr=subprocess.DEVNULL, 34 | check=False, # Do not raise an exception on non-zero exit 35 | creationflags=subprocess.CREATE_NO_WINDOW if os.name == "nt" else 0, # Hide console window on Windows 36 | ) 37 | # Return True if the command executed successfully (exit code 0) 38 | return result.returncode == 0 39 | except FileNotFoundError: 40 | # Handle case where 'git' command is not found 41 | # print("Warning: 'git' command not found. Cannot check repository status.") 42 | return False 43 | except Exception: 44 | # Catch any other potential errors during subprocess execution 45 | # print(f"Warning: Error checking git status in {workdir_path}: {e}") 46 | return False 47 | 48 | 49 | def shorten_path(p: str | Path, max_length: int = 40) -> str: 50 | """ 51 | Shortens a path string for display, similar to codex-cli's behavior. 52 | 53 | 1. Replaces the home directory prefix with '~'. 54 | 2. If the path is still longer than max_length, it removes components 55 | from the middle, replacing them with '...', keeping the beginning 56 | (root or ~) and the end (filename and potentially some parent dirs). 57 | 58 | Args: 59 | p: The path (string or Path object) to shorten. 60 | max_length: The maximum desired length for the output string. 61 | 62 | Returns: 63 | The shortened path string. 64 | """ 65 | try: 66 | abs_path = Path(p).resolve() 67 | home = Path.home() 68 | except Exception: 69 | # Fallback if path resolution fails 70 | return str(p)[:max_length] + ("..." 
if len(str(p)) > max_length else "") 71 | 72 | try: 73 | # Check if path is under home directory 74 | if abs_path == home or abs_path.is_relative_to(home): 75 | if abs_path == home: 76 | display_path = "~" 77 | else: 78 | # Use '/' for display consistency across platforms within '~' notation 79 | display_path = "~/" + str(abs_path.relative_to(home)).replace(os.sep, "/") 80 | else: 81 | display_path = str(abs_path) 82 | except ValueError: 83 | # is_relative_to throws ValueError if paths are on different drives (Windows) 84 | display_path = str(abs_path) 85 | except Exception: 86 | # Fallback for other potential errors 87 | display_path = str(abs_path) 88 | 89 | if len(display_path) <= max_length: 90 | return display_path 91 | 92 | # Path is too long, apply shortening logic using '/' as separator for consistency 93 | display_path_unix = display_path.replace(os.sep, "/") 94 | parts = display_path_unix.split("/") 95 | 96 | # Filter out empty parts that might result from leading/trailing slashes or '//' 97 | parts = [part for part in parts if part] 98 | 99 | # Determine the prefix (e.g., '~/', '/') 100 | prefix = "" 101 | path_parts_for_suffix = parts # Assume we use all parts for suffix initially 102 | 103 | if display_path.startswith("~"): 104 | prefix = "~/" 105 | # parts already excludes '~', path_parts_for_suffix remains parts 106 | elif abs_path.is_absolute(): 107 | prefix = "/" # Simple root prefix for display 108 | # path_parts_for_suffix remains parts 109 | 110 | # Need to handle Windows drive letters specifically if not under home 111 | elif os.name == "nt" and len(str(abs_path)) > 2 and str(abs_path)[1] == ":": 112 | drive = str(abs_path)[:2] 113 | prefix = drive + "/" # Display as C:/ 114 | # Adjust parts if they include the drive 115 | if parts and parts[0] == drive: 116 | path_parts_for_suffix = parts[1:] 117 | 118 | # Iterate backwards, adding components until max_length is approached 119 | best_fit = "" 120 | # Keep at least the filename (last part) 121 
| min_parts_to_keep = 1 if path_parts_for_suffix else 0 122 | 123 | # Iterate keeping at least `min_parts_to_keep` up to all parts 124 | for i in range(min_parts_to_keep, len(path_parts_for_suffix) + 1): 125 | # Take the last 'i' parts for the suffix 126 | suffix_parts = path_parts_for_suffix[len(path_parts_for_suffix) - i :] 127 | suffix = "/".join(suffix_parts) # Use '/' for joining 128 | 129 | # Construct candidate string 130 | candidate = prefix 131 | # Add ellipsis only if parts were actually omitted 132 | # Check if the number of suffix parts is less than total available parts 133 | if i < len(path_parts_for_suffix): 134 | candidate += ".../" 135 | candidate += suffix 136 | 137 | if len(candidate) <= max_length: 138 | best_fit = candidate # Found a candidate that fits 139 | # Continue loop to find the longest possible fit that still fits 140 | else: 141 | # If adding this part made it too long, the *previous* best_fit was optimal 142 | # If this was the *first* part tried (i == min_parts_to_keep) and it's already too long, 143 | # best_fit will still be empty. 144 | break 145 | 146 | # If no candidate ever fit (e.g., prefix + ... 
import os
import shlex
import subprocess
from pathlib import Path

from openai.types.chat import ChatCompletionToolParam

PROJECT_ROOT = Path.cwd()
DEFAULT_MAX_OUTPUT_LINES = 20


def _truncate_output(text: str) -> str:
    """Clamp *text* to DEFAULT_MAX_OUTPUT_LINES lines, noting how many were cut."""
    lines = text.splitlines()
    if len(lines) <= DEFAULT_MAX_OUTPUT_LINES:
        return text
    return (
        "\n".join(lines[:DEFAULT_MAX_OUTPUT_LINES])
        + f"\n... ({len(lines) - DEFAULT_MAX_OUTPUT_LINES} more lines truncated)"
    )


def execute_command_tool(
    command: str,
    cwd: str | None = None,
    is_sandboxed: bool = False,
    allowed_write_paths: list[Path] | None = None,
    full_stdout: bool = False,
) -> str:
    """
    Executes a shell command and returns its output (stdout and stderr).

    If is_sandboxed is True, the command is run with shell=False and its
    working directory must lie inside one of allowed_write_paths.

    Args:
        command: The command line to execute.
        cwd: Optional working directory; defaults to the project root.
        is_sandboxed: Run without a shell, restricted to allowed paths.
        allowed_write_paths: Directories a sandboxed command may run in.
        full_stdout: When False, stdout/stderr are truncated to
            DEFAULT_MAX_OUTPUT_LINES lines each.

    Returns:
        A human-readable report with exit code, stdout and stderr, or an
        "Error: ..." string describing what went wrong.
    """
    if not command:
        return "Error: Empty command received."

    effective_cwd_path = Path(cwd) if cwd else PROJECT_ROOT
    try:
        # Resolve CWD to an absolute path to prevent relative path issues
        effective_cwd = effective_cwd_path.resolve(strict=True)
    except FileNotFoundError:
        return f"Error: Working directory '{effective_cwd_path}' not found."
    except Exception as e:
        return f"Error resolving working directory '{effective_cwd_path}': {e}"

    if not effective_cwd.is_dir():
        return f"Error: Working directory '{effective_cwd}' is not a directory."

    # --- Sandboxing Logic ---
    if is_sandboxed:
        print(f"Attempting sandboxed execution for: '{command}'")
        if not allowed_write_paths:
            print(f"[Sandbox] No allowed_write_paths provided, using project root: {PROJECT_ROOT}")
            allowed_write_paths = [PROJECT_ROOT]

        # Ensure allowed paths are resolved absolute paths
        resolved_allowed_paths = []
        for p in allowed_write_paths:
            try:
                resolved_allowed_paths.append(Path(p).resolve(strict=True))
            except Exception as e:
                return f"Error resolving allowed writable path '{p}': {e}"

        # The CWD must be one of the allowed paths or nested below one.
        is_cwd_allowed = any(
            effective_cwd == allowed_path or str(effective_cwd).startswith(str(allowed_path) + os.sep)
            for allowed_path in resolved_allowed_paths
        )
        if not is_cwd_allowed:
            allowed_paths_str = ", ".join([str(p) for p in resolved_allowed_paths])
            return f"Error: Sandboxed command CWD '{effective_cwd}' is not within allowed paths: [{allowed_paths_str}]"

        # Use shell=False for safety - requires splitting the command string
        try:
            cmd_list = shlex.split(command)
            if not cmd_list:  # Handle empty command after split
                return "Error: Empty command after parsing for sandbox execution."
            print(f"Executing sandboxed (shell=False): {cmd_list} in '{effective_cwd}'")
            result = subprocess.run(
                cmd_list,  # Pass list of args
                shell=False,  # <<< IMPORTANT: No shell for sandboxed commands
                cwd=effective_cwd,
                capture_output=True,
                text=True,
                timeout=60,
                check=False,
            )
        # BUGFIX: TimeoutExpired/FileNotFoundError used to be "handled" around
        # the result-processing block below, where they can never occur — they
        # are raised by subprocess.run itself, so they must be caught here.
        except subprocess.TimeoutExpired:
            return f"Error: Command '{command}' timed out after 60 seconds."
        except FileNotFoundError:
            return f"Error: Command not found or shell execution failed for '{command}'."
        except Exception as e:
            # Covers shlex.split errors and any other launch failure.
            return f"Error executing sandboxed command '{command}': {e}"

    # --- Default (Non-Sandboxed) Execution ---
    else:
        # Keep shell=True for now for non-sandboxed, but acknowledge the risk.
        # Consider switching to shell=False + shlex.split here too eventually.
        print(f"Executing command (shell=True): '{command}' in '{effective_cwd}'")
        try:
            result = subprocess.run(
                command,
                shell=True,  # <<< Risk acknowledged
                cwd=effective_cwd,
                capture_output=True,
                text=True,
                timeout=60,
                check=False,
            )
        except subprocess.TimeoutExpired:
            return f"Error: Command '{command}' timed out after 60 seconds."
        except FileNotFoundError:
            return f"Error: Command not found or shell execution failed for '{command}'."
        except Exception as e:
            return f"Error executing command '{command}': {e}"

    # --- Process result (common for both sandboxed and non-sandboxed) ---
    try:
        output = f"Exit Code: {result.returncode}\n"
        stdout = result.stdout.strip() if result.stdout else ""
        stderr = result.stderr.strip() if result.stderr else ""

        if not full_stdout:
            stdout = _truncate_output(stdout)
            stderr = _truncate_output(stderr)

        if stdout:
            output += f"--- stdout ---\n{stdout}\n"
        if stderr:
            output += f"--- stderr ---\n{stderr}\n"

        return output.strip()
    except Exception as e:
        return f"Error processing result for command '{command}': {e}"


EXECUTE_COMMAND_TOOL_DEF: ChatCompletionToolParam = {
    "type": "function",
    "function": {
        "name": "execute_command",
        "description": "Execute a CLI command on the user's system. Use this for system operations, running scripts, file manipulations (like mkdir, rm, mv), etc. Always prefer using dedicated file operation tools if available.",
        "parameters": {
            "type": "object",
            "properties": {
                "command": {
                    "type": "string",
                    "description": "The CLI command to execute (e.g., 'ls -la', 'python script.py', 'mkdir new_dir').",
                },
                "cwd": {
                    "type": "string",
                    "description": "Optional working directory to execute the command in. Defaults to the project root.",
                },
                # <<< It might be better *not* to expose sandbox/write paths/full_stdout to the LLM directly.
                # The agent should decide these based on context and policy.
                # Keeping them internal to the Python implementation.
            },
            "required": ["command"],
        },
    },
}
import json
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Any

import yaml

from codexy.config import (
    DEFAULT_MEMORY_COMPRESSION_THRESHOLD_FACTOR,
    DEFAULT_MEMORY_ENABLE_COMPRESSION,
    DEFAULT_MEMORY_ENABLED,
    DEFAULT_MEMORY_KEEP_RECENT_MESSAGES,
    EMPTY_STORED_CONFIG,
    load_config,
)


class TestConfigLoadingMemory(unittest.TestCase):
    """Exercises how load_config fills in the `memory` section and its defaults."""

    def setUp(self):
        self.temp_dir = TemporaryDirectory()
        self.temp_path = Path(self.temp_dir.name)
        # A real instructions file keeps load_config from warning about it.
        (self.temp_path / "instructions.md").write_text("Test instructions")

    def tearDown(self):
        self.temp_dir.cleanup()

    def _write_config(self, data: dict[str, Any], format: str = "json") -> Path:
        """Serialize *data* into the temp dir as config.json / config.yaml."""
        if format == "json":
            config_file = self.temp_path / "config.json"
            config_file.write_text(json.dumps(data, indent=2))
            return config_file
        if format == "yaml":
            config_file = self.temp_path / "config.yaml"
            with open(config_file, "w") as f:
                yaml.dump(data, f)
            return config_file
        raise ValueError("Unsupported format")

    def _load(self, config_file: Path):
        """Run load_config against the written config and the temp instructions."""
        return load_config(config_path=config_file, instructions_path=self.temp_path / "instructions.md")

    def test_load_config_no_memory_section(self):
        """Test loading config when memory section is entirely missing."""
        app_config = self._load(self._write_config({"model": "test-model"}))
        # Memory should be None if not enabled and not present
        self.assertIsNone(app_config.get("memory"))

    def test_load_config_memory_disabled_explicitly(self):
        """Test loading config when memory.enabled is false."""
        data: dict[str, Any] = {
            "model": "test-model",
            "memory": {
                "enabled": False,
                "enable_compression": True,  # This should be ignored if memory is disabled
            },
        }
        mem = self._load(self._write_config(data)).get("memory")
        self.assertIsNotNone(mem)
        if mem:  # for type checker
            self.assertFalse(mem.get("enabled"))
            # Defaults are still filled in even when memory is disabled.
            self.assertEqual(mem.get("enable_compression"), DEFAULT_MEMORY_ENABLE_COMPRESSION)
            self.assertEqual(mem.get("compression_threshold_factor"), DEFAULT_MEMORY_COMPRESSION_THRESHOLD_FACTOR)
            self.assertEqual(mem.get("keep_recent_messages"), DEFAULT_MEMORY_KEEP_RECENT_MESSAGES)

    def test_load_config_memory_enabled_no_compression_settings(self):
        """Test loading config when memory is enabled, but no specific compression settings."""
        mem = self._load(self._write_config({"model": "test-model", "memory": {"enabled": True}})).get("memory")
        self.assertIsNotNone(mem)
        if mem:  # for type checker
            self.assertTrue(mem.get("enabled"))
            self.assertEqual(mem.get("enable_compression"), DEFAULT_MEMORY_ENABLE_COMPRESSION)
            self.assertEqual(mem.get("compression_threshold_factor"), DEFAULT_MEMORY_COMPRESSION_THRESHOLD_FACTOR)
            self.assertEqual(mem.get("keep_recent_messages"), DEFAULT_MEMORY_KEEP_RECENT_MESSAGES)

    def test_load_config_partial_memory_compression_settings(self):
        """Test loading config with memory enabled and partial compression settings."""
        data: dict[str, Any] = {
            "model": "test-model",
            "memory": {
                "enabled": True,
                "enable_compression": True,
                "keep_recent_messages": 10,
                # compression_threshold_factor is intentionally absent
            },
        }
        mem = self._load(self._write_config(data)).get("memory")
        self.assertIsNotNone(mem)
        if mem:  # for type checker
            self.assertTrue(mem.get("enabled"))
            self.assertTrue(mem.get("enable_compression"))
            # Missing value falls back to the default ...
            self.assertEqual(mem.get("compression_threshold_factor"), DEFAULT_MEMORY_COMPRESSION_THRESHOLD_FACTOR)
            # ... while the provided one is kept.
            self.assertEqual(mem.get("keep_recent_messages"), 10)

    def test_load_config_full_memory_compression_settings(self):
        """Test loading config with memory enabled and all compression settings specified."""
        custom_threshold = 0.7
        custom_keep_recent = 3
        data: dict[str, Any] = {
            "model": "test-model",
            "memory": {
                "enabled": True,
                "enable_compression": True,
                "compression_threshold_factor": custom_threshold,
                "keep_recent_messages": custom_keep_recent,
            },
        }
        mem = self._load(self._write_config(data)).get("memory")
        self.assertIsNotNone(mem)
        if mem:  # for type checker
            self.assertTrue(mem.get("enabled"))
            self.assertTrue(mem.get("enable_compression"))
            self.assertEqual(mem.get("compression_threshold_factor"), custom_threshold)
            self.assertEqual(mem.get("keep_recent_messages"), custom_keep_recent)

    def test_load_config_yaml_format(self):
        """Test loading config with memory settings from a YAML file."""
        custom_threshold = 0.65
        custom_keep_recent = 7
        data: dict[str, Any] = {
            "model": "test-model-yaml",
            "memory": {
                "enabled": True,
                "enable_compression": False,
                "compression_threshold_factor": custom_threshold,
                "keep_recent_messages": custom_keep_recent,
            },
        }
        mem = self._load(self._write_config(data, format="yaml")).get("memory")
        self.assertIsNotNone(mem)
        if mem:  # for type checker
            self.assertTrue(mem.get("enabled"))
            self.assertFalse(mem.get("enable_compression"))
            self.assertEqual(mem.get("compression_threshold_factor"), custom_threshold)
            self.assertEqual(mem.get("keep_recent_messages"), custom_keep_recent)

    def test_empty_stored_config_defaults(self):
        """Verify that EMPTY_STORED_CONFIG has the correct default memory settings."""
        memory_defaults = EMPTY_STORED_CONFIG.get("memory")
        self.assertIsNotNone(memory_defaults)
        if memory_defaults:  # for type checker
            self.assertEqual(memory_defaults.get("enabled"), DEFAULT_MEMORY_ENABLED)
self.assertEqual(memory_defaults.get("enable_compression"), DEFAULT_MEMORY_ENABLE_COMPRESSION) 172 | self.assertEqual(memory_defaults.get("compression_threshold_factor"), DEFAULT_MEMORY_COMPRESSION_THRESHOLD_FACTOR) 173 | self.assertEqual(memory_defaults.get("keep_recent_messages"), DEFAULT_MEMORY_KEEP_RECENT_MESSAGES) 174 | 175 | 176 | if __name__ == "__main__": 177 | unittest.main() 178 | -------------------------------------------------------------------------------- /codexy/tui/widgets/overlays/approval_overlay.py: -------------------------------------------------------------------------------- 1 | from rich.text import Text 2 | from textual import events 3 | from textual.app import ComposeResult 4 | from textual.message import Message 5 | from textual.reactive import reactive 6 | from textual.widgets import Label, OptionList, Static 7 | from textual.widgets.option_list import Option, OptionDoesNotExist 8 | 9 | from ....approvals import ApprovalMode 10 | 11 | 12 | # --- Widget --- 13 | class ApprovalModeOverlay(Static): 14 | """A component for selecting an approval mode.""" 15 | 16 | # --- Messages --- 17 | class ApprovalModeSelected(Message): 18 | """Sent when a user selects a new approval mode.""" 19 | 20 | def __init__(self, mode: ApprovalMode): 21 | self.mode: ApprovalMode = mode 22 | super().__init__() 23 | 24 | class ExitApprovalOverlay(Message): 25 | """Sent when a user cancels selection, closing the overlay.""" 26 | 27 | pass 28 | 29 | DEFAULT_CSS = """ 30 | ApprovalModeOverlay { 31 | layer: approval_overlay_layer; 32 | display: none; 33 | align: center middle; 34 | width: 80%; 35 | max-width: 60; 36 | height: auto; 37 | max-height: 15; 38 | border: thick $accent; 39 | background: $panel; 40 | padding: 1; 41 | } 42 | ApprovalModeOverlay.-active { 43 | display: block; 44 | } 45 | ApprovalModeOverlay #approval-overlay-title { 46 | width: 100%; 47 | text-align: center; 48 | margin-bottom: 1; 49 | text-style: bold; 50 | } 51 | ApprovalModeOverlay 
#current-mode-label { 52 | width: 100%; 53 | text-align: center; 54 | color: $text-muted; 55 | margin-bottom: 1; 56 | } 57 | ApprovalModeOverlay OptionList { 58 | border: none; 59 | background: $panel-darken-1; 60 | max-height: 10; 61 | height: auto; 62 | min-height: 1; 63 | } 64 | ApprovalModeOverlay #approval-overlay-footer { 65 | margin-top: 1; 66 | width: 100%; 67 | text-align: center; 68 | color: $text-muted; 69 | } 70 | ApprovalModeOverlay OptionList Option { 71 | padding: 0 1; 72 | height: 1; 73 | } 74 | ApprovalModeOverlay OptionList Option :hover { 75 | background: $accent-darken-1; 76 | } 77 | ApprovalModeOverlay OptionList Option.--highlight { 78 | background: $accent !important; 79 | color: $text !important; 80 | } 81 | ApprovalModeOverlay OptionList Option.--highlight:focus { 82 | background: $accent-darken-1 !important; 83 | } 84 | """ 85 | 86 | # --- Reactives --- 87 | current_mode: reactive[ApprovalMode] = reactive(ApprovalMode.SUGGEST) 88 | _option_list_id = "approval-mode-option-list" 89 | 90 | def compose(self) -> ComposeResult: 91 | """Build the UI elements of the overlay.""" 92 | yield Label("Switch Approval Mode", id="approval-overlay-title") 93 | yield Label(f"Current: {self.current_mode.value}", id="current-mode-label") 94 | # Use OptionList to display options 95 | yield OptionList(id=self._option_list_id) 96 | yield Label("↑/↓ Select, Enter Confirm, Esc Cancel", id="approval-overlay-footer") 97 | 98 | def on_mount(self) -> None: 99 | """Mounted, fill the list and set focus.""" 100 | self.log.info("ApprovalModeOverlay mounted.") 101 | # Use call_later to schedule _populate_list and focus_list 102 | self.call_later(self._populate_list) 103 | self.call_later(self.focus_list) 104 | 105 | def focus_list(self) -> None: 106 | """Safely focus the OptionList.""" 107 | try: 108 | option_list = self.query_one(f"#{self._option_list_id}", OptionList) 109 | if option_list.is_mounted: 110 | self.log.info("Focusing approval mode OptionList.") 111 | 
option_list.focus() 112 | else: 113 | self.log.warning("Attempted to focus OptionList but it was not mounted.") 114 | except Exception as e: 115 | self.log.error(f"Error focusing approval mode list: {e}") 116 | 117 | # --- Watchers --- 118 | def watch_current_mode(self, new_mode: ApprovalMode) -> None: 119 | """When current_mode changes, update the label and repopulate the list to update the highlight.""" 120 | self.log.info(f"Watched current_mode change to {new_mode.value}. Repopulating list.") 121 | try: 122 | label = self.query_one("#current-mode-label", Label) 123 | label.update(f"Current: {new_mode.value}") 124 | # Repopulate the list to ensure the highlight is correct 125 | self._populate_list() 126 | except Exception as e: 127 | # If this happens during unmount, ignore the error 128 | if self.is_mounted: 129 | self.log.warning(f"Could not update current mode label or list: {e}") 130 | 131 | # --- Internal Methods --- 132 | def _populate_list(self): 133 | """Populate the OptionList with approval modes.""" 134 | if not self.is_mounted: 135 | self.log.warning("Attempted to populate list, but overlay is not mounted.") 136 | return 137 | self.log.info(f"Executing _populate_list for mode {self.current_mode.value}") 138 | try: 139 | option_list = self.query_one(f"#{self._option_list_id}", OptionList) 140 | self.log.info(f"Found OptionList widget: {option_list}") 141 | option_list.clear_options() 142 | self.log.info("Cleared existing options.") 143 | 144 | highlighted_index: int | None = None 145 | options_to_add = [] 146 | 147 | # Create Option for each approval mode 148 | for index, mode in enumerate(ApprovalMode): 149 | description = "" 150 | style = "" 151 | if mode == ApprovalMode.SUGGEST: 152 | description = "Ask for edits & commands" 153 | elif mode == ApprovalMode.AUTO_EDIT: 154 | description = "Auto-edit files, ask for commands" 155 | elif mode == ApprovalMode.FULL_AUTO: 156 | description = "Auto-edit & sandboxed commands" 157 | elif mode == 
ApprovalMode.DANGEROUS_AUTO: 158 | description = "Auto-approve all (UNSAFE)" 159 | style = "bold red" 160 | 161 | display_text = Text.assemble( 162 | (f"{mode.value}", "bold" if mode == self.current_mode else ""), 163 | (f" - {description}", f"dim {style}" if style else "dim"), 164 | ) 165 | options_to_add.append(Option(display_text, id=mode.value)) 166 | if mode == self.current_mode: 167 | highlighted_index = index 168 | 169 | self.log.info(f"Prepared {len(options_to_add)} options to add.") 170 | option_list.add_options(options_to_add) 171 | self.log.info(f"Called add_options. OptionList now has {option_list.option_count} options.") 172 | 173 | if highlighted_index is not None and option_list.option_count > 0: 174 | try: 175 | self.log.info(f"Attempting to highlight index: {highlighted_index}") 176 | option_list.highlighted = highlighted_index 177 | self.log.info(f"Highlighted index set to {highlighted_index}") 178 | except OptionDoesNotExist: 179 | # If the index is invalid (should not happen unless the list is empty), record a warning 180 | self.log.warning(f"Could not highlight approval mode index {highlighted_index}") 181 | except Exception as high_e: 182 | self.log.error(f"Error setting highlighted index: {high_e}") 183 | 184 | except Exception as e: 185 | if self.is_mounted: 186 | self.log.error(f"Error populating approval mode list: {e}") 187 | 188 | # --- Event Handlers --- 189 | def on_option_list_option_selected(self, event: OptionList.OptionSelected) -> None: 190 | """Handle the selection event in the OptionList.""" 191 | event.stop() 192 | option_id = event.option.id 193 | if option_id is not None: 194 | try: 195 | selected_mode = ApprovalMode(option_id) 196 | self.log.info(f"Approval mode selected: {selected_mode.value}") 197 | self.post_message(self.ApprovalModeSelected(selected_mode)) 198 | except ValueError: 199 | self.log.error(f"Invalid approval mode ID selected: {option_id}") 200 | self.post_message(self.ExitApprovalOverlay()) 201 | else: 202 | 
# If the option has no ID (should not happen), also exit 203 | self.log.warning("Selected option has no ID.") 204 | self.post_message(self.ExitApprovalOverlay()) 205 | 206 | def on_key(self, event: events.Key) -> None: 207 | """Handle key events, especially Escape key.""" 208 | if event.key == "escape": 209 | event.stop() 210 | self.log.info("Approval overlay exited via Escape.") 211 | self.post_message(self.ExitApprovalOverlay()) 212 | -------------------------------------------------------------------------------- /codexy/tools/apply_diff_tool.py: -------------------------------------------------------------------------------- 1 | import re 2 | from pathlib import Path 3 | 4 | from openai.types.chat import ChatCompletionToolParam 5 | 6 | PROJECT_ROOT = Path.cwd() 7 | 8 | 9 | def parse_diff_blocks(diff_text: str) -> list[tuple[int, str, str]]: 10 | """Parses the multi-block diff string into individual (start_line, search, replace) tuples.""" 11 | blocks = [] 12 | # Regex to find SEARCH blocks with start_line, search content, and replace content 13 | # It handles potential variations in whitespace and the presence of the ------- and ======= markers. 14 | # It uses non-greedy matching (.*?) to avoid consuming subsequent blocks. 
15 | pattern = re.compile( 16 | r"^\s*<<<<<<<\s*SEARCH\s*\n" # Start marker 17 | r":start_line:(\d+)\s*\n" # Capture start line number 18 | r"-{2,}\s*\n" # Separator (at least two hyphens) 19 | r"(.*?)" # Capture search content (non-greedy) 20 | r"={2,}\s*\n" # Separator (at least two equals signs) 21 | r"(.*?)" # Capture replace content (non-greedy) 22 | r">>>>>>>\s*REPLACE\s*$", # End marker 23 | re.MULTILINE | re.DOTALL, # Multiline and Dotall flags 24 | ) 25 | 26 | for match in pattern.finditer(diff_text): 27 | start_line = int(match.group(1)) 28 | search_content = match.group(2) 29 | replace_content = match.group(3) 30 | # Important: Normalize line endings in search/replace content for comparison 31 | search_content = search_content.replace("\r\n", "\n") 32 | replace_content = replace_content.replace("\r\n", "\n") 33 | blocks.append((start_line, search_content, replace_content)) 34 | 35 | if not blocks and diff_text.strip(): # Check if parsing failed but diff wasn't empty 36 | raise ValueError("Diff text provided but could not parse any valid SEARCH/REPLACE blocks.") 37 | 38 | # Sort blocks by start line descending to apply changes from bottom up, 39 | # which avoids messing up line numbers for subsequent changes in the same file. 40 | blocks.sort(key=lambda x: x[0], reverse=True) 41 | return blocks 42 | 43 | 44 | def apply_diff_tool(path: str, diff: str) -> str: 45 | """Applies changes to a file based on a diff string.""" 46 | if not path: 47 | return "Error: 'path' argument is required." 48 | if not diff: 49 | return "Error: 'diff' argument is required and cannot be empty." 50 | 51 | file_path = PROJECT_ROOT / path 52 | 53 | # --- Path Validation --- 54 | try: 55 | resolved_path = file_path.resolve(strict=True) # Must exist for diff 56 | if not str(resolved_path).startswith(str(PROJECT_ROOT)): 57 | return f"Error: Attempted to modify file outside of project root: {path}" 58 | if not resolved_path.is_file(): 59 | return f"Error: Path '{path}' is not a file." 
60 | except FileNotFoundError: 61 | return f"Error: File not found at '{path}' (resolved to '{file_path}')" 62 | except Exception as e: 63 | return f"Error resolving path '{path}': {e}" 64 | 65 | # --- Parse Diff Blocks --- 66 | try: 67 | diff_blocks = parse_diff_blocks(diff) 68 | if not diff_blocks: 69 | return "Error: No valid SEARCH/REPLACE blocks found in the provided diff." 70 | except ValueError as e: 71 | return f"Error parsing diff: {e}" 72 | except Exception as e: 73 | return f"Unexpected error parsing diff: {e}" 74 | 75 | # --- Read File Content --- 76 | try: 77 | with open(resolved_path, encoding="utf-8") as f: 78 | original_lines = f.readlines() # Read lines into a list 79 | except Exception as e: 80 | return f"Error reading file '{path}' for diff application: {e}" 81 | 82 | # --- Apply Changes (Bottom-Up) --- 83 | modified_lines = list(original_lines) # Create a mutable copy 84 | applied_count = 0 85 | errors = [] 86 | 87 | for start_line, search_content, replace_content in diff_blocks: 88 | start_idx = start_line - 1 # Convert to 0-based index 89 | 90 | if "\n" not in search_content: 91 | if start_idx < len(modified_lines): 92 | current_line = modified_lines[start_idx].rstrip("\r\n") 93 | if current_line == search_content: 94 | modified_lines[start_idx] = replace_content + "\n" 95 | applied_count += 1 96 | print(f"Successfully applied single-line diff block at line {start_line} to {path}") 97 | continue 98 | else: 99 | print(f"Single line mismatch - expected: '{search_content}', actual: '{current_line}'") 100 | 101 | errors.append( 102 | f"Error applying block starting at line {start_line}: SEARCH content does not exactly match file content." 
103 | ) 104 | continue 105 | 106 | # Multi-line processing 107 | search_lines = search_content.splitlines() 108 | num_search_lines = len(search_lines) 109 | 110 | # Check bounds 111 | if start_idx < 0 or start_idx + num_search_lines > len(modified_lines): 112 | errors.append( 113 | f"Error applying block starting at line {start_line}: Line range [{start_line}-{start_line + num_search_lines - 1}] is out of bounds (file has {len(modified_lines)} lines)." 114 | ) 115 | continue # Skip this block 116 | 117 | # Extract corresponding lines from file and remove line endings for content comparison 118 | match = True 119 | for i, search_line in enumerate(search_lines): 120 | file_line = modified_lines[start_idx + i].rstrip("\r\n") 121 | if file_line != search_line: 122 | print(f"Line {start_line + i} mismatch: '{file_line}' != '{search_line}'") 123 | match = False 124 | break 125 | 126 | if match: 127 | # Apply replacement 128 | replace_lines = [line + "\n" for line in replace_content.splitlines()] 129 | if not replace_lines: # Handle empty replacement content 130 | replace_lines = [""] 131 | 132 | modified_lines[start_idx : start_idx + num_search_lines] = replace_lines 133 | applied_count += 1 134 | print(f"Successfully applied multi-line diff block starting at line {start_line} to {path}") 135 | else: 136 | errors.append( 137 | f"Error applying block starting at line {start_line}: SEARCH content does not exactly match file content." 138 | ) 139 | 140 | # --- Write Modified Content Back --- 141 | if applied_count > 0 and not errors: # Only write if at least one block applied and no errors occurred 142 | try: 143 | with open(resolved_path, "w", encoding="utf-8") as f: 144 | f.writelines(modified_lines) 145 | return f"Successfully applied {applied_count} diff block(s) to '{path}'." 
146 | except Exception as e: 147 | return f"Error writing modified content back to '{path}' after applying diffs: {e}" 148 | elif errors: 149 | error_summary = "\n".join(errors) 150 | return f"Failed to apply diff to '{path}'. {applied_count} block(s) applied before encountering errors:\n{error_summary}" 151 | else: # No blocks applied (e.g., all failed matching) 152 | return f"Failed to apply diff to '{path}'. No matching SEARCH blocks found or all blocks failed." 153 | 154 | 155 | APPLY_DIFF_TOOL_DEF: ChatCompletionToolParam = { 156 | "type": "function", 157 | "function": { 158 | "name": "apply_diff", 159 | "description": "Apply a specific change to a file using a search/replace block format. The SEARCH block must exactly match existing content.", 160 | "parameters": { 161 | "type": "object", 162 | "properties": { 163 | "path": { 164 | "type": "string", 165 | "description": "The relative path of the file to modify.", 166 | }, 167 | "diff": { 168 | "type": "string", 169 | "description": """A string defining the changes in SEARCH/REPLACE block format. 170 | 171 | **EXACT FORMAT REQUIRED:** 172 | ``` 173 | <<<<<<< SEARCH 174 | :start_line:LINE_NUMBER 175 | ------- 176 | EXACT_CONTENT_TO_FIND 177 | ======= 178 | NEW_CONTENT_TO_REPLACE_WITH 179 | >>>>>>> REPLACE 180 | ``` 181 | 182 | **CRITICAL RULES:** 183 | 1. Must start with `<<<<<<< SEARCH` (exactly 7 < symbols + space + SEARCH) 184 | 2. Next line: `:start_line:NUMBER` where NUMBER is the 1-based line number where SEARCH content starts 185 | 3. Separator: At least 2 hyphens `--` or more `-------` on their own line 186 | 4. EXACT_CONTENT_TO_FIND: Must match the file content character-for-character (including whitespace) 187 | 5. Separator: At least 2 equals `==` or more `=======` on their own line 188 | 6. NEW_CONTENT_TO_REPLACE_WITH: The replacement content 189 | 7. 
Must end with `>>>>>>> REPLACE` (exactly 7 > symbols + space + REPLACE) 190 | 191 | **EXAMPLE - Single line change:** 192 | ``` 193 | <<<<<<< SEARCH 194 | :start_line:5 195 | ------- 196 | import os 197 | ======= 198 | import os 199 | from pathlib import Path 200 | >>>>>>> REPLACE 201 | ``` 202 | 203 | **EXAMPLE - Multi-line change:** 204 | ``` 205 | <<<<<<< SEARCH 206 | :start_line:10 207 | ------- 208 | def old_function(): 209 | return "old" 210 | ======= 211 | def new_function(): 212 | return "new" 213 | # Added comment 214 | >>>>>>> REPLACE 215 | ``` 216 | 217 | **IMPORTANT NOTES:** 218 | - The SEARCH content must match the file EXACTLY (same indentation, spaces, etc.) 219 | - Line numbers are 1-based (first line = 1, not 0) 220 | - Multiple blocks can be concatenated in one diff string 221 | - Each block is processed independently""", 222 | }, 223 | }, 224 | "required": ["path", "diff"], 225 | }, 226 | }, 227 | } 228 | -------------------------------------------------------------------------------- /README_ZH.md: -------------------------------------------------------------------------------- 1 |
2 |
3 |
一个在终端中运行的轻量级编码助手(OpenAI Codex CLI Python 版本)
7 | 8 |9 | 中文 | English 10 |
11 | 12 |pip install -U codexy
2 |
3 |
Lightweight coding agent that runs in your terminal (OpenAI Codex CLI Python version)
7 | 8 |9 | 中文文档 | English 10 |
11 | 12 |pip install -U codexy