├── src
│   ├── langchain_code
│   │   ├── __init__.py
│   │   ├── config
│   │   │   ├── __init__.py
│   │   │   ├── mcp.json
│   │   │   └── mcp.json~
│   │   ├── cli_components
│   │   │   ├── __init__.py
│   │   │   ├── constants.py
│   │   │   ├── state.py
│   │   │   ├── app.py
│   │   │   ├── agents.py
│   │   │   ├── instructions.py
│   │   │   ├── todos.py
│   │   │   ├── mcp.py
│   │   │   ├── editors.py
│   │   │   ├── runtime.py
│   │   │   └── display.py
│   │   ├── workflows
│   │   │   ├── bug_fix.py
│   │   │   ├── auto.py
│   │   │   ├── feature_impl.py
│   │   │   └── base_system.py
│   │   ├── cli.py
│   │   ├── safety
│   │   │   └── confirm.py
│   │   ├── cli
│   │   │   ├── __init__.py
│   │   │   ├── constants_runtime.py
│   │   │   ├── commands
│   │   │   │   ├── configure.py
│   │   │   │   ├── system.py
│   │   │   │   └── flows.py
│   │   │   └── entrypoint.py
│   │   ├── agent
│   │   │   ├── state.py
│   │   │   ├── subagents.py
│   │   │   ├── deep_agents.py
│   │   │   └── react.py
│   │   ├── tools
│   │   │   ├── planner.py
│   │   │   ├── search.py
│   │   │   ├── processor.py
│   │   │   ├── shell.py
│   │   │   ├── script_exec.py
│   │   │   └── fs_local.py
│   │   ├── mcp_loader.py
│   │   ├── hooks.py
│   │   └── static_values.py
│   └── tests
│       └── router_test.py
├── setup.py
├── assets
│   ├── cmd.png
│   ├── demo.gif
│   ├── logo.png
│   ├── mermaid_1756325827.png
│   └── mermaid_1756325835.png
├── MANIFEST.in
├── .gitignore
├── Dockerfile
├── .github
│   └── workflows
│       └── docker-build.yml
├── docs
│   ├── mcp_loader.md
│   ├── safety.md
│   ├── index.md
│   ├── memory.md
│   ├── tools.md
│   ├── workflows.md
│   ├── CONTRIBUTING.md
│   ├── workflows
│   │   └── feature_impl.md
│   └── config.md
├── pyproject.toml
└── LICENSE
/src/langchain_code/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/langchain_code/config/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 |
3 | setup()
4 |
--------------------------------------------------------------------------------
/assets/cmd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zamalali/langchain-code/HEAD/assets/cmd.png
--------------------------------------------------------------------------------
/assets/demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zamalali/langchain-code/HEAD/assets/demo.gif
--------------------------------------------------------------------------------
/assets/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zamalali/langchain-code/HEAD/assets/logo.png
--------------------------------------------------------------------------------
/src/langchain_code/cli_components/__init__.py:
--------------------------------------------------------------------------------
1 | """Building blocks for the LangCode CLI."""
2 |
3 |
--------------------------------------------------------------------------------
/assets/mermaid_1756325827.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zamalali/langchain-code/HEAD/assets/mermaid_1756325827.png
--------------------------------------------------------------------------------
/assets/mermaid_1756325835.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zamalali/langchain-code/HEAD/assets/mermaid_1756325835.png
--------------------------------------------------------------------------------
/src/langchain_code/workflows/bug_fix.py:
--------------------------------------------------------------------------------
1 | """Bug-fixing workflow instruction seed."""
2 |
3 | from __future__ import annotations
4 |
5 | from ..static_values import BUGFIX_INSTR
6 |
--------------------------------------------------------------------------------
/src/langchain_code/workflows/auto.py:
--------------------------------------------------------------------------------
1 | """Re-export the autopilot instruction seed."""
2 |
3 | from __future__ import annotations
4 |
5 | from ..static_values import AUTO_DEEP_INSTR
6 |
--------------------------------------------------------------------------------
/src/langchain_code/workflows/feature_impl.py:
--------------------------------------------------------------------------------
1 | """Feature workflow instruction seed."""
2 |
3 | from __future__ import annotations
4 |
5 | from ..static_values import FEATURE_INSTR
6 |
--------------------------------------------------------------------------------
/src/langchain_code/workflows/base_system.py:
--------------------------------------------------------------------------------
1 | """Re-export the shared base system instructions."""
2 |
3 | from __future__ import annotations
4 |
5 | from ..static_values import BASE_SYSTEM
6 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | # MANIFEST.in
2 | include README.md
3 | include LICENSE
4 |
5 | # Include JSON configs
6 | recursive-include src/langchain_code/config *.json
7 |
8 | # Include all images in assets
9 | recursive-include assets *.png *.jpg *.svg *.gif
10 |
--------------------------------------------------------------------------------
/src/langchain_code/cli_components/constants.py:
--------------------------------------------------------------------------------
1 | """Shared constants for the LangCode CLI."""
2 |
3 | from ..static_values import (
4 | APP_HELP,
5 | ENV_FILENAMES,
6 | GLOBAL_ENV_ENVVAR,
7 | LANGCODE_CONFIG_DIR_ENVVAR,
8 | LANGCODE_DIRNAME,
9 | LANGCODE_FILENAME,
10 | MCP_FILENAME,
11 | MCP_PROJECT_REL,
12 | PROMPT,
13 | )
14 |
--------------------------------------------------------------------------------
/src/langchain_code/cli.py:
--------------------------------------------------------------------------------
1 | """
2 | Compatibility wrapper so tools referencing `langchain_code.cli:main`
3 | still work after the CLI was modularized into `cli.entrypoint`.
4 | """
5 |
6 | from __future__ import annotations
7 |
8 | from .cli.entrypoint import main, selection_hub
9 |
10 | __all__ = ["main", "selection_hub"]
11 |
12 | if __name__ == "__main__":
13 | main()
14 |
15 |
--------------------------------------------------------------------------------
/src/langchain_code/config/mcp.json:
--------------------------------------------------------------------------------
1 | {
2 | "servers": {
3 | "github": {
4 | "command": "npx",
5 | "args": ["-y", "@modelcontextprotocol/server-github"],
6 | "transport": "stdio",
7 | "env": {
8 | "GITHUB_TOKEN": "$GITHUB_API_KEY",
9 | "GITHUB_TOOLSETS": "repos,issues,pull_requests,actions,code_security"
10 | }
11 | }
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/src/langchain_code/config/mcp.json~:
--------------------------------------------------------------------------------
1 | {
2 | "servers": {
3 | "github": {
4 | "command": "npx",
5 | "args": ["-y", "@modelcontextprotocol/server-github"],
6 | "transport": "stdio",
7 | "env": {
8 | "GITHUB_TOKEN": "$GITHUB_API_KEY",
9 | "GITHUB_TOOLSETS": "repos,issues,pull_requests,actions,code_security"
10 | }
11 | }
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/src/langchain_code/safety/confirm.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | import os
3 | from rich.prompt import Confirm
4 |
5 | def should_auto_approve() -> bool:
6 |     return os.getenv("LANGCODE_AUTO_APPROVE", "").lower() in {"1", "true", "yes", "y"}
7 |
8 | def confirm_action(msg: str, apply: bool) -> bool:
9 |     if apply or should_auto_approve():  # explicit --apply or env auto-approve skips the prompt
10 |         return True
11 |     return Confirm.ask(msg)
12 |
--------------------------------------------------------------------------------
/src/langchain_code/cli/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Modular CLI package that wires together the Typer app and individual commands.
3 |
4 | This package exists to keep the top-level entrypoint (`langchain_code.cli`)
5 | lightweight while allowing each command/session to live in its own module.
6 | """
7 |
8 | from __future__ import annotations
9 |
10 | from .entrypoint import app, main, selection_hub
11 |
12 | __all__ = ["app", "main", "selection_hub"]
13 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .venv/
2 | __pycache__/
3 | .env
4 | .env.*
5 | .DS_Store
6 | .vscode/
7 | .idea/
8 | *.pyc
9 | *.pyo
10 | *.pyd
11 | .mypy_cache/
12 | .pytest_cache/
13 | .coverage
14 | coverage.xml
15 | MANIFEST.in
16 | .cache/
17 | mermaid_render.log
18 | *.log
19 | *.sqlite3
20 | *.db
21 | *.egg-info/
22 | dist/
23 | build/
24 | node_modules/
25 | *.bak
26 | *.swp
27 | *.swo
28 | *.tmp
29 | *.orig
30 | *.lock
31 | .env*
32 | *.un~
33 | .langcode/
34 | /.ipynb_checkpoints
35 | test.py
36 | langchain_code.egg-info/
37 | README.pypi.md
38 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.11-slim AS runtime
2 |
3 | ENV PYTHONDONTWRITEBYTECODE=1 \
4 | PYTHONUNBUFFERED=1 \
5 | PIP_DISABLE_PIP_VERSION_CHECK=1 \
6 | PIP_NO_CACHE_DIR=1 \
7 | LANG=C.UTF-8 \
8 | LC_ALL=C.UTF-8 \
9 | LANGCODE_EDITOR=nano
10 |
11 | RUN apt-get update && apt-get install -y --no-install-recommends \
12 | build-essential git curl ca-certificates vim nano less \
13 | && rm -rf /var/lib/apt/lists/*
14 |
15 | RUN curl -fsSL https://deb.nodesource.com/setup_20.x | bash - \
16 | && apt-get update && apt-get install -y --no-install-recommends nodejs \
17 | && rm -rf /var/lib/apt/lists/*
18 |
19 | WORKDIR /app
20 |
21 | COPY pyproject.toml MANIFEST.in README.md ./
22 |
23 | COPY src ./src
24 |
25 | RUN pip install --no-cache-dir .
26 |
27 | RUN useradd -ms /bin/bash app && mkdir -p /work && chown -R app:app /work
28 | USER app
29 | WORKDIR /work
30 |
31 | ENTRYPOINT ["langcode"]
32 |
33 | CMD []
34 |
--------------------------------------------------------------------------------
/.github/workflows/docker-build.yml:
--------------------------------------------------------------------------------
1 | name: Build and Push Docker Image
2 | permissions:
3 | contents: read
4 |
5 | on:
6 | push:
7 | branches:
8 | - main
9 |
10 | jobs:
11 | docker:
12 | runs-on: ubuntu-latest
13 | steps:
14 | - name: Checkout
15 | uses: actions/checkout@v4
16 |
17 | - name: Log in to Docker Hub
18 | uses: docker/login-action@v3
19 | with:
20 | username: ${{ secrets.DOCKERHUB_USERNAME }}
21 | password: ${{ secrets.DOCKERHUB_TOKEN }}
22 |
23 | - name: Extract version from pyproject.toml
24 | id: version
25 | run: |
26 | VERSION=$(grep -Po '(?<=^version = ")[^"]*' pyproject.toml)
27 | echo "version=$VERSION" >> $GITHUB_ENV
28 |
29 | - name: Build and push Docker image
30 | uses: docker/build-push-action@v5
31 | with:
32 | push: true
33 | tags: |
34 | at384/langchain-code:${{ env.version }}
35 | at384/langchain-code:latest
36 |
--------------------------------------------------------------------------------
/src/langchain_code/agent/state.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | from langgraph.graph import MessagesState
3 | from typing_extensions import TypedDict, NotRequired, Annotated
4 | from typing import Literal, Any
5 | from langchain_core.messages import AnyMessage
6 | from langgraph.graph.message import add_messages
7 |
8 | class Todo(TypedDict):
9 | content: str
10 | status: Literal["pending", "in_progress", "completed"]
11 |
12 | def file_reducer(l, r):
13 |     if l is None: return r  # nothing accumulated yet: take the update
14 |     if r is None: return l  # no update: keep the accumulated value
15 |     return {**l, **r}  # shallow merge; keys from the update win
16 |
17 | def replace_reducer(_, new):
18 |     return new  # discard the old value and replace it wholesale
19 |
20 | class DeepAgentState(MessagesState):
21 | """LangChain 1.0 compliant state schema for deep agent.
22 |
23 | Extends MessagesState with custom state channels:
24 | - remaining_steps: Required by langgraph.prebuilt.create_react_agent
25 | - todos: List of Todo objects (replaced completely on update)
26 | - files: Dict of file contents (merged incrementally on update)
27 | """
28 | remaining_steps: int
29 | todos: Annotated[NotRequired[list[Todo]], replace_reducer]
30 | files: Annotated[NotRequired[dict[str, str]], file_reducer]
31 |
--------------------------------------------------------------------------------
/src/langchain_code/cli/constants_runtime.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from ..static_values import (
4 | ANALYZE_SESSION_TITLE,
5 | AUTO_CHAT_SUFFIX,
6 | AUTOPILOT_PROMPT,
7 | CHAT_SESSION_TITLE,
8 | DEEP_CHAT_SESSION_TITLE,
9 | DOCTOR_FOOTER_TIP,
10 | FEATURE_SESSION_TITLE,
11 | FIX_FALLBACK_PROMPT,
12 | FIX_SESSION_TITLE,
13 | GLOBAL_ENV_TITLE,
14 | INSTRUCTIONS_TITLE,
15 | PROJECT_ENV_TITLE,
16 | PROVIDER_KEY_LABELS,
17 | TODO_ANIMATION_DELAY,
18 | TODO_EMPTY_TEXT,
19 | TODO_PANEL_TITLE,
20 | TODO_PLANNING_TEXT,
21 | TODO_STEP_HEADER,
22 | )
23 |
24 | __all__ = [
25 | "CHAT_SESSION_TITLE",
26 | "DEEP_CHAT_SESSION_TITLE",
27 | "AUTO_CHAT_SUFFIX",
28 | "TODO_PANEL_TITLE",
29 | "TODO_PLANNING_TEXT",
30 | "TODO_EMPTY_TEXT",
31 | "TODO_ANIMATION_DELAY",
32 | "TODO_STEP_HEADER",
33 | "AUTOPILOT_PROMPT",
34 | "FEATURE_SESSION_TITLE",
35 | "FIX_SESSION_TITLE",
36 | "ANALYZE_SESSION_TITLE",
37 | "GLOBAL_ENV_TITLE",
38 | "PROJECT_ENV_TITLE",
39 | "INSTRUCTIONS_TITLE",
40 | "FIX_FALLBACK_PROMPT",
41 | "PROVIDER_KEY_LABELS",
42 | "DOCTOR_FOOTER_TIP",
43 | ]
44 |
--------------------------------------------------------------------------------
/docs/mcp_loader.md:
--------------------------------------------------------------------------------
1 |
2 | [LangCode logo]
3 |
4 | LangCode
5 |
6 | The only CLI you'll ever need!
7 |
8 | # MCP Loader
9 |
10 | The MCP Loader is responsible for loading and initializing Model Context Protocol (MCP) servers. It dynamically loads configurations, tools, and other components to construct the agent at runtime.
11 |
12 | ## Key Responsibilities
13 |
14 | - **Dynamic Loading:** Loads agent configurations, tools, and other resources from specified paths.
15 | - **Agent Initialization:** Initializes the MCP agent with the loaded components.
16 | - **Extensibility:** Allows for easy extension and customization by adding new tools or modifying configurations without changing the core code.
17 |
18 | ## Usage
19 |
20 | The `load_mcp` function is the main entry point for loading MCP servers and their tools. It takes the necessary paths and configurations to initialize them for the agent.
21 |
22 | ```python
23 | from langchain_code.mcp_loader import load_mcp
24 |
25 | # Load the MCP agent with default configurations
26 | mcp_agent = load_mcp()
27 |
28 | # Execute a task
29 | result = mcp_agent.run("Some task")
30 | ```
31 |
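32 | Server definitions live in `src/langchain_code/config/mcp.json`. For reference, the bundled default registers the GitHub MCP server over stdio:
33 |
34 | ```json
35 | {
36 |   "servers": {
37 |     "github": {
38 |       "command": "npx",
39 |       "args": ["-y", "@modelcontextprotocol/server-github"],
40 |       "transport": "stdio",
41 |       "env": {
42 |         "GITHUB_TOKEN": "$GITHUB_API_KEY",
43 |         "GITHUB_TOOLSETS": "repos,issues,pull_requests,actions,code_security"
44 |       }
45 |     }
46 |   }
47 | }
48 | ```
49 |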
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools>=69", "wheel"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | [project]
6 | name = "langchain-code"
7 | version = "0.1.6"
8 | description = "A comprehensive combination of all agentic CLIs in one place with 🦜🔗!"
9 | readme = "README.pypi.md"
10 | requires-python = ">=3.11"
11 | dependencies = [
12 | "click>=8.1.7,<9",
13 | "typer>=0.12.3",
14 | "rich==13.7.1",
15 | "python-dotenv==1.0.1",
16 | "langchain>=1.0.0",
17 | "langchain-core>=0.3.0",
18 | "langchain-anthropic>=0.3.0",
19 | "langchain-google-genai>=2.0.4",
20 | "langgraph>=1.0.0,<2.0.0",
21 | "pydantic>=2.7.4,<3",
22 | "ruamel.yaml==0.18.6",
23 | "pyfiglet==1.0.4",
24 | "mcp>=0.4.2",
25 | "langchain-mcp-adapters>=0.1.7",
26 | "langchain-tavily>=0.1.0",
27 | "genai-processors>=1.1.0",
28 | "langchain-openai>=0.3.0,<0.4.0",
29 | "langchain-ollama>=0.3.0,<0.4.0",
30 | "typing-extensions>=4.0.0",
31 | ]
32 |
33 | [project.scripts]
34 | langcode = "langchain_code.cli:main"
35 |
36 | [tool.setuptools]
37 | package-dir = {"" = "src"}
38 |
39 | [tool.setuptools.packages.find]
40 | where = ["src"]
41 | include = ["langchain_code*"]
42 |
43 | [tool.setuptools.package-data]
44 | "langchain_code.config" = ["mcp.json"]
45 |
--------------------------------------------------------------------------------
/src/langchain_code/cli_components/state.py:
--------------------------------------------------------------------------------
1 | """Shared runtime state for the LangCode CLI."""
2 |
3 | from collections import OrderedDict
4 | from contextlib import contextmanager
5 | from typing import Any, Tuple
6 |
7 | from rich.console import Console
8 |
9 | console = Console()
10 |
11 | # Agent cache mirrors the legacy module-level `_AGENT_CACHE`.
12 | AgentCacheKey = Tuple[str, str, str, str, bool]
13 | agent_cache: "OrderedDict[AgentCacheKey, Any]" = OrderedDict()
14 | agent_cache_limit = 6
15 |
16 | # Launcher flag mirrors `_IN_SELECTION_HUB`.
17 | _selection_hub_active = False
18 |
19 | # Controls whether edit helpers should show status banners.
20 | _edit_feedback_enabled = False
21 |
22 | # Keeps track of the currently active Rich live display.
23 | current_live = None
24 |
25 |
26 | def set_selection_hub_active(active: bool) -> None:
27 | """Toggle whether the launcher is currently active."""
28 | global _selection_hub_active
29 | _selection_hub_active = active
30 |
31 |
32 | def in_selection_hub() -> bool:
33 | """Return True when the launcher hub owns the terminal."""
34 | return _selection_hub_active
35 |
36 |
37 | def edit_feedback_enabled() -> bool:
38 | """Return True if edit helpers should print status banners."""
39 | return _edit_feedback_enabled
40 |
41 |
42 | @contextmanager
43 | def edit_feedback():
44 | """Temporarily enable edit feedback panels while a user is editing."""
45 | global _edit_feedback_enabled
46 | prev = _edit_feedback_enabled
47 | _edit_feedback_enabled = True
48 | try:
49 | yield
50 | finally:
51 | _edit_feedback_enabled = prev
52 |
--------------------------------------------------------------------------------
/src/langchain_code/cli_components/app.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import logging
4 | import warnings
5 |
6 | import typer
7 | from click.exceptions import UsageError
8 | from typer.core import TyperGroup
9 |
10 | from .constants import APP_HELP
11 |
12 |
13 | warnings.filterwarnings("ignore", message=r"typing\.NotRequired is not a Python type.*")
14 | warnings.filterwarnings("ignore", category=UserWarning, module=r"pydantic\._internal.*")
15 |
16 |
17 | class _DefaultToChatGroup(TyperGroup):
18 | """Route unknown subcommands to `chat --inline ...` so `langcode \"Hi\"` works."""
19 |
20 | def resolve_command(self, ctx, args):
21 | if args and not args[0].startswith("-"):
22 | try:
23 | return super().resolve_command(ctx, args)
24 | except UsageError:
25 | chat_cmd = self.get_command(ctx, "chat")
26 | if chat_cmd is None:
27 | raise
28 | if "--inline" not in args:
29 | args = ["--inline", *args]
30 | return chat_cmd.name, chat_cmd, args
31 | return super().resolve_command(ctx, args)
32 |
33 |
34 | for _name in (
35 | "langchain_google_genai",
36 | "langchain_google_genai.chat_models",
37 | "tenacity",
38 | "tenacity.retry",
39 | "httpx",
40 | "urllib3",
41 | "google",
42 | ):
43 | _log = logging.getLogger(_name)
44 | _log.setLevel(logging.CRITICAL)
45 | _log.propagate = False
46 |
47 |
48 | app = typer.Typer(
49 | cls=_DefaultToChatGroup,
50 | add_completion=False,
51 | help=APP_HELP.strip(),
52 | context_settings={"allow_extra_args": True, "ignore_unknown_options": True},
53 | )
54 |
55 |
--------------------------------------------------------------------------------
/src/langchain_code/cli_components/agents.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from pathlib import Path
4 | from typing import Any, Optional, Tuple
5 |
6 | from ..config_core import resolve_provider as _resolve_provider_base
7 | from .state import agent_cache, agent_cache_limit, AgentCacheKey
8 |
9 |
10 | def agent_cache_get(key: AgentCacheKey):
11 | if key in agent_cache:
12 | agent_cache.move_to_end(key)
13 | return agent_cache[key]
14 | return None
15 |
16 |
17 | def agent_cache_put(key: AgentCacheKey, value: Any) -> None:
18 | agent_cache[key] = value
19 | agent_cache.move_to_end(key)
20 | while len(agent_cache) > agent_cache_limit:
21 | agent_cache.popitem(last=False)
22 |
23 |
24 | def resolve_provider(llm_opt: Optional[str], router: bool) -> str:
25 | if llm_opt:
26 | return _resolve_provider_base(llm_opt)
27 | if router:
28 | return "gemini"
29 | return _resolve_provider_base(None)
30 |
31 |
32 | def build_react_agent_with_optional_llm(provider: str, project_dir: Path, llm=None, **kwargs):
33 | from ..agent.react import build_react_agent
34 |
35 | try:
36 | if llm is not None:
37 | return build_react_agent(provider=provider, project_dir=project_dir, llm=llm, **kwargs)
38 | except TypeError:
39 |         pass  # build_react_agent signature without llm support; fall back below
40 | return build_react_agent(provider=provider, project_dir=project_dir, **kwargs)
41 |
42 |
43 | def build_deep_agent_with_optional_llm(provider: str, project_dir: Path, llm=None, **kwargs):
44 | from ..agent.react import build_deep_agent
45 |
46 | try:
47 | if llm is not None:
48 | return build_deep_agent(provider=provider, project_dir=project_dir, llm=llm, **kwargs)
49 | except TypeError:
50 |         pass  # build_deep_agent signature without llm support; fall back below
51 | return build_deep_agent(provider=provider, project_dir=project_dir, **kwargs)
52 |
--------------------------------------------------------------------------------
/src/langchain_code/cli/commands/configure.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from pathlib import Path
4 |
5 | import typer
6 | from rich.panel import Panel
7 | from rich.text import Text
8 |
9 | from ...cli_components.display import print_session_header
10 | from ...cli_components.env import edit_global_env_file, edit_env_file, load_global_env, load_env_files
11 | from ...cli_components.state import console, edit_feedback
12 | from ...cli_components.instructions import edit_langcode_md
13 | from ..constants_runtime import GLOBAL_ENV_TITLE, PROJECT_ENV_TITLE, INSTRUCTIONS_TITLE
14 |
15 |
16 | def env(
17 | global_: bool = typer.Option(False, "--global", "-g", help="Edit the global env file."),
18 | project_dir: Path = typer.Option(Path.cwd(), "--project-dir", exists=True, file_okay=False),
19 | ):
20 | if global_:
21 | print_session_header(GLOBAL_ENV_TITLE, provider=None, project_dir=project_dir, interactive=False)
22 | with edit_feedback():
23 | edit_global_env_file()
24 | load_global_env(override_existing=True)
25 | console.print(Panel.fit(Text("Global environment loaded.", style="green"), border_style="green"))
26 | else:
27 | print_session_header(PROJECT_ENV_TITLE, provider=None, project_dir=project_dir, interactive=False)
28 | with edit_feedback():
29 | edit_env_file(project_dir)
30 | load_env_files(project_dir, override_existing=False)
31 | console.print(Panel.fit(Text("Project environment loaded.", style="green"), border_style="green"))
32 |
33 |
34 | def edit_instructions(
35 | project_dir: Path = typer.Option(Path.cwd(), "--project-dir", exists=True, file_okay=False),
36 | ):
37 | print_session_header(
38 | INSTRUCTIONS_TITLE,
39 | provider=None,
40 | project_dir=project_dir,
41 | interactive=False,
42 | )
43 | with edit_feedback():
44 | edit_langcode_md(project_dir)
45 |
46 |
47 | __all__ = ["env", "edit_instructions"]
48 |
49 |
--------------------------------------------------------------------------------
/docs/safety.md:
--------------------------------------------------------------------------------
1 |
2 | [LangCode logo]
3 |
4 | LangCode
5 |
6 | The only CLI you'll ever need!
7 |
8 | # Safety
9 |
10 | The safety module is responsible for ensuring that the agent's responses are safe and appropriate. It includes mechanisms for detecting and filtering harmful content, as well as providing warnings and disclaimers when necessary.
11 |
12 | ## Key Features
13 |
14 | The safety module provides the following key features:
15 |
16 | - **Content Filtering:** Filters out harmful or inappropriate content from the agent's responses. This includes content that is sexually suggestive, violent, or discriminatory.
17 | - **Bias Detection:** Detects and mitigates biases in the agent's responses. This helps to ensure that the agent's responses are fair and unbiased.
18 | - **Safety Prompts:** Provides safety-related prompts and instructions to the language model. This helps to guide the language model towards generating safe and appropriate responses.
19 |
20 | ## Usage
21 |
22 | The safety module is enabled by default.
23 |
24 | ### Configuration
25 |
26 | The safety module can be configured through the agent's settings. This allows you to customize its behavior, such as the content filtering level and the bias detection sensitivity.
27 |
28 | ### Best Practices
29 |
30 | - Use the safety module to protect users from harmful content.
31 | - Configure the safety module to meet your specific needs.
32 | - Monitor the safety module's performance and make adjustments as needed.
33 |
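34 | In the current codebase, the concrete safety mechanism is action confirmation. A minimal usage sketch, based on the helpers in `src/langchain_code/safety/confirm.py`:
35 |
36 | ```python
37 | import os
38 |
39 | from langchain_code.safety.confirm import confirm_action, should_auto_approve
40 |
41 | # Auto-approval is driven by the LANGCODE_AUTO_APPROVE environment variable.
42 | os.environ["LANGCODE_AUTO_APPROVE"] = "true"
43 | assert should_auto_approve()
44 |
45 | # With auto-approve on (or apply=True), confirm_action returns True
46 | # without showing an interactive prompt.
47 | if confirm_action("Apply this edit?", apply=False):
48 |     print("approved")
49 | ```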
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 |
2 | [LangCode logo]
3 |
4 | LangCode
5 |
6 | The only CLI you'll ever need!
7 |
8 | # LangCode Documentation
9 |
10 | Welcome to the documentation for LangCode, a ReAct + tools code agent CLI.
11 |
12 | This documentation provides a comprehensive overview of the LangCode architecture, components, and usage.
13 |
14 | ## Table of Contents
15 |
16 | - **[Introduction](README.md)**: Overview of the LangCode project and its purpose.
17 | - **[Command-Line Interface (CLI)](cli.md)**: Detailed guide to using the LangCode CLI, including commands, options, and usage examples.
18 | - **[Configuration](config.md)**: Explanation of how to configure LangCode, including setting the language model provider and other settings.
19 | - **[Agent](agent/README.md)**: Information about the LangCode agent architecture and its components.
20 | - **[Deep Agent](agent/deep.md)**: In-depth documentation of the Deep Agent, its planning and tasking capabilities, and its subagents.
21 | - **[ReAct Agent](agent/react.md)**: Comprehensive guide to the ReAct Agent, its reasoning and acting framework, and its tools.
22 | - **[Router](agent/router.md)**: Documentation for the agent router.
23 | - **[MCP Loader](mcp_loader.md)**: Guide to loading Model Context Protocol (MCP) tools and integrating them with LangCode.
24 | - **[Memory](memory.md)**: Explanation of the agent's memory management, including conversation history, summarization, and entity extraction.
25 | - **[Safety](safety.md)**: Details about the safety mechanisms in LangCode, including content filtering, bias detection, and safety prompts.
26 | - **[Tools](tools.md)**: Comprehensive list of available tools and instructions on how to add new tools.
27 | - **[Workflows](workflows.md)**: Overview of predefined workflows and instructions on how to create new workflows.
28 |
--------------------------------------------------------------------------------
/docs/memory.md:
--------------------------------------------------------------------------------
1 |
2 | [LangCode logo]
3 |
4 | LangCode
5 |
6 | The only CLI you'll ever need!
7 |
8 | # Memory
9 |
10 | The memory module is responsible for managing the agent's memory and context. This includes storing past interactions, summarizing conversations, and providing relevant information to the agent when needed.
11 |
12 | ## Key Components
13 |
14 | The memory module consists of the following key components:
15 |
16 | - **Conversation History:** Stores the full history of interactions between the user and the agent. This includes both the user's input and the agent's responses. The conversation history is used to provide context for the agent's decisions and actions.
17 | - **Summarization:** Summarizes long conversations to fit within the context window of the language model. This is important because language models have a limited context window, and long conversations can exceed this limit. Summarization helps to reduce the amount of text that needs to be processed by the language model, while still preserving the key information.
18 | - **Entity Extraction:** Extracts key entities and concepts from the conversation to build a knowledge base. This allows the agent to learn from past interactions and to use this knowledge to improve its performance on future tasks.
19 |
20 | ## Usage
21 |
22 | The memory module is automatically integrated with the agent and does not require separate initialization. The agent will automatically store and retrieve information from the memory module as needed.
23 |
24 | ### Configuration
25 |
26 | The memory module can be configured through the agent's settings. This allows you to customize the behavior of the memory module, such as the summarization method and the entity extraction method.
27 |
28 | ### Best Practices
29 |
30 | - Keep conversations focused and concise.
31 | - Use clear and specific language.
32 | - Provide the agent with relevant context.
33 |
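34 | Because language models have a limited context window, a memory layer must keep recent turns within a budget. An illustrative, self-contained sketch of history trimming (these names are not from the LangCode codebase):
35 |
36 | ```python
37 | def trim_history(messages: list[str], max_chars: int = 8000) -> list[str]:
38 |     """Keep the most recent messages that fit within max_chars."""
39 |     kept: list[str] = []
40 |     total = 0
41 |     for msg in reversed(messages):  # walk newest-first
42 |         if total + len(msg) > max_chars:
43 |             break
44 |         kept.append(msg)
45 |         total += len(msg)
46 |     return list(reversed(kept))  # restore chronological order
47 | ```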
--------------------------------------------------------------------------------
/docs/tools.md:
--------------------------------------------------------------------------------
1 |
2 | [LangCode logo]
3 |
4 | LangCode
5 |
6 | The only CLI you'll ever need!
7 |
8 | # Tools
9 |
10 | The tools module provides a collection of tools that the agent can use to perform various tasks. These tools enable the agent to interact with the environment, access information, and perform computations.
11 |
12 | ## Available Tools
13 |
14 | - **Files:** Provides functionalities for listing directories, finding files using glob patterns, reading file contents, editing files by replacing snippets, writing new files, and deleting files.
15 | - **Search:** Enables searching for regex patterns within files under a specified directory.
16 | - **Shell:** Allows running shell commands in the project directory, facilitating tasks like listing files, searching file contents, and running project-specific commands.
17 | - **Web:** Offers a search engine optimized for comprehensive, accurate, and trusted results, useful for answering questions about current events.
18 | - **Multimodal:** Processes text and optional images with the underlying LLM.
19 | - **Planning:** Creates and updates structured todo lists for task management.
20 | - **GitHub:** Provides functionalities for interacting with GitHub repositories, including creating, updating, and searching repositories, managing issues and pull requests, and pushing files.
21 | - **Mermaid:** Renders Mermaid syntax to a PNG file.
22 |
23 | ## Tool Usage Guidelines
24 |
25 | - Always use tools rather than guessing.
26 | - For file edits, show exactly what changed.
27 | - Include relevant command outputs in your response.
28 | - Keep responses focused and actionable.
29 |
30 | ## Adding New Tools
31 |
32 | To add a new tool, define a new function or class that implements the desired functionality and integrate it with the agent's tool management system; a sketch follows at the end of this page.
33 |
34 | ### Best Practices
35 |
36 | - Keep tools simple and focused.
37 | - Provide clear and concise documentation.
38 | - Handle errors gracefully.
39 | - Test your tools thoroughly.
40 |
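41 | A minimal sketch of a new tool, assuming tools are plain `@tool`-decorated callables from `langchain_core` that are handed to the agent at build time; the function itself is hypothetical:
42 |
43 | ```python
44 | from langchain_core.tools import tool
45 |
46 | @tool
47 | def count_lines(path: str) -> int:
48 |     """Count the lines in a text file under the project directory."""
49 |     with open(path, "r", encoding="utf-8") as f:
50 |         return sum(1 for _ in f)
51 | ```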
--------------------------------------------------------------------------------
/src/tests/router_test.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import os
3 |
4 | import pytest
5 |
6 | # Run from the repo root so the `src` package resolves without installing.
7 | import src.langchain_code.config_core as config_core
8 |
9 |
10 | def test_normalize_gemini_env(monkeypatch):
11 | monkeypatch.delenv("GOOGLE_API_KEY", raising=False)
12 | monkeypatch.setenv("GEMINI_API_KEY", "gem-key")
13 | importlib.reload(config_core)
14 |
15 | assert os.environ["GOOGLE_API_KEY"] == "gem-key"
16 | assert os.environ["GEMINI_API_KEY"] == "gem-key"
17 |
18 |
19 | @pytest.mark.parametrize("query,expected", [
20 | ("quick fix", "simple"),
21 | ("optimize database performance and add caching", "medium"),
22 | ("design microservices architecture with kubernetes orchestration", "complex"),
23 | ("implement enterprise-grade deep-learning with distributed infrastructure", "overly_complex"),
24 | ])
25 | def test_classify_complexity(query, expected):
26 | r = config_core.IntelligentLLMRouter()
27 | assert r.classify_complexity(query) == expected
28 |
29 |
30 | @pytest.mark.parametrize("cli,expected", [
31 | ("claude", "anthropic"),
32 | ("anthropic", "anthropic"),
33 | ("gemini", "gemini"),
34 | ("google", "gemini"),
35 | ("gpt", "openai"),
36 | ("openai", "openai"),
37 | ("ollama", "ollama"),
38 | ])
39 | def test_resolve_provider_cli(cli, expected):
40 | assert config_core.resolve_provider(cli) == expected
41 |
42 |
43 | def test_resolve_provider_env(monkeypatch):
44 | monkeypatch.setenv("LLM_PROVIDER", "openai")
45 | assert config_core.resolve_provider(None) == "openai"
46 |
47 | monkeypatch.setenv("LLM_PROVIDER", "foobar")
48 | assert config_core.resolve_provider(None) == "gemini"
49 |
50 |
51 | def test_get_model_info_defaults():
52 | info = config_core.get_model_info("anthropic")
53 | assert info["langchain_model_name"] == "claude-3-7-sonnet-20250219"
54 | assert info["complexity"] == "default"
55 |
56 | info = config_core.get_model_info("gemini")
57 | assert info["langchain_model_name"] == "gemini-2.0-flash"
58 |
59 | info = config_core.get_model_info("openai")
60 | assert info["langchain_model_name"] == "gpt-4o-mini"
61 |
62 | info = config_core.get_model_info("ollama")
63 | assert info["provider"] == "ollama"
64 | assert "langchain_model_name" in info
65 |
66 |
67 | def test_get_model_info_with_query():
68 | query = "design kubernetes microservices architecture"
69 | info = config_core.get_model_info("gemini", query)
70 | assert info["provider"] == "gemini"
71 | assert info["complexity"] in {"complex", "overly_complex"}
72 | assert "reasoning_strength" in info
73 |
74 |
75 | def test_pick_default_ollama_model(monkeypatch):
76 | monkeypatch.setenv("LANGCODE_OLLAMA_MODEL", "custom-model")
77 | assert config_core._pick_default_ollama_model() == "custom-model"
78 |
79 | monkeypatch.delenv("LANGCODE_OLLAMA_MODEL", raising=False)
80 | monkeypatch.setenv("OLLAMA_MODEL", "env-ollama")
81 | assert config_core._pick_default_ollama_model() == "env-ollama"
82 |
83 | monkeypatch.delenv("OLLAMA_MODEL", raising=False)
84 | assert config_core._pick_default_ollama_model() == "llama3.1"
85 |
--------------------------------------------------------------------------------
/docs/workflows.md:
--------------------------------------------------------------------------------
1 |
2 | [LangCode logo]
3 |
4 | LangCode
5 |
6 | The only CLI you'll ever need!
7 |
8 | # Workflows
9 |
10 | The workflows module defines a set of predefined workflows that the agent can follow to accomplish specific tasks. Each workflow is a sequence of steps that the agent executes to achieve a particular goal.
11 |
12 | ## Available Workflows
13 |
14 | The following workflows are available for the agent to use:
15 |
16 | - **Bug Fix:** A workflow for identifying and fixing bugs in code. This workflow typically involves the following steps: identifying the bug, reproducing the bug, diagnosing the bug, fixing the bug, and verifying the fix.
17 | - **Feature Implementation:** A workflow for implementing new features in a codebase. This workflow typically involves the following steps: understanding the requirements, designing the feature, implementing the feature, testing the feature, and integrating the feature.
18 | - **General Purpose:** A general-purpose workflow that can be adapted to a wide range of tasks. This workflow can be customized to fit the specific needs of the task at hand.
19 |
20 | ## Creating New Workflows
21 |
22 | To create a new workflow, add a new Python module in the `src/langchain_code/workflows` directory. The existing workflows (`bug_fix.py`, `feature_impl.py`, `auto.py`) are thin modules that re-export instruction seeds from `static_values`; a sketch follows at the end of this page.
23 |
24 | ### Workflow Structure
25 |
26 | Each workflow should have a clear goal and a well-defined set of steps. It should accomplish a specific task and return a result that the agent can use, and it should be well documented, with clear instructions on how to use it.
27 |
28 | ### Best Practices
29 |
30 | - Keep workflows simple and focused.
31 | - Provide clear and concise documentation.
32 | - Handle errors gracefully.
33 | - Test your workflows thoroughly.
34 |
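35 | A minimal sketch of a new workflow module, following the pattern of `src/langchain_code/workflows/bug_fix.py`. The `REFACTOR_INSTR` seed is hypothetical and would need to be added to `static_values.py` first:
36 |
37 | ```python
38 | """Refactoring workflow instruction seed (hypothetical example)."""
39 |
40 | from __future__ import annotations
41 |
42 | # Hypothetical seed string; define it in static_values.py before importing.
43 | from ..static_values import REFACTOR_INSTR
44 | ```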
--------------------------------------------------------------------------------
/src/langchain_code/cli_components/instructions.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from datetime import datetime
4 | from pathlib import Path
5 | from typing import Optional
6 |
7 | import click
8 | from rich.panel import Panel
9 | from rich.text import Text
10 |
11 | from .constants import LANGCODE_DIRNAME, LANGCODE_FILENAME
12 | from .editors import diff_stats, inline_capture_editor, open_in_terminal_editor
13 | from .state import console, edit_feedback_enabled
14 |
15 |
16 | def ensure_langcode_md(project_dir: Path) -> Path:
17 | """
18 | Ensure .langcode/langcode.md exists. Return its Path.
19 | """
20 | cfg_dir = project_dir / LANGCODE_DIRNAME
21 | cfg_dir.mkdir(parents=True, exist_ok=True)
22 | md_path = cfg_dir / LANGCODE_FILENAME
23 | if not md_path.exists():
24 | template = f"""# LangCode - Project Custom Instructions
25 |
26 | Use this file to add project-specific guidance for the agent.
27 | These notes are appended to the base system prompt in both ReAct and Deep agents.
28 |
29 | **Tips**
30 | - Keep it concise and explicit.
31 | - Prefer bullet points and checklists.
32 | - Mention repo conventions, must/shouldn't rules, style guides, and gotchas.
33 |
34 | ## Project Rules
35 | - [ ] e.g., All edits must run `pytest -q` and pass.
36 | - [ ] e.g., Use Ruff & Black for Python formatting.
37 |
38 | ## Code Style & Architecture
39 | - e.g., Follow existing module boundaries in `src/...`
40 |
41 | ## Tooling & Commands
42 | - e.g., Use `make test` to run the test suite.
43 |
44 | ---
45 | _Created {datetime.now().strftime('%Y-%m-%d %H:%M')} by LangCode CLI_
46 | """
47 | md_path.write_text(template, encoding="utf-8")
48 | return md_path
49 |
50 |
51 | def edit_langcode_md(project_dir: Path) -> None:
52 | """
53 | Open .langcode/langcode.md in a terminal editor (Vim-first).
54 | Falls back to $VISUAL/$EDITOR, then click.edit, then inline capture if nothing available.
55 | After exit, show short stats: lines added/removed and new total lines.
56 | """
57 | md_path = ensure_langcode_md(project_dir)
58 | original = md_path.read_text(encoding="utf-8")
59 |
60 | launched = open_in_terminal_editor(md_path)
61 | edited_text: Optional[str] = None
62 |
63 | if not launched:
64 | edited_text = click.edit(original, require_save=False)
65 | if edited_text is None:
66 | edited_text = inline_capture_editor(original)
67 | if edited_text is not None and edited_text != original:
68 | md_path.write_text(edited_text, encoding="utf-8")
69 | else:
70 | edited_text = md_path.read_text(encoding="utf-8")
71 |
72 | if edited_text is None:
73 | if edit_feedback_enabled():
74 | console.print(Panel.fit(Text("No changes saved.", style="yellow"), border_style="yellow"))
75 | return
76 | if edited_text == original:
77 | if edit_feedback_enabled():
78 | console.print(Panel.fit(Text("No changes saved (file unchanged).", style="yellow"), border_style="yellow"))
79 | return
80 |
81 | stats = diff_stats(original, edited_text)
82 | if edit_feedback_enabled():
83 | console.print(Panel.fit(
84 | Text.from_markup(
85 | f"Saved [bold]{md_path}[/bold]\n"
86 | f"[green]+{stats['added']}[/green] / [red]-{stats['removed']}[/red] - total {stats['total_after']} lines"
87 | ),
88 | border_style="green"
89 | ))
90 |
--------------------------------------------------------------------------------
/src/langchain_code/cli_components/todos.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from typing import List, Optional
4 |
5 | from rich import box
6 | from rich.panel import Panel
7 | from rich.table import Table
8 | from rich.text import Text
9 |
10 |
11 | def _coerce_sequential_todos(todos: List[dict] | None) -> List[dict]:
12 | """Ensure visual progression is strictly sequential."""
13 | todos = list(todos or [])
14 | blocked = False
15 | out: List[dict] = []
16 | for item in todos:
17 | status = (item.get("status") or "pending").lower().replace("-", "_")
18 | if blocked and status in {"in_progress", "completed"}:
19 | status = "pending"
20 | if status != "completed":
21 | blocked = True
22 | out.append({**item, "status": status})
23 | return out
24 |
25 |
26 | def render_todos_panel(todos: List[dict]) -> Panel:
27 | todos = _coerce_sequential_todos(todos)
28 |
29 | if not todos:
30 | return Panel(Text("No TODOs yet.", style="dim"), title="TODOs", border_style="blue", box=box.ROUNDED)
31 | table = Table.grid(padding=(0, 1))
32 | table.add_column(justify="right", width=3, no_wrap=True)
33 | table.add_column()
34 |
35 | ICON = {"pending": "○", "in_progress": "◔", "completed": "✓"}
36 | STYLE = {"pending": "dim", "in_progress": "yellow", "completed": "green"}
37 |
38 | for idx, item in enumerate(todos, 1):
39 | status = (item.get("status") or "pending").lower().replace("-", "_")
40 | status = status if status in ICON else "pending"
41 | content = (item.get("content") or "").strip() or "(empty)"
42 | style = STYLE[status]
43 | mark = ICON[status]
44 | text = Text(content, style=style)
45 | if status == "completed":
46 | text.stylize("strike")
47 | table.add_row(f"{idx}.", Text.assemble(Text(mark + " ", style=style), text))
48 | return Panel(table, title="TODOs", border_style="blue", box=box.ROUNDED, padding=(1, 1), expand=True)
49 |
50 |
51 | def diff_todos(before: List[dict] | None, after: List[dict] | None) -> List[str]:
52 | before_list = _coerce_sequential_todos(before or [])
53 | after_list = _coerce_sequential_todos(after or [])
54 | changes: List[str] = []
55 | for idx in range(min(len(before_list), len(after_list))):
56 | prev = (before_list[idx].get("status") or "").lower()
57 | curr = (after_list[idx].get("status") or "").lower()
58 | if prev != curr:
59 | content = (after_list[idx].get("content") or before_list[idx].get("content") or "").strip()
60 | changes.append(f"[{idx + 1}] {content} -> {curr}")
61 | if len(after_list) > len(before_list):
62 | for j in range(len(before_list), len(after_list)):
63 | content = (after_list[j].get("content") or "").strip()
64 | changes.append(f"[+ ] {content} (added)")
65 | if len(before_list) > len(after_list):
66 | for j in range(len(after_list), len(before_list)):
67 | content = (before_list[j].get("content") or "").strip()
68 | changes.append(f"[- ] {content} (removed)")
69 | return changes
70 |
71 |
72 | def complete_all_todos(todos: List[dict] | None) -> List[dict]:
73 | """
74 | Mark any non-completed TODOs as completed. Invoked right before rendering
75 | the final answer so the board reflects finished work the agent may have
76 | forgotten to mark as done.
77 | """
78 | todos = list(todos or [])
79 | out: List[dict] = []
80 | for item in todos:
81 | status = (item.get("status") or "pending").lower().replace("-", "_")
82 | if status != "completed":
83 | item = {**item, "status": "completed"}
84 | out.append(item)
85 | return out
86 |
87 |
88 | def short(text: str, length: int = 280) -> str:
89 | text = text.replace("\r\n", "\n").strip()
90 | return text if len(text) <= length else text[:length] + " ..."
91 |
92 |
--------------------------------------------------------------------------------
/src/langchain_code/cli_components/mcp.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 | import os
5 | from pathlib import Path
6 | from typing import Optional
7 |
8 | import click
9 | from rich.panel import Panel
10 | from rich.text import Text
11 |
12 | from .constants import LANGCODE_DIRNAME, MCP_FILENAME, MCP_PROJECT_REL
13 | from .editors import diff_stats, inline_capture_editor, open_in_terminal_editor
14 | from .state import console, edit_feedback_enabled
15 |
16 |
17 | def mcp_target_path(project_dir: Path) -> Path:
18 | """
19 | Always prefer the repo MCP at src/langchain_code/config/mcp.json.
20 | (We’ll mirror to .langcode/mcp.json after saving for backward compatibility.)
21 | """
22 | prefer = project_dir / MCP_PROJECT_REL
23 | try:
24 | _ = prefer.resolve().relative_to(project_dir.resolve())
25 | except Exception:
26 | return project_dir / LANGCODE_DIRNAME / MCP_FILENAME
27 | return prefer
28 |
29 |
30 | def ensure_mcp_json(project_dir: Path) -> Path:
31 | """
32 | Ensure MCP config exists. Prefer src/langchain_code/config/mcp.json.
33 | """
34 | mcp_path = mcp_target_path(project_dir)
35 | mcp_path.parent.mkdir(parents=True, exist_ok=True)
36 | if not mcp_path.exists():
37 | template = {
38 | "servers": {
39 | "github": {
40 | "command": "npx",
41 | "args": ["-y", "@modelcontextprotocol/server-github"],
42 | "transport": "stdio",
43 | "env": {
44 | "GITHUB_TOKEN": "$GITHUB_API_KEY",
45 | # "GITHUB_TOOLSETS": "repos,issues,pull_requests,actions,code_security"
46 | }
47 | }
48 | }
49 | }
50 | mcp_path.write_text(json.dumps(template, indent=2) + "\n", encoding="utf-8")
51 | return mcp_path
52 |
53 |
54 | def mcp_status_label(project_dir: Path) -> str:
55 | """
56 | Show status for MCP config, pointing to src/langchain_code/config/mcp.json (or legacy).
57 | """
58 | mcp_path = mcp_target_path(project_dir)
59 | rel = os.path.relpath(mcp_path, project_dir)
60 | if not mcp_path.exists():
61 | return f"create- ({rel})"
62 | try:
63 | data = json.loads(mcp_path.read_text(encoding="utf-8") or "{}")
64 | servers = data.get("servers", {}) or {}
65 | count = len(servers) if isinstance(servers, dict) else 0
66 | return f"edit- ({rel}, {count} server{'s' if count != 1 else ''})"
67 | except Exception:
68 | return f"edit- ({rel}, unreadable)"
69 |
70 |
71 | def edit_mcp_json(project_dir: Path) -> None:
72 | """
73 | Open MCP config in a terminal editor (Vim-first), fall back to click.edit / inline,
74 | and show a short diff stat after save. Prefers src/langchain_code/config/mcp.json.
75 | """
76 | mcp_path = ensure_mcp_json(project_dir)
77 | original = mcp_path.read_text(encoding="utf-8")
78 |
79 | launched = open_in_terminal_editor(mcp_path)
80 | edited_text: Optional[str] = None
81 |
82 | if not launched:
83 | edited_text = click.edit(original, require_save=False)
84 | if edited_text is None:
85 | edited_text = inline_capture_editor(original)
86 | if edited_text is not None and edited_text != original:
87 | mcp_path.write_text(edited_text, encoding="utf-8")
88 | else:
89 | edited_text = mcp_path.read_text(encoding="utf-8")
90 |
91 | if edited_text is None:
92 | if edit_feedback_enabled():
93 | console.print(Panel.fit(Text("No changes saved.", style="yellow"), border_style="yellow"))
94 | return
95 | if edited_text == original:
96 | if edit_feedback_enabled():
97 | console.print(Panel.fit(Text("No changes saved (file unchanged).", style="yellow"), border_style="yellow"))
98 | return
99 |
100 | stats = diff_stats(original, edited_text)
101 | if edit_feedback_enabled():
102 | console.print(Panel.fit(
103 | Text.from_markup(
104 | f"Saved [bold]{mcp_path}[/bold]\n"
105 | f"[green]+{stats['added']}[/green] / [red]-{stats['removed']}[/red] - total {stats['total_after']} lines"
106 | ),
107 | border_style="green"
108 | ))
109 |
110 |
--------------------------------------------------------------------------------
/docs/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 |
2 | [LangCode logo]
3 |
4 | LangCode
5 |
6 | The only CLI you'll ever need!
7 |
8 | # Contributing to LangCode
9 |
10 | We welcome contributions to LangCode! This guide outlines the process and best practices for contributing to the project.
11 |
12 | ## Code of Conduct
13 |
14 | This project and everyone participating in it is governed by the [Contributor Covenant Code of Conduct](https://www.contributor-covenant.org/). By participating, you are expected to uphold this code. Please report unacceptable behavior to [insert contact email or link here].
15 |
16 | ## How to Contribute
17 |
18 | There are several ways you can contribute to LangCode:
19 |
20 | * **Report Bugs:** If you find a bug, please submit a detailed issue on GitHub.
21 | * **Suggest Enhancements:** Have an idea for a new feature or improvement? Open an issue to discuss it.
22 | * **Contribute Code:** Submit pull requests to fix bugs, add features, or improve existing code.
23 | * **Improve Documentation:** Help us make the documentation clearer, more concise, and more helpful.
24 |
25 | ## Getting Started
26 |
27 | 1. **Fork the Repository:** Click the "Fork" button in the top right corner of the repository on GitHub.
28 | 2. **Clone Your Fork:** Clone the repository to your local machine:
29 |
30 | ```bash
31 | git clone https://github.com/<your-username>/langchain-code.git
32 | cd langchain-code
33 | ```
34 |
35 | 3. **Create a Virtual Environment:** It's recommended to use a virtual environment to manage dependencies:
36 |
37 | ```bash
38 | python3 -m venv .venv
39 | source .venv/bin/activate # On Windows, use .\.venv\Scripts\activate
40 | ```
41 |
42 | 4. **Install Dependencies:**
43 |
44 | ```bash
45 | pip install -e .
46 | ```
47 |
48 | 5. **Create a Branch:** Create a new branch for your contribution:
49 |
50 | ```bash
51 | git checkout -b feature/your-feature-name
52 | ```
53 |
54 | ## Making Changes
55 |
56 | 1. **Follow Coding Standards:** Adhere to the existing coding style and conventions.
57 | 2. **Write Tests:** Ensure your changes are covered by unit tests. Add new tests if necessary.
58 | 3. **Run Tests:** Run all tests to ensure everything is working correctly:
59 |
60 | ```bash
61 | pytest
62 | ```
63 |
64 | 4. **Commit Your Changes:** Write clear and concise commit messages:
65 |
66 | ```bash
67 | git commit -m "feat: Add your feature description"
68 | ```
69 |
70 | ## Submitting a Pull Request
71 |
72 | 1. **Push Your Branch:** Push your branch to your forked repository on GitHub:
73 |
74 | ```bash
75 | git push origin feature/your-feature-name
76 | ```
77 |
78 | 2. **Create a Pull Request:** Go to your forked repository on GitHub and click the "Compare & pull request" button.
79 | 3. **Pull Request Template:**
80 |
81 | Use the following template for your pull request description:
82 |
83 | ```markdown
84 | ## Description
85 |
86 | [Provide a brief description of the changes you've made.]
87 |
88 | ## Related Issue(s)
89 |
90 | [If applicable, link to the issue(s) this PR addresses. For example: "Fixes #123"]
91 |
92 | ## Checklist
93 |
94 | - [ ] I have tested these changes thoroughly.
95 | - [ ] I have added or updated unit tests.
96 | - [ ] I have updated the documentation (if applicable).
97 | - [ ] I have followed the coding standards.
98 | - [ ] My code is free of any warnings or errors.
99 |
100 | ## Additional Notes
101 |
102 | [Add any additional information or context that might be helpful for reviewers.]
103 | ```
104 |
105 | 4. **Review Process:** Your pull request will be reviewed by the project maintainers. They may request changes or ask questions. Please respond to their feedback promptly.
106 | 5. **Merge:** Once your pull request has been approved, it will be merged into the main branch.
107 |
108 | ## Best Practices
109 |
110 | * **Keep PRs Small:** Smaller pull requests are easier to review and merge.
111 | * **Focus on One Thing:** Each pull request should address a single issue or feature.
112 | * **Write Clear Commit Messages:** Use descriptive commit messages to explain your changes.
113 | * **Stay Up-to-Date:** Keep your branch up-to-date with the main branch by rebasing or merging.
114 |
115 | ## Documentation
116 |
117 | When contributing code, please update the documentation accordingly. This includes:
118 |
119 | * **API Documentation:** Document any new functions, classes, or modules.
120 | * **User Guides:** Update the user guides to reflect any new features or changes.
121 | * **Examples:** Provide examples of how to use the new features.
122 |
123 | Thank you for contributing to LangCode!
--------------------------------------------------------------------------------
/src/langchain_code/cli_components/editors.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import os
4 | import platform
5 | import shutil
6 | import subprocess
7 | import difflib
8 | from pathlib import Path
9 | from typing import Dict, List, Optional
10 |
11 | from rich.markdown import Markdown
12 | from rich.panel import Panel
13 | from rich.text import Text
14 |
15 | from .state import console
16 |
17 |
18 | def inline_capture_editor(initial_text: str) -> str:
19 | """
20 | Minimal inline editor fallback. Type/paste, then finish with a line: EOF
21 | (Only used if no terminal editor is available.)
22 | """
23 | console.print(Panel.fit(
24 | Text("Inline editor: Type/paste your content below. End with a line containing only: EOF", style="bold"),
25 | border_style="cyan",
26 | title="Inline Editor"
27 | ))
28 | if initial_text.strip():
29 | console.print(Text("---- CURRENT CONTENT (preview) ----", style="dim"))
30 | console.print(Markdown(initial_text))
31 | console.print(Text("---- START TYPING (new content will replace file) ----", style="dim"))
32 | lines: List[str] = []
33 | while True:
34 | try:
35 | line = input()
36 | except EOFError:
37 | break
38 | if line.strip() == "EOF":
39 | break
40 | lines.append(line)
41 | return "\n".join(lines)
42 |
43 |
44 | def pick_terminal_editor() -> Optional[List[str]]:
45 | """
46 | Choose a terminal editor command list, prioritizing Vim.
47 | Order:
48 | 1) $LANGCODE_EDITOR (split on spaces)
49 | 2) $VISUAL
50 | 3) $EDITOR
51 | 4) nvim, vim, vi, nano (first found on PATH)
52 | 5) Windows-specific: check common Vim installation paths
53 | Returns argv list or None if nothing available.
54 | """
55 | if os.environ.get("LANGCODE_EDITOR"):
56 | return os.environ["LANGCODE_EDITOR"].split()
57 |
58 | for var in ("VISUAL", "EDITOR"):
59 | v = os.environ.get(var)
60 | if v:
61 | return v.split()
62 |
63 | # Check PATH first
64 | for cand in ("nvim", "vim", "vi", "nano"):
65 | if shutil.which(cand):
66 | return [cand]
67 |
68 | # Windows-specific: check common Vim installation paths
69 | if platform.system().lower() == "windows":
70 | common_vim_paths = [
71 | r"C:\Program Files\Vim\vim91\vim.exe",
72 | r"C:\Program Files\Vim\vim90\vim.exe",
73 | r"C:\Program Files\Vim\vim82\vim.exe",
74 | r"C:\Program Files (x86)\Vim\vim91\vim.exe",
75 | r"C:\Program Files (x86)\Vim\vim90\vim.exe",
76 | r"C:\Program Files (x86)\Vim\vim82\vim.exe",
77 | r"C:\tools\vim\vim91\vim.exe", # Chocolatey
78 | r"C:\tools\vim\vim90\vim.exe",
79 | r"C:\Users\{}\scoop\apps\vim\current\vim.exe".format(os.environ.get("USERNAME", "")), # Scoop
80 | ]
81 |
82 | for vim_path in common_vim_paths:
83 | if os.path.exists(vim_path):
84 | return [vim_path]
85 |
86 | try:
87 | result = subprocess.run(["where", "vim"], capture_output=True, text=True, check=False)
88 | if result.returncode == 0 and result.stdout.strip():
89 | vim_exe = result.stdout.strip().split('\n')[0]
90 | if os.path.exists(vim_exe):
91 | return [vim_exe]
92 | except Exception:
93 | pass
94 |
95 | return None
96 |
97 |
98 | def open_in_terminal_editor(file_path: Path) -> bool:
99 | """
100 | Open the file in a terminal editor and block until it exits.
101 | Returns True if the editor launched, False otherwise.
102 | """
103 | cmd = pick_terminal_editor()
104 | if not cmd:
105 | return False
106 |
107 | try:
108 |         # The argv list form works identically on Windows and POSIX.
109 |         subprocess.run([*cmd, str(file_path)], check=False)
112 | return True
113 | except Exception as e:
114 | console.print(f"[yellow]Failed to launch editor: {e}[/yellow]")
115 | return False
116 |
117 |
118 | def diff_stats(before: str, after: str) -> Dict[str, int]:
119 | """
120 | Compute a simple added/removed stat using difflib.ndiff semantics.
121 | """
122 | before_lines = before.splitlines()
123 | after_lines = after.splitlines()
124 | added = removed = 0
125 | for line in difflib.ndiff(before_lines, after_lines):
126 | if line.startswith("+ "):
127 | added += 1
128 | elif line.startswith("- "):
129 | removed += 1
130 | return {
131 | "added": added,
132 | "removed": removed,
133 | "total_after": len(after_lines),
134 | }
135 |
136 |
--------------------------------------------------------------------------------
/docs/workflows/feature_impl.md:
--------------------------------------------------------------------------------
1 |
2 | **LangCode** - *The only CLI you'll ever need!*
3 |
8 | # Feature Implementation Workflow
9 |
10 | This workflow is a guide for using the agent to implement new features from user requests. It sets out the recommended steps and tools so the agent follows a consistent, well-defined process that accelerates development while maintaining code quality and stability.
11 |
12 | ## `FEATURE_INSTR`
13 |
14 | This section details the `FEATURE_INSTR` instruction seed, which is provided to the ReAct agent to specialize its behavior for feature implementation. It defines the step-by-step process below, guiding the agent from initial planning through final verification; an example invocation follows the list.
15 |
16 | 1. **Plan:** The agent first maps out the steps needed to implement the feature: which files may need inspection, the scope of the required changes, and a high-level plan of action. A clear plan keeps the implementation focused, anticipates likely challenges, and minimizes the risk of errors.
17 | 2. **Locate:** The agent uses the `glob` and `grep` file system tools to find the relevant files and code sections. `glob` performs pattern-based file search (by naming pattern or directory), while `grep` finds specific text or code snippets within files, pinpointing exactly where changes are needed.
18 | 3. **Inspect:** The agent reads the identified files with the `read_file` tool to understand the current implementation, including its architecture, dependencies, and coding style. This keeps modifications compatible with existing functionality and can surface opportunities for refactoring.
19 | 4. **Edit:** The agent makes targeted changes with the `edit_by_diff` tool, which replaces specific code snippets and yields reviewable diffs. Entirely new files are created with `write_file`, but `edit_by_diff` is generally preferred: it is safer and leaves a clear, auditable record of every change.
20 | 5. **Verify:** If a test command was provided via the `--test-cmd` option, the agent runs it with the `run_cmd` tool, substituting the `{TEST_CMD}` placeholder with the actual command. The agent analyzes the test output to confirm the feature integrates cleanly and introduces no regressions.
21 | 6. **Summarize:** The agent concludes with a concise summary of the changes: the key modifications, the reasoning behind them, and suggested next steps or areas for improvement, along with any context useful for future maintenance of the feature.
22 |
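23 | As an example, the workflow might be invoked from the CLI like this (assuming the CLI is installed as `langcode`; `--test-cmd` feeds the Verify step):
24 |
25 | ```bash
26 | langcode feature "Add a dark mode toggle" --apply --test-cmd "pytest -q"
27 | ```
28 |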
--------------------------------------------------------------------------------
/src/langchain_code/agent/subagents.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | from typing import TypedDict, NotRequired, Annotated, Any, Dict, List
3 | from langchain_core.tools import tool, BaseTool, InjectedToolCallId
4 | from langchain_core.messages import ToolMessage
5 | from langgraph.types import Command
6 | from langgraph.prebuilt import InjectedState, create_react_agent
7 | from ..agent.state import DeepAgentState
8 | from ..config_core import get_model
9 |
10 | RESEARCH_SUBAGENT = {
11 | "name": "research-agent",
12 | "description": "In-depth web research and synthesis.",
13 | "prompt": "You are an expert researcher. Produce precise, sourced notes, then a short summary.",
14 | "include_files": True,
15 | "include_todos": True,
16 | "model_settings": {"provider": "gemini", "temperature": 0.1},
17 | }
18 |
19 | CRITIQUE_SUBAGENT = {
20 | "name": "critique-agent",
21 | "description": "Strict editor that critiques and improves final drafts.",
22 | "prompt": "Be terse and ruthless. Fix clarity, structure, and correctness.",
23 | "include_todos": False,
24 | "model_settings": {"provider": "gemini", "temperature": 0.1},
25 | }
26 |
27 | class SubAgent(TypedDict):
28 | name: str
29 | description: str
30 | prompt: str
31 | tools: NotRequired[list[str]]
32 | include_files: NotRequired[bool]
33 | include_todos: NotRequired[bool]
34 | model_settings: NotRequired[Dict[str, Any]]
35 |
36 | def _maybe_model(base_model, ms: Dict[str, Any] | None):
37 | if not ms:
38 | return base_model
39 | model = base_model
40 | if "provider" in ms: # gemini | anthropic
41 | model = get_model(ms["provider"])
42 |
43 | bind_args = {k: ms[k] for k in ("temperature", "max_tokens") if k in ms}
44 | if bind_args:
45 | try:
46 | model = model.bind(**bind_args)
47 | except Exception:
48 | pass
49 | return model
50 |
51 | def _index_tools(tools: List[BaseTool]) -> Dict[str, BaseTool]:
52 | out: Dict[str, BaseTool] = {}
53 | for t in tools:
54 | if not isinstance(t, BaseTool):
55 | t = tool(t)
56 | out[t.name] = t
57 | return out
58 |
59 | def create_task_tool(
60 | tools: List[BaseTool],
61 | instructions: str,
62 | subagents: List[SubAgent],
63 | base_model,
64 | state_schema,
65 | ):
66 | """Create a task tool that delegates to sub-agents.
67 |
68 | LangChain 1.0 Compliance:
69 | - Uses create_react_agent from langgraph.prebuilt
70 | - All sub-agents use identical tool-calling interface
71 | - State schema is TypedDict (DeepAgentState)
72 | - Works across all providers (Anthropic, Gemini, OpenAI, Ollama)
73 | """
74 | tools_by_name = _index_tools(tools)
75 |
76 | agents = {
77 | "general-purpose": create_react_agent(
78 | base_model, prompt=instructions, tools=tools, state_schema=state_schema
79 | )
80 | }
81 | configs = {"general-purpose": {"include_files": True, "include_todos": True}}
82 |
83 | for sa in subagents or []:
84 | model = _maybe_model(base_model, sa.get("model_settings"))
85 | allowed_tools = tools if "tools" not in sa else [tools_by_name[n] for n in sa["tools"] if n in tools_by_name]
86 | agents[sa["name"]] = create_react_agent(
87 | model, prompt=sa["prompt"], tools=allowed_tools, state_schema=state_schema
88 | )
89 | configs[sa["name"]] = {
90 | "include_files": sa.get("include_files", False),
91 | "include_todos": sa.get("include_todos", False),
92 | }
93 |
94 | other = "\n".join(
95 | f"- {n}: {sa.get('description','')}"
96 | for n, sa in ((s["name"], s) for s in (subagents or []))
97 | )
98 |
99 | @tool(description=f"""Launch a specialized sub-agent to execute a complex task.
100 | Available agents:
101 | - general-purpose: general research/exec agent (Tools: all)
102 | {other}
103 |
104 | Usage:
105 | - description: detailed, self-contained instruction for the subagent
106 | - subagent_type: one of {list(agents.keys())}""")
107 | async def task(
108 | description: str,
109 | subagent_type: str,
110 | state: Annotated[DeepAgentState, InjectedState],
111 | tool_call_id: Annotated[str, InjectedToolCallId],
112 | ):
113 | if subagent_type not in agents:
114 | return f"Error: unknown subagent_type '{subagent_type}'. Allowed: {list(agents.keys())}"
115 |
116 | sub_state: Dict[str, Any] = {"messages": [{"role": "user", "content": description}]}
117 | cfg = configs.get(subagent_type, {})
118 | if cfg.get("include_files"):
119 | sub_state["files"] = state.get("files", {})
120 | if cfg.get("include_todos"):
121 | sub_state["todos"] = state.get("todos", [])
122 |
123 | result = await agents[subagent_type].ainvoke(sub_state)
124 | update: Dict[str, Any] = {
125 | "messages": [ToolMessage(result["messages"][-1].content, tool_call_id=tool_call_id)]
126 | }
127 | if cfg.get("include_files"):
128 | update["files"] = result.get("files", {})
129 | if cfg.get("include_todos"):
130 | update["todos"] = result.get("todos", [])
131 | return Command(update=update)
132 |
133 | return task
134 |
--------------------------------------------------------------------------------
/src/langchain_code/agent/deep_agents.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import asyncio
4 | import os
5 | from pathlib import Path
6 | from typing import Any, List, Optional, Union
7 |
8 | from langgraph.prebuilt import create_react_agent
9 | from langchain_core.tools import BaseTool
10 | from langgraph.checkpoint.memory import MemorySaver
11 | from ..agent.state import DeepAgentState
12 | from ..agent.subagents import SubAgent, create_task_tool
13 | from ..config_core import get_model
14 | from ..mcp_loader import get_mcp_tools
15 | from ..tools.fs_local import (
16 | make_delete_file_tool,
17 | make_edit_by_diff_tool,
18 | make_list_dir_tool,
19 | make_read_file_tool,
20 | make_write_file_tool,
21 | )
22 | from ..tools.mermaid import make_mermaid_tools
23 | from ..tools.planner import write_todos, append_todo, update_todo_status, clear_todos
24 | from ..tools.processor import make_process_multimodal_tool
25 | from ..tools.shell import make_run_cmd_tool
26 | from ..tools.script_exec import make_script_exec_tool
27 | from ..tools.search import make_glob_tool, make_grep_tool
28 | from ..static_values import BASE_DEEP_SUFFIX
29 | from ..workflows.base_system import BASE_SYSTEM
30 |
31 | def load_langcode_context(project_dir: Path) -> str:
32 | ctx_file = project_dir / ".langcode" / "langcode.md"
33 | if ctx_file.exists():
34 | try:
35 | return "\n\n# Project Context\n" + ctx_file.read_text(encoding="utf-8")
36 | except Exception as e:
37 | return f"\n\n# Project Context\n(Error reading langcode.md: {e})"
38 | return ""
39 |
40 |
41 | try:
42 | from langchain_tavily import TavilySearch
43 | except Exception:
44 | TavilySearch = None
45 |
46 | async def _load_dynamic_tools(project_dir: Path, model, apply: bool, test_cmd: Optional[str]) -> List[BaseTool]:
47 | tools: List[BaseTool] = [
48 | make_glob_tool(str(project_dir)),
49 | make_grep_tool(str(project_dir)),
50 | make_list_dir_tool(str(project_dir)),
51 | make_read_file_tool(str(project_dir)),
52 | make_edit_by_diff_tool(str(project_dir), apply),
53 | make_write_file_tool(str(project_dir), apply),
54 | make_delete_file_tool(str(project_dir), apply),
55 | make_run_cmd_tool(str(project_dir), apply, test_cmd),
56 | make_script_exec_tool(str(project_dir), apply),
57 | make_process_multimodal_tool(str(project_dir), model),
58 | write_todos,
59 | append_todo,
60 | update_todo_status,
61 | clear_todos
62 | ]
63 | tools.extend(await get_mcp_tools(project_dir))
64 |
65 | if TavilySearch and os.getenv("TAVILY_API_KEY"):
66 | try:
67 | tools.append(
68 | TavilySearch(
69 | max_results=5,
70 | topic="general",
71 | description=(
72 | "Use TavilySearch for internet or websearch to answer questions "
73 | "that require up-to-date information from the web. "
74 | "Best for research, current events, general knowledge, news etc."
75 | ),
76 | )
77 | )
78 | except Exception as e:
79 | print(f"[LangCode] Tavily disabled (reason: {e})")
80 |
81 | tools.extend(make_mermaid_tools(str(project_dir)))
82 | return tools
83 |
84 |
85 | def create_deep_agent(
86 | *,
87 | provider: str,
88 | project_dir: Path,
89 | instructions: Optional[str] = None,
90 | subagents: Optional[List[SubAgent]] = None,
91 | apply: bool = False,
92 | test_cmd: Optional[str] = None,
93 | state_schema=DeepAgentState,
94 | checkpointer: Optional[Any] = None,
95 | llm: Optional[Any] = None,
96 | ):
97 |     """Create a Deep Agent graph for complex, multi-step tasks.
98 |
99 |     This builds a LangGraph-based agent with create_react_agent from
100 |     langgraph.prebuilt, which handles tool calling identically across
101 |     all LLM providers.
102 |
103 | Args:
104 | provider: LLM provider ("anthropic", "gemini", "openai", "ollama")
105 | project_dir: Root directory for filesystem operations
106 | instructions: Optional system prompt customization
107 | subagents: Optional list of sub-agents for specialized tasks
108 | apply: Whether to write changes to disk
109 | test_cmd: Optional test command to run
110 | state_schema: TypedDict schema for agent state (default: DeepAgentState)
111 | checkpointer: Optional checkpointer for persistence (default: MemorySaver)
112 | llm: Optional pre-configured LLM instance
113 |
114 | Returns:
115 | Compiled agent supporting persistence, streaming, and multi-agent coordination
116 | """
117 | model = llm or get_model(provider)
118 | project_context = load_langcode_context(project_dir)
119 | prompt = (BASE_SYSTEM + "\n" + (instructions or "") + "\n" + BASE_DEEP_SUFFIX + project_context).strip()
120 |
121 | tools = asyncio.run(_load_dynamic_tools(project_dir, model, apply, test_cmd))
122 | task_tool = create_task_tool(tools, instructions or BASE_SYSTEM, subagents or [], model, state_schema)
123 | all_tools: List[Union[BaseTool, Any]] = [*tools, task_tool]
124 |
125 | if checkpointer is None:
126 | checkpointer = MemorySaver()
127 |
128 | # LangChain 1.0: Using create_react_agent from langgraph.prebuilt
129 | # This handles tool calling identically across all providers
130 | graph = create_react_agent(
131 | model,
132 | prompt=prompt,
133 | tools=all_tools,
134 | state_schema=state_schema,
135 | checkpointer=checkpointer,
136 | )
137 |
138 | # Set recursion limit to prevent infinite loops in complex reasoning
139 | graph.config = {"recursion_limit": 250}
140 | return graph
141 |
--------------------------------------------------------------------------------
/src/langchain_code/cli/commands/system.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import os
4 | import platform
5 | import shutil
6 | import subprocess
7 | import sys
8 | from pathlib import Path
9 | from typing import List, Optional
10 |
11 | import typer
12 | from rich.panel import Panel
13 | from rich.text import Text
14 | from rich.align import Align
15 | from rich.columns import Columns
16 | from rich.table import Table
17 |
18 | from ...cli_components.state import console
19 | from ...cli_components.env import (
20 | bootstrap_env,
21 | tty_log_path,
22 | current_tty_id,
23 | global_env_path,
24 | count_env_keys_in_file,
25 | )
26 | from ...cli_components.mcp import mcp_target_path
27 | from ...cli_components.launcher import list_ollama_models
28 | from ..constants_runtime import PROVIDER_KEY_LABELS, DOCTOR_FOOTER_TIP
29 |
30 |
31 | def wrap(
32 | cmd: List[str] = typer.Argument(..., help="Command to run (e.g., pytest -q)"),
33 | project_dir: Path = typer.Option(Path.cwd(), "--project-dir", exists=True, file_okay=False),
34 | tty_id: Optional[str] = typer.Option(None, "--tty-id", help="Override session id (default: auto per TTY)"),
35 | ):
36 | log_path = tty_log_path(tty_id)
37 | log_path.parent.mkdir(parents=True, exist_ok=True)
38 | console.print(Panel.fit(Text(f"Logging to: {log_path}", style="dim"), title="TTY Capture", border_style="cyan"))
39 | os.chdir(project_dir)
40 |
41 | if platform.system().lower().startswith("win"):
42 | with open(log_path, "a", encoding="utf-8", errors="ignore") as f:
43 | proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
44 | assert proc.stdout is not None
45 | for line in proc.stdout:
46 | sys.stdout.write(line)
47 | f.write(line)
48 | rc = proc.wait()
49 | raise typer.Exit(rc)
50 |
51 | import pty # type: ignore
52 |
53 | with open(log_path, "a", encoding="utf-8", errors="ignore") as f:
54 | old_env = dict(os.environ)
55 | os.environ["LANGCODE_TTY_LOG"] = str(log_path)
56 | os.environ["LANGCODE_TTY_ID"] = tty_id or current_tty_id()
57 |
58 | def _tee(master_fd):
59 | data = os.read(master_fd, 1024)
60 | if data:
61 | try:
62 | f.write(data.decode("utf-8", "ignore"))
63 | f.flush()
64 | except Exception:
65 | pass
66 | return data
67 |
68 | try:
69 | status = pty.spawn(cmd, master_read=_tee)
70 | finally:
71 | os.environ.clear()
72 | os.environ.update(old_env)
73 | raise typer.Exit(status >> 8)
74 |
75 |
76 | def shell(
77 | project_dir: Path = typer.Option(Path.cwd(), "--project-dir", exists=True, file_okay=False),
78 | tty_id: Optional[str] = typer.Option(None, "--tty-id", help="Override session id (default: auto per TTY)"),
79 | ):
80 | sh = os.environ.get("SHELL") if platform.system().lower() != "windows" else os.environ.get("COMSPEC", "cmd.exe")
81 | if not sh:
82 | sh = "/bin/bash" if platform.system().lower() != "windows" else "cmd.exe"
83 | return wrap([sh], project_dir=project_dir, tty_id=tty_id)
84 |
85 |
86 | def doctor(
87 | project_dir: Path = typer.Option(Path.cwd(), "--project-dir", exists=True, file_okay=False),
88 | ):
89 | bootstrap_env(project_dir, interactive_prompt_if_missing=False)
90 |
91 | def yes(x):
92 | return Text("• " + x, style="green")
93 |
94 | def no(x):
95 | return Text("• " + x, style="red")
96 |
97 | rows = []
98 | rows.append(yes(f"Python {sys.version.split()[0]} on {platform.platform()}"))
99 |
100 | for tool in ["git", "npx", "node", "ollama"]:
101 | rows.append(yes(f"{tool} found") if shutil.which(tool) else no(f"{tool} missing"))
102 |
103 | provider_panel = Table.grid(padding=(0, 2))
104 | provider_panel.add_column("Provider")
105 | provider_panel.add_column("Status")
106 | for env, label in PROVIDER_KEY_LABELS.items():
107 | ok = env in os.environ and bool(os.environ.get(env, "").strip())
108 | provider_panel.add_row(
109 | label,
110 | ("[green]OK[/green]" if ok else "[red]missing[/red]") + f" [dim]{env}[/dim]",
111 | )
112 |
113 | mcp_path = mcp_target_path(project_dir)
114 | mcp_status = "exists" if mcp_path.exists() else "missing"
115 | mcp_card = Panel(
116 | Text(f"{mcp_status}: {os.path.relpath(mcp_path, project_dir)}"),
117 | title="MCP",
118 | border_style=("green" if mcp_path.exists() else "red"),
119 | )
120 |
121 | ollama = shutil.which("ollama")
122 | if ollama:
123 | models = list_ollama_models()
124 | if models:
125 | oll_text = ", ".join(models[:6]) + (" ..." if len(models) > 6 else "")
126 | else:
127 | oll_text = "(none installed)"
128 | oll_card = Panel(Text(oll_text), title="Ollama models", border_style=("green" if models else "yellow"))
129 | else:
130 | oll_card = Panel(Text("ollama not found"), title="Ollama", border_style="red")
131 |
132 | gpath = global_env_path()
133 | gexists = gpath.exists()
134 | gkeys = count_env_keys_in_file(gpath) if gexists else 0
135 | gmsg = f"{'exists' if gexists else 'missing'}: {gpath}\nkeys: {gkeys}"
136 | global_card = Panel(Text(gmsg), title="Global .env", border_style=("green" if gexists else "red"))
137 |
138 | console.print(Panel(Align.left(Text.assemble(*[r + Text("\n") for r in rows])), title="System", border_style="cyan"))
139 | console.print(Panel(provider_panel, title="Providers", border_style="cyan"))
140 | console.print(Columns([mcp_card, oll_card, global_card]))
141 | console.print(Panel(Text(DOCTOR_FOOTER_TIP), border_style="blue"))
142 |
143 |
144 | __all__ = ["wrap", "shell", "doctor"]
145 |
--------------------------------------------------------------------------------
/docs/config.md:
--------------------------------------------------------------------------------
1 |
2 | **LangCode** - *The only CLI you'll ever need!*
3 |
8 | # Configuration
9 |
10 | The configuration module is responsible for configuring the language model provider, selecting the appropriate model for the agent, and managing model selection logic. It defines the `resolve_provider` and `get_model` functions, which are used to determine the LLM provider and retrieve a LangChain chat model instance, respectively. It also includes the `IntelligentLLMRouter` class, which provides intelligent routing of requests to different LLMs based on query complexity and other factors.
11 |
12 | ## `resolve_provider`
13 |
14 | The `resolve_provider` function determines which LLM provider to use. It prioritizes the provider specified via the `--llm` command-line option. If that option is not provided, it falls back to the `LLM_PROVIDER` environment variable. If neither is set, it defaults to `gemini`. This function ensures that the LLM provider is consistently resolved across the application.
15 |
16 | **Configuration precedence:**
17 |
18 | The LLM provider is resolved in the following order of precedence:
19 |
20 | 1. `--llm` CLI option (`anthropic` or `gemini`)
21 | 2. `LLM_PROVIDER` environment variable (`anthropic` or `gemini`)
22 | 3. Default: `gemini`
23 |
24 | This allows users to override the default provider either through the command line or by setting an environment variable.
25 |
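26 | As a minimal sketch (not the actual implementation), the precedence logic looks like this:
27 |
28 | ```python
29 | import os
30 |
31 | def resolve_provider(cli_llm: str | None = None) -> str:
32 |     # 1) An explicit --llm CLI option wins
33 |     if cli_llm:
34 |         return cli_llm
35 |     # 2) Fall back to the LLM_PROVIDER environment variable
36 |     env_provider = os.getenv("LLM_PROVIDER")
37 |     if env_provider:
38 |         return env_provider
39 |     # 3) Default provider
40 |     return "gemini"
41 | ```
42 |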
49 | ## `get_model`
50 |
51 | The `get_model` function returns an instance of the appropriate LangChain chat model based on the resolved provider and an optional query. If a query is provided, the function uses the `IntelligentLLMRouter` to select the optimal model based on the query complexity and the specified priority. If no query is provided, the function returns a default model for the given provider.
52 |
53 | - **`anthropic`**: Returns a `ChatAnthropic` instance using the `claude-3-7-sonnet-2025-05-14` model as the default. This model is well-suited for tasks that require creative text generation and complex reasoning.
54 | - **`gemini`**: Returns a `ChatGoogleGenerativeAI` instance using the `gemini-2.0-flash` model as the default. This model is known for its strong performance on a wide range of tasks, including code generation and natural language understanding.
55 |
56 | All models are initialized with a `temperature` of `0.2` to encourage more deterministic and focused outputs suitable for coding tasks. A lower temperature value reduces the randomness of the model's output, making it more predictable and consistent. The function also utilizes a lightweight model cache to improve performance by reusing previously created model instances.
57 |
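58 | A minimal usage sketch (the `query` keyword shown for the router path is illustrative; check the actual signature in `config_core`):
59 |
60 | ```python
61 | from langchain_code.config_core import get_model
62 |
63 | # Default model for a provider, no routing involved:
64 | model = get_model("gemini")
65 |
66 | # Hypothetical routed call: given a query, the IntelligentLLMRouter
67 | # would pick a model based on complexity and priority.
68 | # model = get_model("anthropic", query="Refactor the CLI entrypoint")
69 | ```
70 |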
74 |
--------------------------------------------------------------------------------
/src/langchain_code/agent/react.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import asyncio
4 | import logging
5 | import os
6 | from pathlib import Path
7 | from typing import Any, List, Optional
8 |
9 | from langchain.agents import create_agent
10 | from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
11 | from langchain_core.tools import BaseTool
12 | from langgraph.types import Checkpointer
13 | from ..config_core import get_model
14 | from ..mcp_loader import get_mcp_tools
15 | from ..static_values import RUNTIME_POLICY
16 | from ..tools.fs_local import (
17 | make_delete_file_tool,
18 | make_edit_by_diff_tool,
19 | make_list_dir_tool,
20 | make_read_file_tool,
21 | make_write_file_tool,
22 | )
23 | from ..tools.mermaid import make_mermaid_tools
24 | from ..tools.shell import make_run_cmd_tool, make_read_terminal_tool
25 | from ..tools.processor import make_process_multimodal_tool
26 | from ..tools.search import make_glob_tool, make_grep_tool
27 | from ..tools.script_exec import make_script_exec_tool
28 | from ..workflows.base_system import BASE_SYSTEM
29 |
30 | logger = logging.getLogger(__name__)
31 |
32 | try:
33 | from langchain_tavily import TavilySearch
34 | except Exception:
35 | TavilySearch = None
36 |
37 |
38 | def _maybe_make_tavily_tool() -> Optional[BaseTool]:
39 | if TavilySearch is None or not os.getenv("TAVILY_API_KEY"):
40 | return None
41 | try:
42 | return TavilySearch(
43 | max_results=5,
44 | topic="general",
45 | description=(
46 | "Use TavilySearch for internet/web search when you need up-to-date info. "
47 | "Best for research, current events, and general knowledge."
48 | ),
49 | )
50 | except Exception as e:
51 | logger.warning("Tavily disabled: %s", e)
52 | return None
53 |
54 |
55 | def load_langcode_context(project_dir: Path) -> str:
56 | ctx_file = project_dir / ".langcode" / "langcode.md"
57 | if ctx_file.exists():
58 | try:
59 | return "\n\n# Project Context\n" + ctx_file.read_text(encoding="utf-8")
60 | except Exception as e:
61 | return f"\n\n# Project Context\n(Error reading langcode.md: {e})"
62 | return ""
63 |
64 |
65 | def _escape_braces(text: str) -> str:
66 | return text.replace("{", "{{").replace("}", "}}")
67 |
68 |
69 | def build_prompt(instruction_seed: Optional[str], project_dir: Path) -> ChatPromptTemplate:
70 | system_extra = ("\n\n" + instruction_seed) if instruction_seed else ""
71 | project_context = load_langcode_context(project_dir)
72 | system_text = _escape_braces(BASE_SYSTEM + "\n\n" + RUNTIME_POLICY + system_extra + project_context)
73 | return ChatPromptTemplate.from_messages(
74 | [
75 | ("system", system_text),
76 | MessagesPlaceholder("chat_history"),
77 | ("human", "{input}"),
78 | MessagesPlaceholder("agent_scratchpad"),
79 | ]
80 | )
81 |
82 |
83 | def build_react_agent(
84 | provider: str,
85 | project_dir: Path,
86 | apply: bool = False,
87 | test_cmd: Optional[str] = None,
88 | instruction_seed: Optional[str] = None,
89 | *,
90 | llm: Optional[Any] = None,
91 | ):
92 | """Build a ReAct agent using LangChain 1.0 create_agent API.
93 |
94 | This creates a fast-loop agent for chat, reads, and targeted edits.
95 | Uses the new LangChain 1.0 create_agent which handles tool calling
96 | identically across all providers (Anthropic, Gemini, OpenAI, Ollama).
97 |
98 | Args:
99 | provider: LLM provider ("anthropic", "gemini", "openai", "ollama")
100 | project_dir: Root directory for filesystem operations
101 | apply: Whether to write changes to disk
102 | test_cmd: Optional test command to run
103 | instruction_seed: Optional system prompt customization
104 | llm: Optional pre-configured LLM instance
105 |
106 | Returns:
107 | Compiled agent runnable supporting .invoke(), .stream(), etc.
108 | """
109 | model = llm or get_model(provider)
110 | try:
111 | mcp_tools: List[BaseTool] = asyncio.run(get_mcp_tools(project_dir))
112 | except RuntimeError:
113 | loop = asyncio.get_event_loop()
114 | mcp_tools = loop.run_until_complete(get_mcp_tools(project_dir))
115 | except Exception as e:
116 | logger.warning("Failed to load MCP tools: %s", e)
117 | mcp_tools = []
118 |
119 | tool_list: List[BaseTool] = [
120 | make_glob_tool(str(project_dir)),
121 | make_grep_tool(str(project_dir)),
122 | make_list_dir_tool(str(project_dir)),
123 | make_read_file_tool(str(project_dir)),
124 | make_edit_by_diff_tool(str(project_dir), apply),
125 | make_write_file_tool(str(project_dir), apply),
126 | make_delete_file_tool(str(project_dir), apply),
127 | make_script_exec_tool(str(project_dir), apply, return_direct=False),
128 | make_process_multimodal_tool(str(project_dir), model),
129 | make_run_cmd_tool(str(project_dir), apply, test_cmd),
130 | make_read_terminal_tool()
131 | ]
132 |
133 | tool_list.extend(mcp_tools)
134 | t = _maybe_make_tavily_tool()
135 | if t:
136 | tool_list.append(t)
137 | tool_list.extend(make_mermaid_tools(str(project_dir)))
138 |
139 | system_prompt = build_prompt(instruction_seed, project_dir)
140 |
141 | # LangChain 1.0: create_agent is the new unified API
142 | # Returns a compiled runnable that works across all providers identically
143 | agent = create_agent(
144 | model,
145 | tools=tool_list,
146 | system_prompt=str(system_prompt)
147 | )
148 |
149 | return agent
150 |
151 |
152 | def build_deep_agent(
153 | provider: str,
154 | project_dir: Path,
155 | apply: bool = False,
156 | test_cmd: Optional[str] = None,
157 | instruction_seed: Optional[str] = None,
158 | subagents: Optional[list] = None,
159 | checkpointer: Optional[Checkpointer] = None,
160 | *,
161 | llm: Optional[Any] = None,
162 | ):
163 | from .deep_agents import create_deep_agent
164 | return create_deep_agent(
165 | provider=provider,
166 | project_dir=project_dir,
167 | instructions=instruction_seed,
168 | subagents=subagents or [],
169 | apply=apply,
170 | test_cmd=test_cmd,
171 | checkpointer=checkpointer,
172 | llm=llm,
173 | )
174 |
--------------------------------------------------------------------------------
/src/langchain_code/cli_components/runtime.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import ast
4 | import builtins
5 | import re
6 | from contextlib import nullcontext
7 | from pathlib import Path
8 | from typing import Any, List, Optional
9 |
10 | from langchain_core.callbacks import BaseCallbackHandler
11 | from rich import box
12 | from rich.panel import Panel
13 | from rich.text import Text
14 |
15 | from . import state
16 | from .todos import diff_todos, render_todos_panel, short
17 |
18 |
19 | class InputPatch:
20 | def __init__(self, title: str = "Consent"):
21 | self.title = title
22 | self._orig_input = None
23 |
24 | def __enter__(self):
25 | self._orig_input = builtins.input
26 |
27 | def _rich_input(prompt: str = "") -> str:
28 | live = state.current_live
29 | cm = live.pause() if getattr(live, "pause", None) else nullcontext()
30 | with cm:
31 | body = Text()
32 | msg = prompt.strip() or "Action requires your confirmation."
33 | body.append(msg + "\n\n")
34 | body.append("Type ", style="dim")
35 | body.append("Y", style="bold green")
36 | body.append("/", style="dim")
37 | body.append("N", style="bold red")
38 | body.append(" and press Enter.", style="dim")
39 |
40 | panel = Panel(
41 | body,
42 | title=self.title,
43 | border_style="yellow",
44 | box=box.ROUNDED,
45 | padding=(1, 2),
46 | )
47 | state.console.print(panel)
48 |
49 |             answer = state.console.input("[bold yellow]›[/bold yellow] ").strip()
50 | return answer
51 |
52 | builtins.input = _rich_input
53 | return self
54 |
55 | def __exit__(self, exc_type, exc, tb):
56 | builtins.input = self._orig_input
57 | return False
58 |
59 |
60 | class TodoLive(BaseCallbackHandler):
61 | """Stream TODO updates when planner tools return Command(update={'todos': ...})."""
62 |
63 | def __init__(self):
64 | self.prev: List[dict] = []
65 |
66 | def on_tool_end(self, output, **kwargs):
67 | text = str(output)
68 | if "Command(update=" not in text or "'todos':" not in text:
69 | return
70 | match = re.search(r"Command\(update=(\{.*\})\)$", text, re.S) or re.search(r"update=(\{.*\})", text, re.S)
71 | if not match:
72 | return
73 | try:
74 | data = ast.literal_eval(match.group(1))
75 | todos = data.get("todos")
76 | if not isinstance(todos, list):
77 | return
78 | changes = diff_todos(self.prev, todos)
79 | self.prev = todos
80 | if todos:
81 | state.console.print(render_todos_panel(todos))
82 | if changes:
83 | state.console.print(Panel(Text("\n".join(changes)), title="Progress", border_style="yellow", box=box.ROUNDED, expand=True))
84 | except Exception:
85 | pass
86 |
87 |
88 | class RichDeepLogs(BaseCallbackHandler):
89 | """
90 | Minimal, pretty callback printer for deep (LangGraph) runs.
91 | Only logs the big milestones so it stays readable.
92 | Toggle by passing --verbose.
93 | """
94 |
95 | def on_chain_start(self, serialized, inputs, **kwargs):
96 | name = (serialized or {}).get("id") or (serialized or {}).get("name") or "chain"
97 | state.console.print(Panel.fit(Text.from_markup(f"▶ [bold]Start[/bold] {name}\n[dim]{short(str(inputs))}[/dim]"), border_style="cyan", title="Node", box=box.ROUNDED))
98 |
99 | def on_chain_end(self, outputs, **kwargs):
100 | state.console.print(Panel.fit(Text.from_markup(f"[bold]End[/bold]\n[dim]{short(str(outputs))}[/dim]"), border_style="cyan", title="Node", box=box.ROUNDED))
101 |
102 | def on_tool_start(self, serialized, tool_input, **kwargs):
103 | name = (serialized or {}).get("name") or "tool"
104 | state.console.print(Panel.fit(Text.from_markup(f"[bold]{name}[/bold]\n[dim]{short(str(tool_input))}[/dim]"), border_style="yellow", title="Tool", box=box.ROUNDED))
105 |
106 | def on_tool_end(self, output, **kwargs):
107 | state.console.print(Panel.fit(Text.from_markup(f" [bold]Tool result[/bold]\n{short(str(output))}"), border_style="yellow", title="Tool", box=box.ROUNDED))
108 |
109 | def on_llm_start(self, serialized, prompts, **kwargs):
110 | name = (serialized or {}).get("id") or (serialized or {}).get("name") or "llm"
111 | show = "\n---\n".join(short(p) for p in (prompts or [])[:1])
112 | state.console.print(Panel.fit(Text.from_markup(f"[bold]{name}[/bold]\n{show}"), border_style="green", title="LLM", box=box.ROUNDED))
113 |
114 | def on_llm_end(self, response, **kwargs):
115 |         state.console.print(Panel.fit(Text.from_markup("[dim]LLM complete[/dim]"), border_style="green", title="LLM", box=box.ROUNDED))
116 |
117 |
118 | def maybe_coerce_img_command(raw: str) -> str:
119 | text = raw.strip()
120 | if not text.startswith("/img"):
121 | return raw
122 | try:
123 | rest = text[len("/img"):].strip()
124 | if "::" in rest:
125 | paths_part, prompt_text = rest.split("::", 1)
126 | prompt_text = prompt_text.strip()
127 | else:
128 | paths_part, prompt_text = rest, ""
129 | paths = [p for p in paths_part.split() if p]
130 | return (
131 | f'Please call the tool "process_multimodal" with '
132 | f"image_paths={paths} and text={prompt_text!r}. After the tool returns, summarize the result."
133 | )
134 | except Exception:
135 | return raw
136 |
137 |
138 | def extract_last_content(messages: List) -> str:
139 | if not messages:
140 | return ""
141 | last = messages[-1]
142 | content = getattr(last, "content", None)
143 |
144 | if isinstance(content, str):
145 | return content.strip()
146 | if isinstance(content, list):
147 | parts = []
148 | for item in content:
149 | if isinstance(item, dict):
150 | if "text" in item and isinstance(item["text"], str):
151 | parts.append(item["text"])
152 | elif item.get("type") == "text" and isinstance(item.get("data") or item.get("content"), str):
153 | parts.append(item.get("data") or item.get("content"))
154 | elif isinstance(item, str):
155 | parts.append(item)
156 | return "\n".join(parts).strip()
157 |
158 | if isinstance(last, dict):
159 | content = last.get("content", "")
160 | if isinstance(content, str):
161 | return content.strip()
162 | if isinstance(content, list):
163 | return "\n".join(str(x) for x in content if isinstance(x, str)).strip()
164 |
165 | return (str(content) if content is not None else str(last)).strip()
166 |
167 |
168 | def thread_id_for(project_dir: Path, purpose: str = "chat") -> str:
169 | """Stable thread id per project & purpose for LangGraph checkpointer."""
170 | return f"{purpose}@{project_dir.resolve()}"
171 |
--------------------------------------------------------------------------------
/src/langchain_code/cli/entrypoint.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from pathlib import Path
4 | from typing import Any, Dict, Optional
5 |
6 | import typer
7 |
8 | from .commands.chat import chat
9 | from .commands.flows import feature, fix, analyze
10 | from .commands.system import wrap, shell, doctor
11 | from .commands.configure import env, edit_instructions
12 | from ..cli_components.app import app
13 | from ..cli_components.launcher import launcher_loop, default_state, list_ollama_models
14 | from ..cli_components.state import console, set_selection_hub_active
15 | from ..cli_components.env import bootstrap_env
16 |
17 |
18 | def _unwrap_exc(e: BaseException) -> BaseException:
19 | """Drill down through ExceptionGroup/TaskGroup, __cause__, and __context__ to the root error."""
20 | seen = set()
21 | while True:
22 | inner = getattr(e, "exceptions", None)
23 | if inner:
24 | e = inner[0]
25 | continue
26 | if getattr(e, "__cause__", None) and e.__cause__ not in seen:
27 | seen.add(e)
28 | e = e.__cause__
29 | continue
30 | if getattr(e, "__context__", None) and e.__context__ not in seen:
31 | seen.add(e)
32 | e = e.__context__
33 | continue
34 | return e
35 |
36 |
37 | def _friendly_agent_error(e: BaseException) -> str:
38 | root = _unwrap_exc(e)
39 | name = root.__class__.__name__
40 | msg = (str(root) or "").strip() or "(no details)"
41 |     return f"Sorry, a tool run failed. Please try again :)\n\n| {name}: {msg}\n\n"
42 |
43 |
44 | def _dispatch_from_state(chosen: Dict[str, Any]) -> Dict[str, Optional[str]]:
45 | """Dispatch the chosen launcher state and report navigation plus status text."""
46 | if chosen.get("llm") == "ollama" and not list_ollama_models():
47 | return {"nav": "select", "info": "Cannot start: no Ollama models installed."}
48 |
49 | try:
50 | cmd = chosen["command"]
51 | if cmd == "chat":
52 | nav = chat(
53 | llm=chosen["llm"],
54 | project_dir=chosen["project_dir"],
55 | mode=chosen["engine"],
56 | auto=bool(chosen["autopilot"] and chosen["engine"] == "deep"),
57 | router=chosen["router"],
58 | priority=chosen["priority"],
59 | verbose=False,
60 | )
61 | return {"nav": nav, "info": None}
62 |
63 | if cmd == "feature":
64 | req = console.input("[bold]Feature request[/bold] (e.g. Add a dark mode toggle): ").strip()
65 | if not req:
66 | return {"nav": "select", "info": "Feature request aborted (empty input)."}
67 | feature(
68 | request=req,
69 | llm=chosen["llm"],
70 | project_dir=chosen["project_dir"],
71 | test_cmd=chosen["test_cmd"],
72 | apply=chosen["apply"],
73 | router=chosen["router"],
74 | priority=chosen["priority"],
75 | verbose=False,
76 | )
77 | return {"nav": "select", "info": "Feature workflow completed."}
78 |
79 | if cmd == "fix":
80 | req = console.input("[bold]Bug summary[/bold] (e.g. Fix crash on image upload) [Enter to skip]: ").strip() or None
81 | log_path = console.input("[bold]Path to error log[/bold] [Enter to skip]: ").strip()
82 | log = Path(log_path) if log_path else None
83 | fix(
84 | request=req,
85 | log=log if log and log.exists() else None,
86 | llm=chosen["llm"],
87 | project_dir=chosen["project_dir"],
88 | test_cmd=chosen["test_cmd"],
89 | apply=chosen["apply"],
90 | router=chosen["router"],
91 | priority=chosen["priority"],
92 | verbose=False,
93 | )
94 | return {"nav": "select", "info": "Fix workflow completed."}
95 |
96 | req = console.input("[bold]Analysis question[/bold] (e.g. What are the main components?): ").strip()
97 | if not req:
98 | return {"nav": "select", "info": "Analysis aborted (empty question)."}
99 | analyze(
100 | request=req,
101 | llm=chosen["llm"],
102 | project_dir=chosen["project_dir"],
103 | router=chosen["router"],
104 | priority=chosen["priority"],
105 | verbose=False,
106 | )
107 | return {"nav": "select", "info": "Analysis results provided."}
108 |
109 | except RuntimeError as exc:
110 | return {"nav": "select", "info": str(exc)}
111 | except Exception as exc:
112 | return {"nav": "select", "info": _friendly_agent_error(exc)}
113 |
114 |
115 | def selection_hub(initial_state: Optional[Dict[str, Any]] = None) -> None:
116 | """Persistent launcher loop so users can switch modes without restarting the CLI."""
117 | state = dict(initial_state or default_state())
118 |
119 | try:
120 | bootstrap_env(state["project_dir"], interactive_prompt_if_missing=True)
121 | except Exception:
122 | pass
123 |
124 | set_selection_hub_active(True)
125 | try:
126 | while True:
127 | chosen = launcher_loop(state)
128 | if not chosen:
129 | return
130 | state.update(chosen)
131 | result = _dispatch_from_state(chosen)
132 | nav = result.get("nav") if result else None
133 | info = result.get("info") if result else None
134 | if info:
135 | state["_status"] = info
136 | elif "_status" in state:
137 | state.pop("_status", None)
138 | if nav == "quit":
139 | console.print("\n[bold]Goodbye![/bold]")
140 | return
141 | if nav == "select":
142 | continue
143 | finally:
144 | set_selection_hub_active(False)
145 |
146 |
147 | # Typer command registration -------------------------------------------------
148 | app.command(help="Run a command inside a PTY and capture output to a session log (used by fix --from-tty).")(wrap)
149 | app.command(help="Open a logged subshell. Anything you run here is captured for fix --from-tty.")(shell)
150 | app.command(help="Run environment checks for providers, tools, and MCP.")(doctor)
151 | app.command(
152 | help="Open an interactive chat with the agent. Modes: react | deep (default: react). Use --auto in deep mode for full autopilot (plan+act with no questions)."
153 | )(chat)
154 | app.command(
155 | help="Implement a feature end-to-end (plan → search → edit → verify). Supports --apply and optional --test-cmd (e.g., 'pytest -q')."
156 | )(feature)
157 | app.command(help="Diagnose & fix a bug (trace → pinpoint → patch → test). Accepts --log, --test-cmd, and supports --apply.")(fix)
158 | app.command(help="Analyze any codebase and generate insights (deep agent).")(analyze)
159 | app.command(help="Edit environment. Use --global to edit your global env (~/.config/langcode/.env).")(env)
160 | app.command(name="instr", help="Open or create project-specific instructions (.langcode/langcode.md) in your editor.")(edit_instructions)
161 |
162 |
163 | @app.callback(invoke_without_command=True)
164 | def _default_entry(ctx: typer.Context):
165 | """Launch the selection hub when no explicit subcommand is provided."""
166 | if ctx.invoked_subcommand:
167 | return
168 | selection_hub()
169 |
170 |
171 | def main() -> None:
172 | app()
173 |
174 |
175 | __all__ = ["selection_hub", "main"]
176 |
--------------------------------------------------------------------------------
/src/langchain_code/tools/planner.py:
--------------------------------------------------------------------------------
1 | # src/langchain_code/tools/planner.py
2 | from __future__ import annotations
3 | from typing import Any, Annotated, Dict, List
4 | import re
5 |
6 | from langchain_core.tools import tool, InjectedToolCallId
7 | from langchain_core.messages import ToolMessage
8 | from langgraph.types import Command
9 | from langgraph.prebuilt import InjectedState
10 | from ..agent.state import DeepAgentState
11 |
12 | WRITE_TODOS_DESCRIPTION = """Create or update a structured todo list.
13 |
14 | Accepted input formats (normalized automatically):
15 | - String with bullet lines:
16 | "- Plan\n- Search\n- Edit\n- Verify"
17 | - List of strings:
18 | ["Plan the work", "Search repo", "Edit files", "Run tests"]
19 | - List of objects:
20 | [{"content":"Plan", "status":"pending"}, {"content":"Search", "status":"in_progress"}]
21 | - Object with 'items':
22 | {"items": ["Plan", {"content":"Search","status":"pending"}]}
23 |
24 | Also understands GitHub-style checkboxes:
25 | - [ ] pending
26 | - [x] completed
27 |
28 | Valid statuses: "pending", "in_progress", "completed".
29 | If omitted or invalid, defaults to "pending".
30 | Maintain at most ONE item with status "in_progress".
31 | """
32 |
33 | _ALLOWED = {"pending", "in_progress", "completed"}
34 | _ALIAS = {
35 | "in-progress": "in_progress",
36 | "progress": "in_progress",
37 | "doing": "in_progress",
38 | "todo": "pending",
39 | "tbd": "pending",
40 | "done": "completed",
41 | "complete": "completed",
42 | "finished": "completed",
43 | }
44 |
45 | def _coerce_status(s: str | None) -> str:
46 | if not s:
47 | return "pending"
48 | s = s.strip().lower()
49 | s = _ALIAS.get(s, s)
50 | return s if s in _ALLOWED else "pending"
51 |
52 | _checkbox_re = re.compile(r"^\s*(?:[-*+]|\d+[.)])?\s*(\[[ xX]\])?\s*(.+)$")
53 |
54 | def _normalize_one(item: Any) -> Dict[str, str] | None:
55 | if item is None:
56 | return None
57 |
58 | if isinstance(item, str):
59 | # Handle "- [x] Do thing" / "* [ ] Task" / "1) [x] Task"
60 | m = _checkbox_re.match(item.strip())
61 | if m:
62 | box, rest = m.groups()
63 | content = (rest or "").strip().strip("-|*").strip()
64 | if not content:
65 | return None
66 | status = "completed" if (box and box.lower() == "[x]") else "pending"
67 | return {"content": content, "status": status}
68 | content = item.strip().lstrip("-|* ").strip()
69 | if not content:
70 | return None
71 | return {"content": content, "status": "pending"}
72 |
73 | if isinstance(item, dict):
74 | content = (
75 | item.get("content")
76 | or item.get("task")
77 | or item.get("title")
78 | or ""
79 | )
80 | content = content.strip()
81 | if not content:
82 | return None
83 | status = _coerce_status(item.get("status"))
84 | return {"content": content, "status": status}
85 |
86 | # Fallback to string coercion
87 | return _normalize_one(str(item))
88 |
89 | def _normalize_list(raw: Any) -> List[Dict[str, str]]:
90 | out: List[Dict[str, str]] = []
91 | if isinstance(raw, str):
92 | lines = [ln for ln in raw.splitlines() if ln.strip()]
93 | for ln in lines:
94 | item = _normalize_one(ln)
95 | if item:
96 | out.append(item)
97 | elif isinstance(raw, list):
98 | for el in raw:
99 | item = _normalize_one(el)
100 | if item:
101 | out.append(item)
102 | elif isinstance(raw, dict) and "items" in raw:
103 | for el in raw["items"]:
104 | item = _normalize_one(el)
105 | if item:
106 | out.append(item)
107 | else:
108 | got = _normalize_one(raw)
109 | if got:
110 | out.append(got)
111 | # De-dup consecutive duplicates by content (compact)
112 | dedup: List[Dict[str, str]] = []
113 | last = None
114 | for it in out[:50]:
115 | if not last or it["content"] != last["content"]:
116 | dedup.append(it)
117 | last = it
118 | return dedup
119 |
120 | def _enforce_single_in_progress(items: List[Dict[str, str]], prefer_index: int | None = None) -> None:
121 | # Keep at most one 'in_progress'. If multiple, keep the preferred index and downgrade others to 'pending'.
122 | ip_indices = [i for i, it in enumerate(items) if it.get("status") == "in_progress"]
123 | if len(ip_indices) <= 1:
124 | return
125 | keep = prefer_index if prefer_index is not None else ip_indices[0]
126 | for i in ip_indices:
127 | if i != keep:
128 | items[i]["status"] = "pending"
129 |
130 | @tool(description=WRITE_TODOS_DESCRIPTION)
131 | def write_todos(
132 | todos: Any = None,
133 | items: Any = None,
134 | value: Any = None,
135 | state: Annotated[DeepAgentState, InjectedState] = None,
136 | tool_call_id: Annotated[str, InjectedToolCallId] = "",
137 | ) -> Command:
138 | raw = None
139 | for candidate in (todos, items, value):
140 | if candidate is not None:
141 | raw = candidate
142 | break
143 | if raw is None:
144 | raw = []
145 | normalized = _normalize_list(raw)
146 | _enforce_single_in_progress(normalized, prefer_index=None)
147 | return Command(update={
148 | "todos": normalized,
149 | "messages": [ToolMessage(f"Updated todos ({len(normalized)} items).", tool_call_id=tool_call_id)],
150 | })
151 |
152 | @tool(description="Append a new TODO (defaults to pending).")
153 | def append_todo(
154 | content: str,
155 | status: str | None = None,
156 | state: Annotated[DeepAgentState, InjectedState] = None,
157 | tool_call_id: Annotated[str, InjectedToolCallId] = "",
158 | ) -> Command:
159 | content = (content or "").strip()
160 | if not content:
161 | return Command() # no-op
162 | todos = state.get("todos", []) or []
163 | todos = list(todos)
164 | s = _coerce_status(status)
165 | todos.append({"content": content, "status": s})
166 | _enforce_single_in_progress(todos, prefer_index=len(todos) - 1 if s == "in_progress" else None)
167 | return Command(update={
168 | "todos": todos,
169 | "messages": [ToolMessage(f"Appended todo: {content}", tool_call_id=tool_call_id)],
170 | })
171 |
172 | @tool(description="Update TODO status by index (0-based). Status = pending | in_progress | completed. Keeps only one in_progress.")
173 | def update_todo_status(
174 | index: int,
175 | status: str,
176 | state: Annotated[DeepAgentState, InjectedState] = None,
177 | tool_call_id: Annotated[str, InjectedToolCallId] = "",
178 | ) -> Command:
179 | try:
180 | idx = int(index)
181 | except Exception:
182 | idx = 0
183 |
184 | todos = list(state.get("todos", []) or [])
185 | if 0 <= idx < len(todos):
186 | todos[idx] = {**todos[idx], "status": _coerce_status(status)}
187 | _enforce_single_in_progress(todos, prefer_index=idx if todos[idx]["status"] == "in_progress" else None)
188 | msg = f"Set todo[{idx+1}] to {todos[idx]['status']}"
189 | else:
190 | msg = f"Ignored update_todo_status: index {idx} out of range (n={len(todos)})"
191 | return Command(update={
192 | "todos": todos,
193 | "messages": [ToolMessage(msg, tool_call_id=tool_call_id)],
194 | })
195 |
196 | @tool(description="Clear all TODOs.")
197 | def clear_todos(
198 | state: Annotated[DeepAgentState, InjectedState] = None,
199 | tool_call_id: Annotated[str, InjectedToolCallId] = "",
200 | ) -> Command:
201 | return Command(update={
202 | "todos": [],
203 | "messages": [ToolMessage("Cleared todos.", tool_call_id=tool_call_id)],
204 | })
205 |
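206 | # Usage sketch (illustrative, not exercised by the package itself):
207 | # _normalize_list accepts a newline-separated string, a list, or an
208 | # {"items": [...]} payload and coerces every entry to {"content", "status"}, e.g.
209 | #   _normalize_list("- [x] ship docs\n- [ ] add tests")
210 | #   -> [{"content": "ship docs", "status": "completed"},
211 | #       {"content": "add tests", "status": "pending"}]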
--------------------------------------------------------------------------------
/src/langchain_code/tools/search.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | from pathlib import Path
3 | import fnmatch
4 | import os
5 | import re
6 | import time
7 | from typing import Iterable, List, Optional
8 | from langchain_core.tools import tool
9 |
10 | DEFAULT_EXCLUDE_DIRS = {
11 | ".git", ".hg", ".svn",
12 | ".venv", "venv", "env",
13 | "node_modules",
14 | "__pycache__", ".mypy_cache", ".pytest_cache", ".ruff_cache",
15 | ".tox", ".cache",
16 | "dist", "build", "target",
17 | ".idea", ".vscode", ".gradle",
18 | }
19 | DEFAULT_MAX_RESULTS = 500
20 | DEFAULT_MAX_MATCHES = 500
21 | DEFAULT_MAX_FILES_SCANNED = 5000
22 | DEFAULT_MAX_BYTES_PER_FILE = 2_000_000
23 | DEFAULT_TIME_BUDGET_SEC = 8.0
24 |
25 | def _iter_files(
26 | root: Path,
27 | exclude_dirs: set[str],
28 | max_files: int,
29 | time_budget_sec: float,
30 | ) -> Iterable[Path]:
31 | """Iteratively walk files under root:
32 | - prunes excluded directories
33 | - does NOT follow symlinked directories
34 | - stops on time/file caps
35 | """
36 | start = time.time()
37 | stack = [root]
38 | seen = 0
39 |
40 | while stack:
41 | d = stack.pop()
42 | try:
43 | with os.scandir(d) as it:
44 | for entry in it:
45 | if (time.time() - start) > max(0.1, time_budget_sec):
46 | return
47 | try:
48 | if entry.is_dir(follow_symlinks=False):
49 | name = entry.name
50 | if name in exclude_dirs:
51 | continue
52 | stack.append(Path(entry.path))
53 | elif entry.is_file(follow_symlinks=False):
54 | yield Path(entry.path)
55 | seen += 1
56 | if seen >= max(1, max_files):
57 | return
58 | except PermissionError:
59 | continue
60 | except (FileNotFoundError, PermissionError):
61 | continue
62 |
63 | def _relposix(root: Path, p: Path) -> str:
64 | try:
65 | return p.relative_to(root).as_posix()
66 | except Exception:
67 | return str(p.as_posix())
68 |
69 | def _compile_regex(pattern: str, ignore_case: bool) -> re.Pattern:
70 | flags = re.IGNORECASE if ignore_case else 0
71 | try:
72 | return re.compile(pattern, flags)
73 | except re.error:
74 | return re.compile(re.escape(pattern), flags)
75 |
76 | def make_glob_tool(project_dir: str):
77 | @tool("glob", return_direct=False)
78 | def glob(
79 | pattern: str,
80 | *,
81 | exclude_dirs: Optional[List[str]] = None,
82 | max_results: int = DEFAULT_MAX_RESULTS,
83 | max_files_scanned: int = DEFAULT_MAX_FILES_SCANNED,
84 | time_budget_sec: float = DEFAULT_TIME_BUDGET_SEC,
85 | ) -> str:
86 | """
87 | Find files in the project by glob pattern.
88 |
89 | Examples:
90 | - "**/*.py" -> all Python files
91 | - "src/**/*.json" -> JSON under src/
92 | - "*config*" -> any file with "config" in its name
93 |
94 | Optional:
95 | - exclude_dirs: extra folder names to skip (in addition to defaults)
96 | - max_results: cap returned paths
97 | - max_files_scanned: cap files walked before stopping
98 | - time_budget_sec: soft ceiling to prevent long scans
99 | """
100 | root = Path(project_dir).resolve()
101 | excludes = set(DEFAULT_EXCLUDE_DIRS)
102 | if exclude_dirs:
103 | excludes.update(exclude_dirs)
104 |
105 | results: List[str] = []
106 | scanned = 0
107 | timed_out = False
108 | start = time.time()
109 |
110 | for f in _iter_files(root, excludes, max_files_scanned, time_budget_sec):
111 | scanned += 1
112 | rel = _relposix(root, f)
113 | if fnmatch.fnmatch(rel, pattern):
114 | results.append(rel)
115 | if len(results) >= max_results:
116 | break
117 |
118 | if (time.time() - start) > time_budget_sec:
119 | timed_out = True
120 |
121 | if not results:
122 | return "(no matches)"
123 | out = "\n".join(results[:max_results])
124 | if timed_out or scanned >= max_files_scanned:
125 |             out += f"\n[note] truncated: scanned≈{scanned}, returned={len(results)}"
126 | return out
127 | return glob
128 |
129 | def make_grep_tool(project_dir: str):
130 | @tool("grep", return_direct=False)
131 | def grep(
132 | pattern: str,
133 | path: str = ".",
134 | *,
135 | ignore_case: bool = False,
136 | exclude_dirs: Optional[List[str]] = None,
137 | max_matches: int = DEFAULT_MAX_MATCHES,
138 | max_files_scanned: int = DEFAULT_MAX_FILES_SCANNED,
139 | max_bytes_per_file: int = DEFAULT_MAX_BYTES_PER_FILE,
140 | time_budget_sec: float = DEFAULT_TIME_BUDGET_SEC,
141 | ) -> str:
142 | """
143 | Search for a regex (or literal if invalid) inside files under a directory.
144 |
145 | Returns lines in the form:
146 |         <relative_path>:<line_number>:<matched_line>
147 |
148 | Optional:
149 | - ignore_case: case-insensitive search
150 | - exclude_dirs: extra folder names to skip (defaults already skip .venv, node_modules, .git, etc.)
151 | - max_matches: cap on total matches returned
152 | - max_files_scanned: cap files walked before stopping
153 | - max_bytes_per_file: skip very large files quickly
154 | - time_budget_sec: soft ceiling to prevent long scans
155 | """
156 | root = Path(project_dir).resolve().joinpath(path).resolve()
157 | proj_root = Path(project_dir).resolve()
158 |
159 | if not root.exists():
160 | return f"{path} not found."
161 |         if root != proj_root and proj_root not in root.parents:
162 | return f"{path} escapes project root."
163 |
164 | excludes = set(DEFAULT_EXCLUDE_DIRS)
165 | if exclude_dirs:
166 | excludes.update(exclude_dirs)
167 |
168 | rx = _compile_regex(pattern, ignore_case)
169 |
170 | matches: List[str] = []
171 | scanned = 0
172 | timed_out = False
173 | start = time.time()
174 |
175 | for f in _iter_files(root, excludes, max_files_scanned, time_budget_sec):
176 | scanned += 1
177 |
178 | try:
179 | if f.stat().st_size > max_bytes_per_file:
180 | continue
181 | except Exception:
182 | continue
183 |
184 | rel = _relposix(proj_root, f)
185 | try:
186 | with f.open("r", encoding="utf-8", errors="ignore") as fh:
187 | for i, line in enumerate(fh, 1):
188 | # Time budget check within file too
189 | if (time.time() - start) > time_budget_sec:
190 | timed_out = True
191 | break
192 | if rx.search(line):
193 | matches.append(f"{rel}:{i}:{line.rstrip()}")
194 | if len(matches) >= max_matches:
195 | break
196 | if len(matches) >= max_matches or timed_out:
197 | break
198 | except Exception:
199 | continue
200 |
201 | if not matches:
202 | return "(no matches)"
203 | out = "\n".join(matches[:max_matches])
204 | if timed_out or scanned >= max_files_scanned:
205 |             out += f"\n[note] truncated: scanned≈{scanned}, returned={len(matches)}"
206 | return out
207 | return grep
208 |
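209 | # Usage sketch (illustrative; "/path/to/project" is a placeholder root):
210 | # both factories return LangChain tools that report paths relative to that root.
211 | #   glob_tool = make_glob_tool("/path/to/project")
212 | #   print(glob_tool.invoke({"pattern": "**/*.py"}))
213 | #   grep_tool = make_grep_tool("/path/to/project")
214 | #   print(grep_tool.invoke({"pattern": "def make_", "path": "src"}))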
--------------------------------------------------------------------------------
/src/langchain_code/tools/processor.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | import mimetypes
3 | import re
4 | from pathlib import Path
5 | from typing import AsyncIterable, Iterable, List, Optional, Callable, Any, Dict
6 |
7 | import asyncio
8 | import threading
9 |
10 | from genai_processors import content_api, streams
11 | from genai_processors.contrib.langchain_model import LangChainModel
12 | from langchain_core.prompts import ChatPromptTemplate
13 | from langchain_core.language_models.chat_models import BaseChatModel
14 | from langchain_core.tools import tool
15 |
16 | def _img_part(path: Path) -> content_api.ProcessorPart:
17 | mt, _ = mimetypes.guess_type(path.name)
18 | if mt not in {"image/png", "image/jpeg", "image/gif"}:
19 | raise ValueError(f"Unsupported image format: {path}. Supported: PNG, JPEG, GIF")
20 | data = path.read_bytes()
21 | return content_api.ProcessorPart(data, mimetype=mt, role="user")
22 |
23 |
24 | def _text_part(text: str, role: str = "user") -> content_api.ProcessorPart:
25 | return content_api.ProcessorPart(text, mimetype="text/plain", role=role)
26 |
27 |
28 | def build_processor(
29 | model: BaseChatModel,
30 | system_instruction: Optional[str] = None,
31 | prompt_template: Optional[ChatPromptTemplate] = None,
32 | ) -> LangChainModel:
33 | si = _text_part(system_instruction, role="system") if system_instruction else None
34 | return LangChainModel(
35 | model=model,
36 | system_instruction=(si,) if si else (),
37 | prompt_template=prompt_template,
38 | )
39 |
40 |
41 | async def stream_processor(
42 | model: BaseChatModel,
43 | text: Optional[str] = None,
44 | images: Iterable[Path] = (),
45 | system_instruction: Optional[str] = None,
46 | prompt_template: Optional[ChatPromptTemplate] = None,
47 | ) -> AsyncIterable[str]:
48 | """Yield streamed text chunks from the LangChain model via GenAI Processors."""
49 | proc = build_processor(model, system_instruction=system_instruction, prompt_template=prompt_template)
50 | parts: List[content_api.ProcessorPart] = []
51 | if text:
52 | parts.append(_text_part(text))
53 | for p in images:
54 | parts.append(_img_part(Path(p)))
55 | input_stream = streams.stream_content(parts)
56 | async for out in proc(input_stream):
57 | if content_api.is_text(out.mimetype):
58 | yield out.text
59 |
60 |
61 | def _run_coro_sync(coro_func: Callable[[], Any]) -> Any:
62 | """Run a coroutine safely from sync code, even if an event loop is already running."""
63 | try:
64 | loop = asyncio.get_running_loop()
65 | except RuntimeError:
66 | loop = None
67 |
68 | if not loop or not loop.is_running():
69 | return asyncio.run(coro_func())
70 |
71 | result_box: Dict[str, Any] = {}
72 | error_box: Dict[str, BaseException] = {}
73 | done = threading.Event()
74 |
75 | def runner():
76 | new_loop = asyncio.new_event_loop()
77 | try:
78 | asyncio.set_event_loop(new_loop)
79 | result_box["value"] = new_loop.run_until_complete(coro_func())
80 | except BaseException as e:
81 | error_box["error"] = e
82 | finally:
83 | try:
84 | new_loop.close()
85 | finally:
86 | done.set()
87 |
88 | t = threading.Thread(target=runner, daemon=True)
89 | t.start()
90 | done.wait()
91 | if "error" in error_box:
92 | raise error_box["error"]
93 | return result_box.get("value")
94 |
95 |
96 | def run_stream_to_text(
97 | model: BaseChatModel,
98 | *,
99 | text: Optional[str],
100 | images: Iterable[Path] = (),
101 | system_instruction: Optional[str] = None,
102 | prompt_template: Optional[ChatPromptTemplate] = None,
103 | ) -> str:
104 | """Collect the async stream to a single string (for use as a LangChain tool return)."""
105 | async def _consume():
106 | chunks: List[str] = []
107 | async for piece in stream_processor(
108 | model,
109 | text=text,
110 | images=images,
111 | system_instruction=system_instruction,
112 | prompt_template=prompt_template,
113 | ):
114 | chunks.append(piece)
115 | return "".join(chunks)
116 |
117 | return _run_coro_sync(_consume)
118 |
119 |
120 | def _discover_images(
121 | root: Path,
122 | raw_hints: List[str],
123 | text: Optional[str],
124 | ) -> List[Path]:
125 | """
126 | Resolve/locate image files using:
127 | 1) Provided paths (absolute/relative) resolved against root.
128 | 2) If any do not exist, search by stem and common image extensions.
129 | 3) If no paths given, infer names from `text` (explicit filenames or 'the image').
130 | Returns a list of resolved Path objects (deduped), preferring the shallowest match.
131 | """
132 | exts = (".png", ".jpg", ".jpeg", ".gif")
133 | resolved: List[Path] = []
134 |
135 | def _resolve_one(raw: str) -> Optional[Path]:
136 |
137 | p = Path(raw)
138 | if not p.is_absolute():
139 | p = (root / p).resolve()
140 | if p.exists() and p.is_file():
141 | return p
142 |
143 | stem = Path(raw).stem or raw
144 | candidates: List[Path] = []
145 |
146 | name = Path(raw).name
147 | for found in root.rglob(name):
148 | if found.is_file():
149 | candidates.append(found)
150 |
151 | for ext in exts:
152 | for found in root.rglob(f"*{stem}*{ext}"):
153 | if found.is_file():
154 | candidates.append(found)
155 |
156 | if not candidates:
157 | return None
158 |
159 | uniq, seen = [], set()
160 | for c in candidates:
161 | s = str(c)
162 | if s not in seen:
163 | seen.add(s)
164 | uniq.append(c)
165 | uniq.sort(key=lambda x: (len(x.relative_to(root).parts), len(str(x))))
166 | return uniq[0]
167 |
168 | hints = list(raw_hints or [])
169 |
170 | if not hints and text:
171 | filenames = re.findall(r'([\w.\-]+?\.(?:png|jpe?g|gif))', text, flags=re.I)
172 | if filenames:
173 | hints.extend(filenames)
174 | else:
175 | stems = re.findall(r'(?:\bimage\s+|\bthe\s+)([\w.\-]+)', text, flags=re.I)
176 | hints.extend(stems)
177 |
178 | for raw in hints:
179 | p = _resolve_one(raw)
180 | if p:
181 | resolved.append(p)
182 |
183 | deduped, seen = [], set()
184 | for p in resolved:
185 | s = str(p)
186 | if s not in seen:
187 | seen.add(s)
188 | deduped.append(p)
189 | return deduped
190 |
191 |
192 | def make_process_multimodal_tool(project_dir: str, model: BaseChatModel):
193 | """
194 | Factory: returns a LangChain tool named `process_multimodal` that:
195 | - accepts `text` and optional `image_paths`
196 | - auto-discovers images by filename or stem anywhere under project_dir
197 | - resolves relative paths against project_dir
198 | - gracefully handles fabricated absolute paths (e.g., /tmp/...)
199 | """
200 | root = Path(project_dir).resolve()
201 |
202 | @tool("process_multimodal", return_direct=False)
203 |     def process_multimodal(text: str, image_paths: Optional[List[str]] = None) -> str:
204 | """
205 | Process text + optional images (PNG/JPEG/GIF) with the underlying LLM and
206 | return the streamed text as a single string.
207 |
208 | Smart path resolution:
209 | - Pass bare filenames (e.g., "deepgit.png") or just a stem (e.g., "deepgit").
210 | - If a provided path does not exist, we search the project directory recursively
211 | for a matching image by name/stem (.png/.jpg/.jpeg/.gif).
212 | - If `image_paths` is empty, we try to infer filenames from `text`.
213 | - Relative paths are resolved against the project root; fabricated /tmp paths are ignored.
214 | """
215 | resolved = _discover_images(root, image_paths or [], text)
216 | if not resolved:
217 | tried = image_paths or []
218 | return (
219 | "Image file(s) not found or discoverable. "
220 | f"Tried to resolve {tried or '[inferred from text]'} under {root}."
221 | )
222 | try:
223 | return run_stream_to_text(model, text=text, images=resolved)
224 | except ValueError as e:
225 | return f"Error processing multimodal input: {e}"
226 |
227 | return process_multimodal
228 |
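229 | # Usage sketch (illustrative; assumes `model` is any BaseChatModel instance,
230 | # e.g. from langchain_openai or langchain_google_genai):
231 | #   tool = make_process_multimodal_tool("/path/to/project", model)
232 | #   tool.invoke({"text": "Describe the logo", "image_paths": ["logo.png"]})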
--------------------------------------------------------------------------------
/src/langchain_code/tools/shell.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | import subprocess
3 | import os, platform, ctypes, struct
4 | from langchain_core.tools import tool
5 | from ..safety.confirm import confirm_action
6 |
7 | def make_run_cmd_tool(cwd: str, apply: bool, test_cmd: str | None):
8 | @tool("run_cmd", return_direct=False)
9 | def run_cmd(command: str, timeout_sec: int = 120) -> str:
10 | """
11 | Run a shell command in the project directory (`cwd`).
12 |
13 | Use this tool for tasks such as:
14 | - Listing files or directories (`ls -la`, `dir`)
15 | - Finding a file (`find . -name "config.py"`)
16 | - Searching file contents (`grep "Router" -r src/`)
17 | - Running project commands (`pytest`, `make build`)
18 |
19 | Notes:
20 | - Pass a single command string (chain with `&&` if needed).
21 | - `{TEST_CMD}` will be replaced with the configured test command if used.
22 | - Timeout defaults to 120s; increase for long tests.
23 | """
24 | cmd = command.strip()
25 | if cmd == "{TEST_CMD}" and test_cmd:
26 | cmd = test_cmd
27 |
28 | if not confirm_action(f"Run command: `{cmd}` ?", apply):
29 | return f"Command cancelled: {cmd}"
30 |
31 | try:
32 | result = subprocess.run(
33 | cmd,
34 | cwd=cwd,
35 | shell=True,
36 | capture_output=True,
37 | text=True,
38 | encoding="utf-8",
39 | errors="replace",
40 | timeout=max(5, int(timeout_sec)),
41 | )
42 | stdout = result.stdout.strip()
43 | stderr = result.stderr.strip()
44 | code = result.returncode
45 | out = f"$ {cmd}\n(exit {code})\n"
46 | if stdout:
47 | out += f"\n[stdout]\n{stdout}\n"
48 | if stderr:
49 | out += f"\n[stderr]\n{stderr}\n"
50 | return out
51 | except subprocess.TimeoutExpired:
52 | return f"$ {cmd}\n(timeout after {timeout_sec}s)\n"
53 | except Exception as e:
54 | return f"Error running `{cmd}`: {e}"
55 | return run_cmd
56 |
57 | def make_read_terminal_tool():
58 | def _read_terminal() -> str:
59 | """
60 | Always call this tool immediately if the user says something vague like
61 | "what's this", "what's this error", "fix this", or refers to 'this' without details.
62 | This captures the current visible terminal contents so you can answer correctly.
63 | Never ask the user to paste errors — rely on this tool instead.
64 | """
65 | system = platform.system()
66 |
67 | if system == "Windows":
68 | from ctypes import wintypes
69 |
70 | kernel32 = ctypes.windll.kernel32
71 | h = kernel32.GetStdHandle(-11)
72 |
73 | csbi = ctypes.create_string_buffer(22)
74 | res = kernel32.GetConsoleScreenBufferInfo(h, csbi)
75 | if not res:
76 | return ""
77 |
78 | (bufx, bufy, curx, cury, wattr,
79 | left, top, right, bottom,
80 | maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
81 |
82 | width = right - left + 1
83 | height = bottom - top + 1
84 | size = width * height
85 |
86 | chars = ctypes.create_unicode_buffer(size)
87 | read = ctypes.c_int(0)
88 | coord = wintypes._COORD(0, 0)
89 |
90 | kernel32.ReadConsoleOutputCharacterW(
91 | h, chars, size, coord, ctypes.byref(read)
92 | )
93 |
94 | return chars.value.strip()
95 |
96 | else:
97 | try:
98 | import re, shutil
99 |
100 | def _clean(s: str) -> str:
101 | if not s:
102 | return s
103 | ansi = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]')
104 | s = ansi.sub('', s).replace('\r', '')
105 | return s.strip()
106 |
107 | if os.environ.get("TMUX") and shutil.which("tmux"):
108 | r = subprocess.run(
109 | ["tmux", "capture-pane", "-p"],
110 | capture_output=True,
111 | text=True,
112 | encoding="utf-8",
113 | errors="replace",
114 | timeout=3,
115 | )
116 | out = _clean(r.stdout)
117 | if out:
118 | return out
119 |
120 | if (os.environ.get("STY") or os.environ.get("SCREEN") or os.path.exists("/var/run/screen")) and shutil.which("screen"):
121 | tmp = "/tmp/screen_hardcopy.txt"
122 | subprocess.run(["screen", "-X", "hardcopy", tmp], timeout=3)
123 | try:
124 | with open(tmp, "r", encoding="utf-8", errors="replace") as f:
125 | out = _clean(f.read())
126 | os.remove(tmp)
127 | if out:
128 | return out
129 | except Exception:
130 | pass
131 |
132 | history_chunks = []
133 |
134 | bash_hist = os.path.expanduser("~/.bash_history")
135 | if os.path.exists(bash_hist):
136 | try:
137 | with open(bash_hist, "r", encoding="utf-8", errors="replace") as f:
138 | lines = f.readlines()
139 | if lines:
140 | history_chunks.append("".join(lines[-50:]))
141 | except Exception:
142 | pass
143 |
144 | zsh_hist = os.path.expanduser("~/.zsh_history")
145 | if os.path.exists(zsh_hist):
146 | try:
147 | with open(zsh_hist, "r", encoding="utf-8", errors="replace") as f:
148 | lines = f.readlines()
149 | if lines:
150 | cleaned = [ln.split(";", 1)[-1] for ln in lines[-100:]]
151 | history_chunks.append("".join(cleaned[-50:]))
152 | except Exception:
153 | pass
154 |
155 | fish_hist = os.path.expanduser("~/.local/share/fish/fish_history")
156 | if not os.path.exists(fish_hist):
157 | fish_hist = os.path.expanduser("~/.config/fish/fish_history")
158 | if os.path.exists(fish_hist):
159 | try:
160 | with open(fish_hist, "r", encoding="utf-8", errors="replace") as f:
161 | lines = f.readlines()
162 |                     cmds = [ln.strip()[7:] for ln in lines if ln.strip().startswith("- cmd: ")]
163 | if cmds:
164 | history_chunks.append("\n".join(cmds[-50:]))
165 | except Exception:
166 | pass
167 |
168 | if history_chunks:
169 | combined = _clean("\n".join(history_chunks).strip())
170 | if combined:
171 | return "(terminal buffer capture not supported; recent history)\n" + combined
172 |
173 | if shutil.which("bash"):
174 | r = subprocess.run(
175 | ["bash", "-ic", "history -r; history 50"],
176 | capture_output=True,
177 | text=True,
178 | encoding="utf-8",
179 | errors="replace",
180 | timeout=3,
181 | )
182 | out = _clean(r.stdout)
183 | if out:
184 | return out
185 |
186 | if shutil.which("zsh"):
187 | r = subprocess.run(
188 | ["zsh", "-ic", "fc -l -n -50"],
189 | capture_output=True,
190 | text=True,
191 | encoding="utf-8",
192 | errors="replace",
193 | timeout=3,
194 | )
195 | out = _clean(r.stdout)
196 | if out:
197 | return out
198 |
199 | if shutil.which("fish"):
200 | r = subprocess.run(
201 | ["fish", "-ic", "history | tail -n 50"],
202 | capture_output=True,
203 | text=True,
204 | encoding="utf-8",
205 | errors="replace",
206 | timeout=3,
207 | )
208 | out = _clean(r.stdout)
209 | if out:
210 | return out
211 |
212 | return "(no recent history)"
213 | except Exception:
214 | return "(not supported on this system)"
215 |
216 |
217 | @tool("read_terminal", return_direct=False)
218 | def read_terminal() -> str:
219 | """
220 | Capture the current visible contents of the terminal.
221 |
222 | Use this tool when:
223 | - You need to know what text is currently displayed in the terminal window.
224 | - You want to confirm whether the screen is empty (after `clear`/`cls`).
225 | - You want to inspect recent command outputs still visible.
226 |
227 | Notes:
228 | - This does NOT fetch command history, only what is visible right now.
229 | - On Windows, it uses the console screen buffer API.
230 | - On Linux/macOS, output may be limited depending on terminal capabilities.
231 | """
232 | return _read_terminal()
233 |
234 | return read_terminal
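235 |
236 | # Usage sketch (illustrative; "/path/to/project" is a placeholder):
237 | #   run_cmd = make_run_cmd_tool(cwd="/path/to/project", apply=True, test_cmd="pytest -q")
238 | #   print(run_cmd.invoke({"command": "{TEST_CMD}"}))  # expands to the configured test command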
--------------------------------------------------------------------------------
/src/langchain_code/tools/script_exec.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | from pathlib import Path
3 | from typing import List, Optional, Literal
4 | import os, sys, time, shutil, subprocess, re, datetime as _dt
5 |
6 | from langchain_core.tools import tool
7 | from pydantic import BaseModel, Field
8 | _INTERPS = {
9 | "python": lambda: [(sys.executable or "python"), "-u"],
10 | "bash": lambda: [shutil.which("bash") or "bash"],
11 | "sh": lambda: [shutil.which("sh") or "sh"],
12 | "powershell": lambda: ["powershell" if os.name == "nt" else "pwsh", "-NoProfile", "-ExecutionPolicy", "Bypass"],
13 | "node": lambda: [shutil.which("node") or "node"],
14 | "cmd": lambda: ["cmd", "/c"], # Windows only
15 | }
16 | _EXT = {"python": "py", "bash": "sh", "sh": "sh", "powershell": "ps1", "node": "js", "cmd": "cmd"}
17 |
18 | _DENY = (
19 | "rm -rf /", "mkfs", "format ", "shutdown", "reboot", ":(){ :|:& };:",
20 | )
21 |
22 | def _rooted(project_dir: str, path: str) -> Path:
23 | p = (Path(project_dir) / (path or "")).resolve()
24 | root = Path(project_dir).resolve()
25 |     if p != root and root not in p.parents:
26 | raise ValueError("Path escapes project root")
27 | return p
28 |
29 | def _looks_read_only(lang: str, code: str) -> bool:
30 | s = code.lower()
31 |
32 | if lang in {"bash", "sh", "cmd"}:
33 | patterns = [
34 | r"\brm\b", r"\bmv\b", r"\bcp\b", r"\bchmod\b", r"\bchown\b",
35 | r"\bmkdir\b", r"\brmdir\b", r">\s*\S", r">>\s*\S", r"\btruncate\b",
36 | r"\btouch\b", r"\btee\b", r"\bsed\b.*\s-i(\s|$)", r"\bgit\s+(clean|reset|checkout\s+--)"
37 | ]
38 | return not any(re.search(p, s) for p in patterns)
39 |
40 | if lang == "python":
41 | if re.search(r"\bopen\s*\([^)]*[\"'](?:w|a|\+)[\"']", s): # write modes
42 | return False
43 | if re.search(r"\b(os\.remove|os\.unlink|os\.rmdir|os\.rename|os\.chmod|os\.chown|os\.mkdir|os\.makedirs)\b", s):
44 | return False
45 | if re.search(r"\b(shutil\.(copy|copy2|copyfile|copytree|move|rmtree|make_archive))\b", s):
46 | return False
47 | return True
48 |
49 | if lang == "powershell":
50 | patterns = [r"\bremove-item\b", r"\bmove-item\b", r"\bcopy-item\b", r"\bnew-item\b", r"\bset-item\b", r">\s*\S", r">>\s*\S"]
51 | return not any(re.search(p, s) for p in patterns)
52 |
53 | if lang == "node":
54 | if re.search(r"\bfs\.(write|append|rename|chmod|chown|rm|rmdir|mkdir)\b", s):
55 | return False
56 | return True
57 |
58 | return True
59 |
60 | def _janitor(tmp_dir: Path, hours: int = 2) -> None:
61 | """Best-effort cleanup of old temp scripts."""
62 | try:
63 | if not tmp_dir.exists():
64 | return
65 | cutoff = _dt.datetime.utcnow().timestamp() - hours * 3600
66 | for p in tmp_dir.glob("snippet_*.*"):
67 | try:
68 | if p.stat().st_mtime < cutoff:
69 | p.unlink(missing_ok=True)
70 | except Exception:
71 | pass
72 | except Exception:
73 | pass
74 |
75 | class ScriptExecArgs(BaseModel):
76 | language: Literal["python", "bash", "sh", "powershell", "node", "cmd"] = Field(..., description="Interpreter.")
77 | code: str = Field(..., description="Short, self-contained script body.")
78 | argv: List[str] = Field(default_factory=list, description="Command-line arguments.")
79 | stdin: str = Field(default="", description="Optional standard input.")
80 | timeout_sec: int = Field(default=120, ge=5, le=900, description="Hard time limit (seconds).")
81 | save_as: Optional[str] = Field(default=None, description="Relative path to persist the script.")
82 | persist: bool = Field(default=False, description="Keep the script even if save_as is not provided.")
83 | report: Literal["stdout", "full", "auto"] = Field(default="stdout", description="Output format.")
84 | safety: Literal["auto", "require", "force"] = Field(
85 | default="auto",
86 | description="auto: allow read-only without apply; require: needs apply=True; force: skip consent gate."
87 | )
88 |
89 | # ---------------- tool factory ----------------
90 | def make_script_exec_tool(project_dir: str, apply: bool, *, return_direct: bool = False):
91 | tmp_dir = _rooted(project_dir, ".langcode/tmp_scripts")
92 | _janitor(tmp_dir) # opportunistic cleanup
93 |
94 | @tool("script_exec", args_schema=ScriptExecArgs, return_direct=return_direct)
95 | def script_exec(**kwargs) -> str:
96 | """
97 | Run a short script in the project workspace.
98 | - No prompts: read-only scripts run when apply=False (safety='auto').
99 | - Mutating scripts require apply=True unless safety='force'.
100 | - Temp scripts are deleted unless you set save_as or persist=True.
101 | """
102 | args = ScriptExecArgs(**kwargs) # validate + fill defaults
103 |
104 | # quick safety checks
105 | if args.language not in _INTERPS:
106 | return f"Unsupported language: {args.language}. Allowed: {', '.join(sorted(_INTERPS.keys()))}"
107 | for bad in _DENY:
108 | if bad in args.code:
109 | return f"Blocked for safety; found '{bad}'."
110 |
111 | lang = args.language
112 | base_cmd = _INTERPS[lang]()
113 | if any(x is None for x in base_cmd):
114 | return f"Interpreter for {lang} not found on PATH."
115 |
116 | read_only = _looks_read_only(lang, args.code)
117 | if args.safety == "require" and not apply:
118 | return "Execution requires apply=True (explicit consent). Re-run with --apply."
119 | if args.safety == "auto" and not apply and not read_only:
120 | return ("Declined without apply: script appears to modify files. "
121 | "Re-run with --apply, or set safety='force' if you intend to run it.")
122 |
123 | # command building (prefer inline -c for small python)
124 | cmd: List[str]
125 | script_path: Optional[Path] = None
126 | use_inline = (lang == "python" and not args.save_as and not args.persist and len(args.code) < 8000)
127 | if use_inline:
128 | cmd = base_cmd + ["-c", args.code]
129 | else:
130 | ext = _EXT.get(lang, "txt")
131 | filename = args.save_as or f".langcode/tmp_scripts/snippet_{int(time.time()*1000)}.{ext}"
132 | script_path = _rooted(project_dir, filename)
133 | script_path.parent.mkdir(parents=True, exist_ok=True)
134 | try:
135 | body = args.code
136 | if lang in {"bash", "sh"} and not body.lstrip().startswith("#!"):
137 | body = "#!/usr/bin/env bash\nset -euo pipefail\n" + body
138 | script_path.write_text(body, encoding="utf-8")
139 | if lang in {"bash", "sh"}:
140 | try:
141 | os.chmod(script_path, 0o755)
142 | except Exception:
143 | pass
144 | except Exception as e:
145 | return f"Failed to write script: {type(e).__name__}: {e}"
146 |
147 | if lang == "cmd":
148 | if os.name != "nt":
149 | try: script_path.unlink(missing_ok=True)
150 | except Exception: pass
151 | return "cmd is Windows-only."
152 | if lang == "powershell":
153 | cmd = base_cmd + ["-File", str(script_path)]
154 | else:
155 | cmd = base_cmd + [str(script_path)]
156 |
157 | if args.argv:
158 | cmd += [str(a) for a in args.argv]
159 |
160 | # hide console on Windows
161 | popen_kwargs = {}
162 | if os.name == "nt":
163 | try:
164 | popen_kwargs["creationflags"] = subprocess.CREATE_NO_WINDOW # type: ignore[attr-defined]
165 | except Exception:
166 | pass
167 |
168 | try:
169 | proc = subprocess.run(
170 | cmd,
171 | cwd=project_dir,
172 | input=args.stdin,
173 | text=True,
174 | capture_output=True,
175 | timeout=max(5, int(args.timeout_sec)),
176 | shell=False,
177 | encoding="utf-8",
178 | errors="replace",
179 | **popen_kwargs,
180 | )
181 | out = (proc.stdout or "").replace("\r\n", "\n").strip()
182 | err = (proc.stderr or "").replace("\r\n", "\n").strip()
183 |
184 | def _clip(s: str, n: int = 24000) -> str:
185 | return s if len(s) <= n else s[:n] + "\n...[truncated]..."
186 |
187 | if args.report == "stdout":
188 | return out or f"(no stdout)\n(exit {proc.returncode})"
189 | if args.report == "auto":
190 | if out and not err and len(out) <= 2000:
191 | return out
192 | return f"$ {' '.join(cmd)}\n(exit {proc.returncode})\n[stdout]\n{_clip(out)}\n[stderr]\n{_clip(err)}"
193 |
194 | # full
195 | src = (script_path and script_path.relative_to(Path(project_dir))) or ""
196 | lines = [f"$ {' '.join(cmd)}", f"(exit {proc.returncode})", f"[script] {src}"]
197 | if out: lines += ["", "[stdout]", _clip(out)]
198 | if err: lines += ["", "[stderr]", _clip(err)]
199 | return "\n".join(lines)
200 |
201 | except subprocess.TimeoutExpired:
202 | return f"Timed out after {args.timeout_sec}s running: {' '.join(cmd)}"
203 | except Exception as e:
204 | return f"Error executing script: {type(e).__name__}: {e}"
205 | finally:
206 | if script_path and not (args.persist or args.save_as):
207 | try:
208 | script_path.unlink(missing_ok=True)
209 | except Exception:
210 | pass
211 |
212 | return script_exec
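213 |
214 | # Usage sketch (illustrative): with safety="auto" (the default), read-only snippets
215 | # run even when apply=False; mutating ones are declined until --apply is set.
216 | #   exec_tool = make_script_exec_tool("/path/to/project", apply=False)
217 | #   exec_tool.invoke({"language": "python", "code": "print('hello')"})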
--------------------------------------------------------------------------------
/src/langchain_code/mcp_loader.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import asyncio
4 | import json
5 | import logging
6 | import os
7 | import threading
8 | from importlib.resources import files as pkg_files
9 | from pathlib import Path
10 | from typing import Any, Dict, Iterable, List, Optional, Union
11 |
12 | from langchain_core.tools import BaseTool
13 | from langchain_mcp_adapters.client import MultiServerMCPClient
14 |
15 | try:
16 | # LC >= 0.2
17 | from langchain_core.tools.structured import StructuredTool # type: ignore
18 | except Exception: # pragma: no cover
19 | # LC < 0.2 fallback
20 | from langchain.tools import StructuredTool # type: ignore
21 |
22 | # ---------- dotenv (optional) ----------
23 | try:
24 | from dotenv import load_dotenv, find_dotenv
25 |
26 | def _preload_dotenv() -> None:
27 | # find .env in or above CWD (typical CLI usage) without overriding already-set env
28 | path = find_dotenv(usecwd=True)
29 | if path:
30 | load_dotenv(path, override=False)
31 | except Exception: # pragma: no cover
32 | def _preload_dotenv() -> None:
33 | pass
34 |
35 |
36 | logger = logging.getLogger("langcode.mcp")
37 | logger.addHandler(logging.NullHandler())
38 | _VERBOSE = os.getenv("LANGCODE_MCP_VERBOSE", "").lower() in {"1", "true", "yes"}
39 | if _VERBOSE:
40 | logging.basicConfig(level=logging.DEBUG)
41 | logger.setLevel(logging.DEBUG)
42 |
43 | # ---------- default on-disk locations ----------
44 | _DEFAULT_LOCATIONS: List[Path] = [
45 | Path.cwd() / ".langcode" / "mcp.json",
46 | Path.cwd() / "mcp.json",
47 | Path.home() / ".langcode" / "mcp.json",
48 | Path.home() / ".config" / "langcode" / "mcp.json",
49 | ]
50 |
51 | # ---------- packaged fallback (inside the wheel) ----------
52 | def _load_packaged_cfg() -> Optional[Dict[str, Any]]:
53 | """Load mcp.json packaged at langchain_code/config/mcp.json (wheel-safe)."""
54 | try:
55 | res = pkg_files("langchain_code.config") / "mcp.json"
56 | if res and res.is_file():
57 | data = json.loads(res.read_text(encoding="utf-8"))
58 | if isinstance(data, dict) and isinstance(data.get("servers"), dict):
59 | logger.debug("MCP: loaded packaged config langchain_code/config/mcp.json")
60 | return data
61 | except Exception as e: # pragma: no cover
62 | logger.debug("MCP: failed to load packaged config: %s", e)
63 | return None
64 |
65 | def _env_overrides() -> List[Path]:
66 | """Allow users to point to custom config via env."""
67 | paths: List[Path] = []
68 | j = os.getenv("LANGCODE_MCP_JSON", "").strip()
69 | if j:
70 | for chunk in j.split(os.pathsep):
71 | p = Path(chunk).expanduser()
72 | if p.is_file():
73 | paths.append(p)
74 | d = os.getenv("LANGCODE_MCP_DIR", "").strip()
75 | if d:
76 | p = Path(d).expanduser() / "mcp.json"
77 | if p.is_file():
78 | paths.append(p)
79 | return paths
80 |
81 | def _project_locations(project_dir: Optional[Path]) -> List[Path]:
82 | if not project_dir:
83 | return []
84 | project_dir = project_dir.resolve()
85 | return [
86 | project_dir / ".langcode" / "mcp.json",
87 | project_dir / "mcp.json",
88 | ]
89 |
90 | def _expand_env_placeholders(cfg: Dict[str, Any]) -> Dict[str, Any]:
91 | """Expand $VARS in server env blocks using process env (after .env preload)."""
92 | servers = cfg.get("servers", {}) or {}
93 | for _, server in servers.items():
94 | env_map = server.get("env", {}) or {}
95 | if not isinstance(env_map, dict):
96 | continue
97 | expanded = {}
98 | for k, v in env_map.items():
99 | expanded[k] = os.path.expandvars(os.path.expanduser(v)) if isinstance(v, str) else v
100 | server["env"] = expanded
101 | return cfg
102 |
103 | def _normalize_commands_for_windows(cfg: Dict[str, Any]) -> Dict[str, Any]:
104 | """On Windows, prefer npx.cmd for CreateProcess compatibility."""
105 | if os.name != "nt":
106 | return cfg
107 | servers = cfg.get("servers", {}) or {}
108 | for name, server in servers.items():
109 | cmd = server.get("command")
110 | if isinstance(cmd, str) and cmd.lower() == "npx":
111 | server["command"] = "npx.cmd"
112 | logger.debug("MCP: normalized server '%s' command to npx.cmd for Windows", name)
113 | return cfg
114 |
115 | def _read_json_file(pathlike: Union[Path, Any]) -> Optional[Dict[str, Any]]:
116 | try:
117 | if isinstance(pathlike, Path):
118 | if not pathlike.exists() or not pathlike.is_file():
119 | return None
120 | return json.loads(pathlike.read_text(encoding="utf-8"))
121 | if hasattr(pathlike, "read_text"):
122 | return json.loads(pathlike.read_text(encoding="utf-8"))
123 | except Exception as e: # pragma: no cover
124 | logger.debug("MCP: failed to read JSON from %s: %s", pathlike, e)
125 | return None
126 |
127 | def _merge_server_cfgs_dicts(dicts: Iterable[Dict[str, Any]]) -> Dict[str, Any]:
128 | cfg: Dict[str, Any] = {"servers": {}}
129 | for data in dicts:
130 | if not data:
131 | continue
132 | servers = data.get("servers", {})
133 | if isinstance(servers, dict):
134 | cfg["servers"].update(servers)
135 | cfg = _expand_env_placeholders(cfg)
136 | cfg = _normalize_commands_for_windows(cfg)
137 | return cfg
138 |
139 | def _ensure_sync_invocation(tool: BaseTool) -> BaseTool:
140 | """Make async StructuredTool invokable in sync LC agents."""
141 | try:
142 | is_structured = isinstance(tool, StructuredTool) # type: ignore[arg-type]
143 | except Exception:
144 | is_structured = False
145 |
146 | if is_structured and getattr(tool, "coroutine", None) and getattr(tool, "func", None) is None:
147 | async_coro = tool.coroutine
148 |
149 | def _sync_func(*args, **kwargs):
150 | try:
151 | loop = asyncio.get_running_loop()
152 | except RuntimeError:
153 | loop = None
154 | if not loop or not loop.is_running():
155 | return asyncio.run(async_coro(*args, **kwargs))
156 |
157 | result_holder: Dict[str, Any] = {}
158 | error_holder: Dict[str, BaseException] = {}
159 | done = threading.Event()
160 |
161 | def _runner():
162 | new_loop = asyncio.new_event_loop()
163 | try:
164 | asyncio.set_event_loop(new_loop)
165 | result_holder["value"] = new_loop.run_until_complete(async_coro(*args, **kwargs))
166 | except BaseException as e: # pragma: no cover
167 | error_holder["error"] = e
168 | finally:
169 | try:
170 | new_loop.close()
171 | finally:
172 | done.set()
173 |
174 | t = threading.Thread(target=_runner, daemon=True)
175 | t.start()
176 | done.wait()
177 | if "error" in error_holder:
178 | raise error_holder["error"]
179 | return result_holder.get("value")
180 |
181 | tool.func = _sync_func # type: ignore[attr-defined]
182 | return tool
183 |
184 | def _sanitize_tool_schema(tool: BaseTool) -> BaseTool:
185 | s = getattr(tool, "args_schema", None)
186 | if s is None:
187 | return tool
188 |
189 | def _strip(obj: Any) -> Any:
190 | if isinstance(obj, dict):
191 | obj.pop("$schema", None)
192 | obj.pop("additionalProperties", None)
193 | for k, v in list(obj.items()):
194 | obj[k] = _strip(v)
195 | elif isinstance(obj, list):
196 | return [_strip(x) for x in obj]
197 | return obj
198 |
199 | try:
200 | if isinstance(s, dict):
201 | tool.args_schema = _strip(s) # type: ignore[assignment]
202 | except Exception as e: # pragma: no cover
203 | logger.debug("MCP: sanitize schema failed for %s: %s", getattr(tool, "name", ""), e)
204 | return tool
205 |
206 | async def get_mcp_tools(project_dir: Optional[Path] = None) -> List[BaseTool]:
207 | """
208 | Search order:
209 | 1) project_dir/.langcode/mcp.json, project_dir/mcp.json (if provided)
210 | 2) LANGCODE_MCP_JSON paths, LANGCODE_MCP_DIR/mcp.json
211 | 3) CWD/HOME defaults: ./.langcode/mcp.json, ./mcp.json, ~/.langcode/mcp.json, ~/.config/langcode/mcp.json
212 | 4) Packaged fallback: langchain_code/config/mcp.json (inside the wheel)
213 | """
214 | _preload_dotenv()
215 |
216 | if not _VERBOSE:
217 | logging.getLogger("langchain_mcp_adapters").setLevel(logging.WARNING)
218 | logging.getLogger("langchain").setLevel(logging.WARNING)
219 |
220 | dicts: List[Dict[str, Any]] = []
221 |
222 | # 1–3: files on disk
223 | disk_locations = [
224 | *_project_locations(project_dir),
225 | *_env_overrides(),
226 | *_DEFAULT_LOCATIONS,
227 | ]
228 | looked: List[str] = []
229 | for loc in disk_locations:
230 | looked.append(str(loc))
231 | d = _read_json_file(loc)
232 | if d:
233 | dicts.append(d)
234 |
235 | # 4: packaged fallback
236 | packaged = _load_packaged_cfg()
237 | if packaged:
238 | dicts.append(packaged)
239 | looked.append("pkg:langchain_code/config/mcp.json")
240 |
241 | cfg = _merge_server_cfgs_dicts(dicts)
242 | if not cfg.get("servers"):
243 | logger.debug("MCP: no servers discovered. Looked in: %s", " | ".join(looked))
244 | return []
245 |
246 | try:
247 | client = MultiServerMCPClient(cfg["servers"])
248 | tools = await client.get_tools()
249 | except Exception as e:
250 | logger.debug("MCP: client/get_tools failed: %s", e)
251 | return []
252 |
253 | tools = [_sanitize_tool_schema(t) for t in tools]
254 | tools = [_ensure_sync_invocation(t) for t in tools]
255 |
256 | logger.debug("MCP: total tools loaded: %d", len(tools))
257 | return tools
258 |
259 |
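260 | # Usage sketch (illustrative): get_mcp_tools is a coroutine, so drive it with
261 | # asyncio when calling from synchronous code.
262 | #   import asyncio
263 | #   tools = asyncio.run(get_mcp_tools(Path.cwd()))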
--------------------------------------------------------------------------------
/src/langchain_code/hooks.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | from dataclasses import dataclass, field
3 | from pathlib import Path
4 | from typing import Any, Dict, List, Optional
5 | import fnmatch
6 | import os
7 | import re
8 | import shlex
9 | import subprocess
10 | import threading
11 | import time
12 |
13 | try:
14 | import yaml
15 | except Exception:
16 | yaml = None
17 |
18 |
19 | from .safety.confirm import confirm_action
20 |
21 |
22 |
23 | HOOKS_PATH_REL = Path(".langcode/hooks.yml")
24 |
25 |
26 |
27 | @dataclass
28 | class HookAction:
29 | run: Optional[str] = None
30 | deny: Optional[str] = None
31 | confirm: Optional[str] = None
32 | set_env: Dict[str, str] = field(default_factory=dict)
33 |
34 |
35 | @dataclass
36 | class HookRule:
37 | on: str
38 | if_path: List[str] = field(default_factory=list)
39 | if_cmd: List[str] = field(default_factory=list)
40 | if_cmd_re: Optional[str] = None
41 | if_language: List[str] = field(default_factory=list)
42 | when_mutating: Optional[bool] = None
43 | require_apply: Optional[bool] = None
44 | if_exit_code: Optional[int] = None
45 | actions: List[HookAction] = field(default_factory=list)
46 |
47 |
48 | @dataclass
49 | class HookConfig:
50 | version: int = 1
51 | timeout_sec: int = 60
52 | on_error: str = "fail"
53 | hooks: List[HookRule] = field(default_factory=list)
54 |
55 |
56 | class HookResult:
57 | def __init__(self, allowed: bool, message: str = "", outputs: Optional[List[str]] = None):
58 | self.allowed = allowed
59 | self.message = message
60 | self.outputs = outputs or []
61 |
62 | def with_output(self, text: str) -> "HookResult":
63 | self.outputs.append(text)
64 | return self
65 |
66 |
67 |
68 | class _SafeDict(dict):
69 |
70 | def __missing__(self, k):
71 | return "{" + k + "}"
72 |
73 |
74 | def _fmt(template: str, ctx: Dict[str, Any]) -> str:
75 | return template.format_map(_SafeDict(**{k: _stringify(v) for k, v in (ctx or {}).items()}))
76 |
77 |
78 | def _stringify(v: Any) -> str:
79 | if v is None:
80 | return ""
81 | if isinstance(v, (str, int, float, bool)):
82 | return str(v)
83 | return str(v)
84 |
85 |
86 | def _split_cmd(cmd: str) -> List[str]:
87 |
88 | if os.name == "nt":
89 | return [cmd]
90 | return shlex.split(cmd)
91 |
92 |
93 |
94 | class HookRunner:
95 |
96 | def __init__(self, project_dir: Path):
97 | self.root = Path(project_dir).resolve()
98 | self.cfg_path = (self.root / HOOKS_PATH_REL).resolve()
99 | self._cfg = HookConfig()
100 | self._mtime = 0.0
101 | self._lock = threading.RLock()
102 |
103 |
104 | def fire(self, event: str, ctx: Dict[str, Any] | None = None) -> HookResult:
105 |
106 | self._reload_if_changed()
107 | if not self._cfg.hooks:
108 | return HookResult(True)
109 |
110 | c = dict(ctx or {})
111 | c.setdefault("path", "")
112 | c.setdefault("cmd", "")
113 | c.setdefault("language", "")
114 | c.setdefault("read_only", False)
115 | c.setdefault("mutating", False)
116 | c.setdefault("apply", False)
117 | c.setdefault("exit_code", None)
118 |
119 | outputs: List[str] = []
120 |
121 | for rule in self._cfg.hooks:
122 | if not self._match(rule, event, c):
123 | continue
124 |
125 | local_env = os.environ.copy()
126 |
127 | for action in rule.actions:
128 | if action.set_env:
129 | for k, v in action.set_env.items():
130 | try:
131 | local_env[k] = _fmt(v, c)
132 | except Exception:
133 | local_env[k] = str(v)
134 |
135 | if action.deny:
136 | msg = _fmt(action.deny, c)
137 | if event.startswith("pre_"):
138 | return HookResult(False, f"Blocked by hook: {msg}", outputs)
139 | else:
140 | outputs.append(f"[hook:deny:post] {msg}")
141 | continue
142 |
143 | if action.confirm:
144 | msg = _fmt(action.confirm, c)
145 | ok = confirm_action(msg, bool(c.get("apply")))
146 | if not ok:
147 | return HookResult(False, f"Cancelled by user: {msg}", outputs)
148 |
149 | if action.run:
150 | cmd = _fmt(action.run, c)
151 | res = self._run(cmd, env=local_env)
152 | outputs.append(res["report"])
153 | if res["failed"] and self._cfg.on_error == "fail" and event.startswith("pre_"):
154 | return HookResult(False, f"Hook command failed: {cmd}", outputs)
155 |
156 | return HookResult(True, "", outputs)
157 |
158 |
159 | def _reload_if_changed(self) -> None:
160 | with self._lock:
161 | if yaml is None: # pragma: no cover
162 | self._cfg = HookConfig()
163 | return
164 | if not self.cfg_path.exists():
165 | self._cfg = HookConfig()
166 | return
167 | try:
168 | m = self.cfg_path.stat().st_mtime
169 | except Exception:
170 | self._cfg = HookConfig()
171 | return
172 | if m <= self._mtime:
173 | return
174 | self._mtime = m
175 | try:
176 | data = yaml.safe_load(self.cfg_path.read_text(encoding="utf-8")) or {}
177 | self._cfg = self._parse_config(data)
178 | except Exception:
179 | self._cfg = HookConfig()
180 |
181 | def _parse_config(self, data: Dict[str, Any]) -> HookConfig:
182 | defaults = data.get("defaults", {}) or {}
183 | cfg = HookConfig(
184 | version=int(data.get("version", 1)),
185 | timeout_sec=int(defaults.get("timeout_sec", 60)),
186 | on_error=str(defaults.get("on_error", "fail")).lower(),
187 | hooks=[],
188 | )
189 | for raw in data.get("hooks", []) or []:
190 | actions: List[HookAction] = []
191 | for a in raw.get("actions", []) or []:
192 | actions.append(HookAction(
193 | run=a.get("run"),
194 | deny=a.get("deny"),
195 | confirm=a.get("confirm"),
196 | set_env=(a.get("set_env") or {}),
197 | ))
198 | cfg.hooks.append(HookRule(
199 | on=str(raw.get("on", "")),
200 | if_path=list(raw.get("if_path", []) or []),
201 | if_cmd=list(raw.get("if_cmd", []) or []),
202 | if_cmd_re=raw.get("if_cmd_re"),
203 | if_language=list(raw.get("if_language", []) or []),
204 | when_mutating=raw.get("when_mutating"),
205 | require_apply=raw.get("require_apply"),
206 | if_exit_code=raw.get("if_exit_code"),
207 | actions=actions,
208 | ))
209 | return cfg
210 |
211 | def _match(self, rule: HookRule, event: str, ctx: Dict[str, Any]) -> bool:
212 | if rule.on != event:
213 | return False
214 |
215 | if rule.if_path:
216 | p = str(ctx.get("path") or "").replace("\\", "/")
217 | if not any(fnmatch.fnmatch(p, pat) for pat in rule.if_path):
218 | return False
219 |
220 | if rule.if_cmd:
221 | c = str(ctx.get("cmd") or "")
222 | if not any(sub in c for sub in rule.if_cmd):
223 | return False
224 |
225 | if rule.if_cmd_re:
226 | c = str(ctx.get("cmd") or "")
227 | try:
228 | if not re.search(rule.if_cmd_re, c):
229 | return False
230 | except re.error:
231 | return False
232 |
233 | if rule.if_language:
234 | lang = str(ctx.get("language") or "").lower()
235 | if lang not in [s.lower() for s in rule.if_language]:
236 | return False
237 |
238 | if rule.when_mutating is not None and bool(ctx.get("mutating")) != bool(rule.when_mutating):
239 | return False
240 | if rule.require_apply is not None and bool(ctx.get("apply")) != bool(rule.require_apply):
241 | return False
242 |
243 | if rule.if_exit_code is not None:
244 | try:
245 | if int(ctx.get("exit_code")) != int(rule.if_exit_code):
246 | return False
247 | except Exception:
248 | return False
249 |
250 | return True
251 |
252 | def _run(self, cmd: str, *, env: Dict[str, str]) -> Dict[str, Any]:
253 |
254 | use_shell = os.name == "nt"
255 | argv = _split_cmd(cmd)
256 |
257 | try:
258 | proc = subprocess.run(
259 | argv if not use_shell else cmd,
260 | cwd=self.root,
261 | text=True,
262 | capture_output=True,
263 | timeout=max(5, int(self._cfg.timeout_sec)),
264 | shell=use_shell,
265 | env=env,
266 | )
267 | out = (proc.stdout or "").strip()
268 | err = (proc.stderr or "").strip()
269 | report = f"$ {cmd}\n(exit {proc.returncode})"
270 | if out:
271 | report += f"\n[stdout]\n{out}"
272 | if err:
273 | report += f"\n[stderr]\n{err}"
274 | return {"failed": proc.returncode != 0, "report": report}
275 | except subprocess.TimeoutExpired:
276 | return {"failed": True, "report": f"$ {cmd}\n(timeout after {self._cfg.timeout_sec}s)"}
277 | except Exception as e:
278 | return {"failed": True, "report": f"$ {cmd}\n(error: {type(e).__name__}: {e})"}
279 |
280 |
281 |
282 | _RUNNERS: Dict[str, HookRunner] = {}
283 |
284 |
285 | def get_hook_runner(project_dir: str | Path) -> HookRunner:
286 | root = str(Path(project_dir).resolve())
287 | runner = _RUNNERS.get(root)
288 | if runner is None:
289 | runner = HookRunner(Path(project_dir))
290 | _RUNNERS[root] = runner
291 | return runner
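292 |
293 | # Illustrative .langcode/hooks.yml (keys mirror _parse_config above; the event
294 | # name "pre_edit" is hypothetical - fire() accepts arbitrary event strings, and
295 | # only the "pre_" prefix changes deny/fail behavior):
296 | #   defaults:
297 | #     timeout_sec: 60
298 | #     on_error: fail
299 | #   hooks:
300 | #     - on: pre_edit
301 | #       if_path: ["src/**/*.py"]
302 | #       actions:
303 | #         - confirm: "Run linter on {path}?"
304 | #         - run: "ruff check {path}"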
--------------------------------------------------------------------------------
/src/langchain_code/static_values.py:
--------------------------------------------------------------------------------
1 | """Shared static values used across the LangCode package."""
2 |
3 | from __future__ import annotations
4 |
5 | from pathlib import Path
6 |
7 | APP_HELP = """
8 | LangCode - ReAct + Tools + Deep (LangGraph) code agent CLI.
9 |
10 | Just type `langcode` and hit enter - it's the only CLI you'll ever need.
11 | Toggle across everything without leaving the terminal!
12 |
13 | Use it to chat with an agent, implement features, fix bugs, or analyze a codebase.
14 |
15 | Key flags (for `chat`):
16 | • --mode [react|deep] Choose the reasoning engine (default: react).
17 | - react : Classic ReAct agent with tools.
18 | - deep : LangGraph-style multi-step agent.
19 | • --auto Autopilot (deep mode only). The deep agent will plan+act end-to-end
20 | WITHOUT asking questions (it still uses tools safely). Think "hands-off planning".
21 | • --apply Write changes to disk and run commands for you (feature/fix flows).
22 | If OFF, the agent proposes diffs only. Think "permission to execute".
23 |
24 | • --router Auto-route to the most efficient LLM per query (uses Gemini if --llm not provided).
25 | • --priority Router priority: balanced | cost | speed | quality (default: balanced)
26 | • --verbose Show router model-selection panels.
27 |
28 | Examples:
29 | • langcode chat --llm anthropic --mode react
30 | • langcode chat --llm gemini --mode deep --auto
31 | • langcode chat --router --priority cost --verbose
32 | • langcode feature "Add a dark mode toggle" --router --priority quality
33 | • langcode fix --log error.log --test-cmd "pytest -q" --router
34 | • langcode tell me what's going on in the codebase (quick mode → analyze)
35 | • langcode fix this (quick mode → fix; reads TTY log if available)
36 | Custom instructions:
37 | • Put project-specific rules in .langcode/langcode.md (created automatically).
38 | • From the launcher, select "Custom Instructions" to open your editor; or run `langcode instr`.
39 |
40 | NEW:
41 | • Just run `langcode` to open a beautiful interactive launcher.
42 | Use ↑/↓ to move, ←/→ to change values, Enter to start, h for help, q to quit.
43 | • In chat, type /select to return to the launcher without exiting.
44 | """.strip()
45 |
46 | PROMPT = "[bold green]langcode[/bold green] [dim]>[/dim] "
47 |
48 | ENV_FILENAMES = (".env", ".env.local")
49 |
50 | GLOBAL_ENV_ENVVAR = "LANGCODE_GLOBAL_ENV"
51 | LANGCODE_CONFIG_DIR_ENVVAR = "LANGCODE_CONFIG_DIR"
52 |
53 | LANGCODE_DIRNAME = ".langcode"
54 | LANGCODE_FILENAME = "langcode.md"
55 | MCP_FILENAME = "mcp.json"
56 | MCP_PROJECT_REL = Path("src") / "langchain_code" / "config" / MCP_FILENAME
57 |
58 | BASE_SYSTEM = """You are LangCode, a coding assistant with access to filesystem, shell, and web tools.
59 |
60 | ## Core Behavior
61 | - Use tools to discover information before acting
62 | - Make changes autonomously - don't ask for permission or paths
63 | - Always verify your changes by reading files after editing
64 | - Provide clear, factual responses based on tool outputs
65 |
66 | ## Available Tools
67 | - **Files**: list_dir, glob, read_file, edit_by_diff, write_file, delete_file
68 | - **Search**: grep (find text in files)
69 | - **Shell**: run_cmd (git, tests, etc.)
70 | - **Scripts**: script_exec (run short Python/Bash/PowerShell/Node scripts in the repo)
71 | - **Web**: TavilySearch
72 | - **Multimodal**: process_multimodal (for images)
73 | - **Planning**: write_todos (track progress)
74 | - **Terminal**: read_terminal (mandatory when the user asks things like *"what's this?"*, *"what's this error?"*, or *"fix this"* without details - the info is almost always in the terminal)
75 |
76 | ## Workflow
77 | 1. **Check context**: If the user refers vaguely to "this," "error," or "fix this,"
78 | immediately call `read_terminal` to capture the terminal contents before doing anything else.
79 | 2. **Discover**: Use glob/grep/list_dir to understand the codebase
80 | 3. **Read**: Use read_file on relevant files
81 | 4. **Act**: Make precise edits with edit_by_diff or create new files with write_file
82 | 5. **Verify**: Re-read files and run commands to confirm changes
83 | 6. **Commit**: Use git commands to save your work
84 |
85 |
86 | ## Rules
87 | - Always use tools rather than guessing
88 | - For vague user queries about issues, errors, or "what's this,"
89 |   invoke `read_terminal` immediately - do not ask the user to paste errors.
90 | - For file edits, show exactly what changed
91 | - Include relevant command outputs in your response
92 | - Keep responses focused and actionable
93 | """
94 |
95 | RUNTIME_POLICY = """
96 | ## Runtime Discipline (LangCode)
97 | - Explore first: `list_dir`, `glob`, `grep` to find targets. Never ask the user for paths.
98 | - Directory handling: if a folder is missing, just write files to that nested path (parents are auto-created).
99 | - Script fallback (when tools can't express the logic cleanly):
100 | 1) Prefer **Python** short scripts; else bash/pwsh/node if truly needed.
101 | 2) Run with `script_exec(language="python", code=..., timeout_sec=60)`.
102 | 3) If exit != 0, read stderr/stdout, **fix the script**, and retry up to 2 times.
103 | 4) Keep responses factual and include the key log lines (command, exit, brief stdout/stderr).
104 | - Verification: after edits/scripts, re-read files or run a quick command to confirm.
105 | - Never spawn background daemons; keep everything inside the project root.
106 | """.strip()
107 |
108 | BASE_DEEP_SUFFIX = """
109 | ## Planning & TODOs (MANDATORY)
110 | - FIRST THING: Call `write_todos([...])` with 3-8 concrete, verb-first steps BEFORE doing anything else.
111 | - Examples of steps: "Examine file structure", "Search for patterns", "Read implementation", "Summarize findings"
112 | - Before working on any step, call `update_todo_status(index, "in_progress")`.
113 | - After finishing a step, call `update_todo_status(index, "completed")`.
114 | - If you discover additional work, call `append_todo("...")` immediately.
115 | - Keep only ONE item marked "in_progress" at a time.
116 | - Always be specific: use file paths, function names, line numbers when applicable.
117 |
118 | ## Subagents
119 | - Prefer 'general-purpose' for iterative research/execution.
120 | """
121 |
122 | FEATURE_INSTR = """You are implementing a feature end-to-end.
123 | - Plan steps first (files to inspect, edits to make).
124 | - Use glob/grep to locate relevant files.
125 | - Use read_file to inspect.
126 | - Make targeted edits via edit_by_diff (preferred) or write_file for new files.
127 | - If a test command is available, call run_cmd with command "{TEST_CMD}" to execute it.
128 | - Present a concise summary of changes (list of files edited/created) and next steps."""
129 |
130 | BUGFIX_INSTR = """You are fixing a bug.
131 | - Parse any provided error log.
132 | - Use grep to locate suspicious symbols/stack frames.
133 | - Read the minimal code to understand the issue.
134 | - Propose a minimal safe patch via edit_by_diff (preferred).
135 | - If a test command is available, call run_cmd with command "{TEST_CMD}" to run it.
136 | - Explain the fix briefly and show the resulting diff."""
137 |
138 | AUTO_DEEP_INSTR = """Execute the request completely and autonomously.
139 |
140 | **MANDATORY TERMINATION RULE:**
141 | After completing your work, you MUST output exactly one message starting with "FINAL:" and then STOP. Do not continue using tools after outputting FINAL:.
142 |
143 | **Steps:**
144 | 1. Discover the codebase structure (glob, grep, read key files)
145 | - Always find files yourself using shell search (ls, find, grep, glob) and never rely on user hints/inputs. Walk the filesystem when needed.
146 | 2. Make the requested changes (edit_by_diff or write_file)
147 | 3. Test/verify your changes (run_cmd)
148 | 4. For visual content (diagrams, charts, images), generate only the rendered outputs when requested
149 | 5. Output FINAL: report and STOP
150 |
151 | **Termination Condition:**
152 | Once you have:
153 | - Used at least one discovery tool (glob/grep/read_file)
154 | - Made the requested changes
155 | - Generated all requested outputs (including rendered visuals such as PNGs from Mermaid diagrams). Avoid saving .mmd files
156 | - Run at least one shell command
157 | - Committed your work (or attempted to)
158 |
159 | Then output your FINAL: report and do NOT use any more tools.
160 |
161 | **Output Format:**
162 | ```
163 | FINAL:
164 | - Accomplished: [what you did]
165 | - Files changed: [list of files]
166 | - Command results: [key outputs]
167 | - Status: [complete/blocked and why]
168 | ```
169 |
170 | **Rules:**
171 | - No intermediate status updates
172 | - Use tools for all facts
173 | - Don't ask questions - act autonomously
174 | - Complete ALL requested deliverables before terminating
175 | - STOP after outputting FINAL: - do not continue
176 | """
177 |
178 | CHAT_SESSION_TITLE = "LangChain Code Agent | Chat"
179 | DEEP_CHAT_SESSION_TITLE = "LangChain Code Agent | Deep Chat"
180 | AUTO_CHAT_SUFFIX = " (Auto)"
181 |
182 | TODO_PANEL_TITLE = "TODOs"
183 | TODO_PLANNING_TEXT = "Planning tasks..."
184 | TODO_EMPTY_TEXT = "No tasks were emitted by the agent."
185 | TODO_ANIMATION_DELAY = 0.15
186 | TODO_STEP_HEADER = "Agent steps:"
187 |
188 | AUTOPILOT_PROMPT = (
189 | "AUTOPILOT: Start now. Discover files (glob/list_dir/grep), read targets (read_file), "
190 | "perform edits (edit_by_diff/write_file), and run at least one run_cmd (git/tests) capturing stdout/"
191 | "stderr + exit code. Then produce one 'FINAL:' report and STOP. No questions."
192 | )
193 |
194 | FEATURE_SESSION_TITLE = "LangChain Code Agent | Feature"
195 | FIX_SESSION_TITLE = "LangChain Code Agent | Fix"
196 | ANALYZE_SESSION_TITLE = "LangChain Code Agent | Analyze"
197 | GLOBAL_ENV_TITLE = "LangCode | Global Environment"
198 | PROJECT_ENV_TITLE = "LangCode | Project Environment"
199 | INSTRUCTIONS_TITLE = "LangChain Code Agent | Custom Instructions"
200 |
201 | FIX_FALLBACK_PROMPT = "Fix the bug using the provided log."
202 |
203 | PROVIDER_KEY_LABELS = {
204 | "OPENAI_API_KEY": "OpenAI",
205 | "ANTHROPIC_API_KEY": "Anthropic",
206 | "GOOGLE_API_KEY": "Gemini",
207 | "GEMINI_API_KEY": "Gemini (alt)",
208 | "GROQ_API_KEY": "Groq",
209 | "TOGETHER_API_KEY": "Together",
210 | "FIREWORKS_API_KEY": "Fireworks",
211 | "PERPLEXITY_API_KEY": "Perplexity",
212 | "DEEPSEEK_API_KEY": "DeepSeek",
213 | "TAVILY_API_KEY": "Tavily (web search)",
214 | }
215 |
216 | DOCTOR_FOOTER_TIP = "Tip: run 'langcode instr' to set project rules; edit environment via the launcher."
217 |
--------------------------------------------------------------------------------
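
The script-fallback protocol in RUNTIME_POLICY above (run a short script, inspect the exit code, repair, retry at most twice) is concrete enough to sketch. The snippet below is a hedged illustration only: `run_script` and `repair` are stand-in names, not the repo's real `script_exec` tool, whose actual implementation lives in tools/script_exec.py.

```python
# Hedged sketch of the RUNTIME_POLICY retry discipline. `run_script` and
# `repair` are illustrative stand-ins, not the repo's actual APIs.
import subprocess
import sys
from typing import Callable, Tuple

def run_script(code: str, timeout_sec: int = 60) -> Tuple[int, str, str]:
    # Execute a short Python script, capturing exit code, stdout, stderr.
    proc = subprocess.run(
        [sys.executable, "-c", code],
        capture_output=True, text=True, timeout=timeout_sec,
    )
    return proc.returncode, proc.stdout, proc.stderr

def run_with_retries(
    code: str,
    repair: Callable[[str, str], str],
    max_retries: int = 2,
) -> Tuple[int, str, str]:
    # On a nonzero exit, hand the script and its stderr to `repair`
    # (the model, in the real agent) and retry, at most twice.
    exit_code, out, err = run_script(code)
    for _ in range(max_retries):
        if exit_code == 0:
            break
        code = repair(code, err)
        exit_code, out, err = run_script(code)
    return exit_code, out, err
```
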
/src/langchain_code/cli_components/display.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import re
4 | from pathlib import Path
5 | from typing import Any, Dict, List, Optional
6 |
7 | from functools import lru_cache
8 | from rich import box
9 | from rich.console import Console, Group
10 | from rich.align import Align
11 | from rich.markdown import Markdown
12 | from rich.panel import Panel
13 | from rich.rule import Rule
14 | from rich.style import Style
15 | from rich.text import Text
16 | from pyfiglet import Figlet
17 |
18 | from .state import console, in_selection_hub
19 |
20 |
21 | @lru_cache(maxsize=64)
22 | def _ascii_gradient_lines(width: int, text: str, font: str, gradient: str) -> Group:
23 | def _hex_to_rgb(h: str) -> tuple[int, int, int]:
24 | h = h.lstrip("#")
25 | return tuple(int(h[i:i + 2], 16) for i in (0, 2, 4))
26 |
27 | def _lerp(a: int, b: int, t: float) -> int:
28 | return int(a + (b - a) * t)
29 |
30 | def _interpolate_palette(palette: list[str], steps: int) -> list[str]:
31 | if steps <= 1:
32 | return [palette[0]]
33 | out, steps_total = [], steps - 1
34 | for x in range(steps):
35 | pos = x / steps_total if steps_total else 0
36 | seg = min(int(pos * (len(palette) - 1)), len(palette) - 2)
37 | seg_start = seg / (len(palette) - 1)
38 | seg_end = (seg + 1) / (len(palette) - 1)
39 | local_t = (pos - seg_start) / (seg_end - seg_start + 1e-9)
40 | c1, c2 = _hex_to_rgb(palette[seg]), _hex_to_rgb(palette[seg + 1])
41 | rgb = tuple(_lerp(a, b, local_t) for a, b in zip(c1, c2))
42 | out.append(f"#{rgb[0]:02x}{rgb[1]:02x}{rgb[2]:02x}")
43 | return out
44 |
45 | fig = Figlet(font=font, width=width)
46 | lines = fig.renderText(text).rstrip("\n").splitlines()
47 | if not lines:
48 | return Group()
49 |
50 | max_len = max(len(line) for line in lines)
51 | palette = ["#052e1e", "#064e3b", "#065f46", "#047857", "#059669", "#16a34a", "#22c55e", "#34d399"]
52 | if gradient == "light_to_dark":
53 | palette = list(reversed(palette))
54 | ramp = _interpolate_palette(palette, max_len)
55 |
56 | rendered_lines: List[Any] = []
57 | for raw in lines:
58 | if not raw.strip():
59 | rendered_lines.append(Align.center(Text(""), width=width))
60 | continue
61 | styled = Text()
62 | for idx, ch in enumerate(raw):
63 | styled.append(ch, style=Style(color=ramp[idx], bold=(ch != " ")))
64 | rendered_lines.append(Align.center(styled, width=width))
65 | return Group(*rendered_lines)
66 | def langcode_ascii_renderable(width: int, text: str = "LangCode", font: str = "ansi_shadow", gradient: str = "dark_to_light") -> Group:
67 | if width < 60:
68 | return Group(Align.center(Text(text, style="bold green"), width=width))
69 | return _ascii_gradient_lines(width, text, font, gradient)
70 |
71 |
72 | def print_langcode_ascii(
73 | console: Console,
74 | text: str = "LangCode",
75 | font: str = "ansi_shadow",
76 | gradient: str = "dark_to_light",
77 | ) -> None:
78 | width = getattr(console.size, "width", 80)
79 | console.print(langcode_ascii_renderable(width, text, font, gradient))
80 |
81 |
82 | def session_banner(
83 | provider: Optional[str],
84 | project_dir: Path,
85 | title_text: str,
86 | *,
87 | interactive: bool = False,
88 | apply: bool = False,
89 | test_cmd: Optional[str] = None,
90 | tips: Optional[List[str]] = None,
91 | model_info: Optional[Dict[str, Any]] = None,
92 | router_enabled: bool = False,
93 | deep_mode: bool = False,
94 | command_name: Optional[str] = None,
95 | ) -> Panel:
96 | title = Text(title_text, style="bold magenta")
97 | body = Text()
98 |
99 | body.append("Provider: ", style="bold")
100 | if provider and provider.strip() and " " not in provider:
101 | body.append(provider.upper())
102 | else:
103 | body.append((provider or "not set"), style="dim")
104 | body.append("\n")
105 |
106 | body.append("Project: ", style="bold")
107 | body.append(str(project_dir))
108 |
109 | badge = Text()
110 | if router_enabled:
111 | badge.append(" [ROUTER ON]", style="bold green")
112 | if command_name:
113 | badge.append(f" [{command_name}]", style="bold blue")
114 | if deep_mode:
115 | badge.append(" [DEEP MODE]", style="bold magenta")
116 | if apply:
117 | badge.append(" [APPLY MODE]", style="bold red")
118 | if test_cmd:
119 | badge.append(f" tests: {test_cmd}", style="italic")
120 | if badge:
121 | body.append("\n")
122 | body.append_text(badge)
123 |
124 | if model_info:
125 | body.append("\n")
126 | model_line = (
127 | f"Model: {model_info.get('model_name', '(unknown)')} "
128 | f"[{model_info.get('langchain_model_name', '?')}]"
129 | f" | priority={model_info.get('priority_used','balanced')}"
130 | )
131 | body.append(model_line, style="dim")
132 |
133 | if interactive:
134 | body.append("\n\n")
135 | body.append("Type your request. /clear to redraw, /select to change mode, /exit or /quit to quit. Ctrl+C also exits.\n", style="dim")
136 |
137 | if tips:
138 | body.append("\n")
139 | for item in tips:
140 | body.append(item + "\n", style="dim")
141 |
142 | return Panel(
143 | body,
144 | title=title,
145 | subtitle=Text("ReAct | Deep | Tools | Safe Edits", style="dim"),
146 | border_style="green",
147 | padding=(1, 2),
148 | box=box.HEAVY,
149 | )
150 |
151 |
152 | def pause_if_in_launcher() -> None:
153 | """If we were launched from the selection hub, wait for Enter before redrawing it."""
154 | if in_selection_hub():
155 | console.print(Rule(style="green"))
156 | console.input("[dim]Press Enter to return to the launcher...[/dim]")
157 |
158 |
159 | def print_session_header(
160 | title: str,
161 | provider: Optional[str],
162 | project_dir: Path,
163 | *,
164 | interactive: bool = False,
165 | apply: bool = False,
166 | test_cmd: Optional[str] = None,
167 | tips: Optional[List[str]] = None,
168 | model_info: Optional[Dict[str, Any]] = None,
169 | router_enabled: bool = False,
170 | deep_mode: bool = False,
171 | command_name: Optional[str] = None,
172 | ) -> None:
173 | console.clear()
174 | print_langcode_ascii(console, text="LangCode", font="ansi_shadow", gradient="dark_to_light")
175 | console.print(
176 | session_banner(
177 | provider,
178 | project_dir,
179 | title,
180 | interactive=interactive,
181 | apply=apply,
182 | test_cmd=test_cmd,
183 | tips=tips,
184 | model_info=model_info,
185 | router_enabled=router_enabled,
186 | deep_mode=deep_mode,
187 | command_name=command_name,
188 | )
189 | )
190 | console.print(Rule(style="green"))
191 |
192 |
193 | def looks_like_markdown(text: str) -> bool:
194 | """Heuristic: decide if the model output is Markdown."""
195 | if "```" in text:
196 | return True
197 | if re.search(r"(?m)^\s{0,3}#{1,6}\s", text):
198 | return True
199 | if re.search(r"(?m)^\s{0,3}[-*+]\s+", text):
200 | return True
201 | if re.search(r"(?m)^\s{0,3}\d+\.\s+", text):
202 | return True
203 | if re.search(r"`[^`]+`", text) or re.search(r"\*\*[^*]+\*\*", text):
204 | return True
205 | return False
206 |
207 |
208 | def to_text(content: Any) -> str:
209 | """Coerce Claude-style content blocks (list[dict|str]) into a single string."""
210 | if isinstance(content, str):
211 | return content
212 | if isinstance(content, list):
213 | parts = []
214 | for item in content:
215 | if isinstance(item, dict):
216 | val = item.get("text") or item.get("data") or item.get("content")
217 | if isinstance(val, str):
218 | parts.append(val)
219 | elif isinstance(item, str):
220 | parts.append(item)
221 | else:
222 | parts.append(str(item))
223 | return "\n".join(parts)
224 | return str(content)
225 |
226 |
227 | def normalize_chat_history_for_anthropic(history: List) -> List:
228 | """Return a copy of history with str content only (prevents .strip() on lists)."""
229 | out: List = []
230 | for msg in history:
231 | try:
232 | content = getattr(msg, "content", "")
233 | out.append(msg.__class__(content=to_text(content)))
234 | except Exception:
235 | out.append(msg.__class__(content=str(getattr(msg, "content", ""))))
236 | return out
237 |
238 |
239 | def panel_agent_output(text: str, title: str = "Agent", model_label: Optional[str] = None) -> Panel:
240 | """
241 | Render agent output full-width, with clean wrapping and proper Markdown
242 | when appropriate. This avoids the 'half-cut' panel look.
243 | """
244 | text = (text or "").rstrip()
245 |
246 | if looks_like_markdown(text):
247 | body = Markdown(text)
248 | else:
249 | t = Text.from_ansi(text) if "\x1b[" in text else Text(text)
250 | t.no_wrap = False
251 | t.overflow = "fold"
252 | body = t
253 |
254 | return Panel(
255 | body,
256 | title=title,
257 | border_style="cyan",
258 | box=box.ROUNDED,
259 | padding=(1, 2),
260 | expand=True,
261 | subtitle=(Text(f"Model: {model_label}", style="dim") if model_label else None),
262 | subtitle_align="right",
263 | )
264 |
265 |
266 | def panel_router_choice(info: Dict[str, Any]) -> Panel:
267 | if not info:
268 | body = Text("Router active, but no model info available.", style="dim")
269 | else:
270 | name = info.get("model_name", "(unknown)")
271 | langchain_name = info.get("langchain_model_name", "?")
272 | provider = info.get("provider", "?").upper()
273 | priority = info.get("priority_used", "balanced")
274 | latency = info.get("latency_tier", "?")
275 | rs = info.get("reasoning_strength", "?")
276 | ic = info.get("input_cost_per_million", "?")
277 | oc = info.get("output_cost_per_million", "?")
278 | ctx = info.get("context_window", "?")
279 | body = Text.from_markup(
280 | f"[bold]Router:[/bold] {provider} -> [bold]{name}[/bold] [dim]({langchain_name})[/dim]\n"
281 | f"[dim]priority={priority} | latency_tier={latency} | reasoning={rs}/10 | "
282 | f"cost=${ic}M in/${oc}M out | ctx={ctx} tokens[/dim]"
283 | )
284 | return Panel.fit(body, title="Model Selection", border_style="green", box=box.ROUNDED, padding=(0, 1))
285 |
286 |
287 | def show_loader():
288 | """Spinner that doesn't interfere with interactive prompts (y/n, input, click.confirm)."""
289 | return console.status("[bold]Processing...[/bold]", spinner="dots", spinner_style="green")
--------------------------------------------------------------------------------
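
The coercion in `to_text` matters whenever a provider returns content blocks instead of a plain string. A quick way to sanity-check it without installing rich or pyfiglet is to restate the function standalone; the sketch below mirrors the rules in display.py exactly (dict blocks contribute their first string-valued `text`/`data`/`content` field, bare strings pass through, everything else is stringified).

```python
# Standalone restatement of to_text from display.py, for checking the
# coercion of Claude-style content blocks without rich/pyfiglet installed.
from typing import Any

def to_text(content: Any) -> str:
    if isinstance(content, str):
        return content
    if isinstance(content, list):
        parts = []
        for item in content:
            if isinstance(item, dict):
                val = item.get("text") or item.get("data") or item.get("content")
                if isinstance(val, str):
                    parts.append(val)
            elif isinstance(item, str):
                parts.append(item)
            else:
                parts.append(str(item))
        return "\n".join(parts)
    return str(content)

# Claude-style block list collapses to newline-joined text.
assert to_text([{"type": "text", "text": "hello"}, "world"]) == "hello\nworld"
assert to_text("plain") == "plain"
```
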
/src/langchain_code/cli/commands/flows.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import os
4 | from pathlib import Path
5 | from typing import Optional
6 |
7 | import typer
8 | from rich.panel import Panel
9 | from rich.text import Text
10 |
11 | from ...cli_components.display import (
12 | print_session_header,
13 | panel_agent_output,
14 | panel_router_choice,
15 | show_loader,
16 | pause_if_in_launcher,
17 | )
18 | from ...cli_components.env import bootstrap_env
19 | from ...cli_components.state import console
20 | from ...cli_components.env import tail_bytes, extract_error_block, tty_log_path
21 | from ...cli_components.runtime import extract_last_content, thread_id_for
22 | from ...cli_components.agents import (
23 | agent_cache_get,
24 | agent_cache_put,
25 | resolve_provider,
26 | build_react_agent_with_optional_llm,
27 | build_deep_agent_with_optional_llm,
28 | )
29 | from ...config_core import get_model, get_model_info
30 | from ...workflows.feature_impl import FEATURE_INSTR
31 | from ...workflows.bug_fix import BUGFIX_INSTR
32 | from ..constants_runtime import (
33 | FEATURE_SESSION_TITLE,
34 | FIX_SESSION_TITLE,
35 | ANALYZE_SESSION_TITLE,
36 | FIX_FALLBACK_PROMPT,
37 | )
38 |
39 |
40 | def feature(
41 | request: str = typer.Argument(..., help='e.g. "Add a dark mode toggle in settings"'),
42 | llm: Optional[str] = typer.Option(None, "--llm", help="anthropic | gemini | openai | ollama"),
43 | project_dir: Path = typer.Option(Path.cwd(), "--project-dir", exists=True, file_okay=False),
44 | test_cmd: Optional[str] = typer.Option(None, "--test-cmd", help='e.g. "pytest -q" or "npm test"'),
45 | apply: bool = typer.Option(False, "--apply", help="Apply writes and run commands without interactive confirm."),
46 | router: bool = typer.Option(False, "--router", help="Auto-route to the most efficient LLM for this request."),
47 | priority: str = typer.Option("balanced", "--priority", help="balanced | cost | speed | quality"),
48 | verbose: bool = typer.Option(False, "--verbose", help="Show model selection panel."),
49 | ):
50 | bootstrap_env(project_dir, interactive_prompt_if_missing=True)
51 |
52 | priority = (priority or "balanced").lower()
53 | if priority not in {"balanced", "cost", "speed", "quality"}:
54 | priority = "balanced"
55 |
56 | provider = resolve_provider(llm, router)
57 | model_info = None
58 | chosen_llm = None
59 |
60 | if router:
61 | model_info = get_model_info(provider, request, priority)
62 | chosen_llm = get_model(provider, request, priority)
63 |
64 | print_session_header(
65 | FEATURE_SESSION_TITLE,
66 | provider,
67 | project_dir,
68 | interactive=False,
69 | apply=apply,
70 | test_cmd=test_cmd,
71 | model_info=(model_info if (router and verbose) else None),
72 | router_enabled=router,
73 | )
74 | if router and verbose and model_info:
75 | console.print(panel_router_choice(model_info))
76 |
77 | model_key = (model_info or {}).get("langchain_model_name", "default")
78 | cache_key = ("react", provider, model_key, str(project_dir.resolve()), False)
79 | cached = agent_cache_get(cache_key)
80 | if not router and provider in {"openai", "ollama"}:
81 | chosen_llm = get_model(provider)
82 | if cached is None:
83 | agent = build_react_agent_with_optional_llm(
84 | provider=provider,
85 | project_dir=project_dir,
86 | llm=chosen_llm,
87 | apply=apply,
88 | test_cmd=test_cmd,
89 | instruction_seed=FEATURE_INSTR,
90 | )
91 | agent_cache_put(cache_key, agent)
92 | else:
93 | agent = cached
94 |
95 | with show_loader():
96 | res = agent.invoke({"input": request, "chat_history": []})
97 | output = res.get("output", "") if isinstance(res, dict) else str(res)
98 | console.print(panel_agent_output(output, title="Feature Result"))
99 | pause_if_in_launcher()
100 |
101 |
102 | def fix(
103 | request: Optional[str] = typer.Argument(None, help='e.g. "Fix crash on image upload"'),
104 | log: Optional[Path] = typer.Option(None, "--log", exists=True, help="Path to error log or stack trace."),
105 | llm: Optional[str] = typer.Option(None, "--llm", help="anthropic | gemini | openai | ollama"),
106 | project_dir: Path = typer.Option(Path.cwd(), "--project-dir", exists=True, file_okay=False),
107 | test_cmd: Optional[str] = typer.Option(None, "--test-cmd", help='e.g. "pytest -q"'),
108 | apply: bool = typer.Option(False, "--apply", help="Apply writes and run commands without interactive confirm."),
109 | router: bool = typer.Option(False, "--router", help="Auto-route to the most efficient LLM for this request."),
110 | priority: str = typer.Option("balanced", "--priority", help="balanced | cost | speed | quality"),
111 | verbose: bool = typer.Option(False, "--verbose", help="Show model selection panel."),
112 | from_tty: bool = typer.Option(
113 | False,
114 | "--from-tty",
115 | help="Use most recent output from the current logged terminal session (run your command via `langcode wrap ...` or `langcode shell`).",
116 | ),
117 | tty_id: Optional[str] = typer.Option(None, "--tty-id", help="Which session to read; defaults to current TTY."),
118 | ):
119 | bootstrap_env(project_dir, interactive_prompt_if_missing=True)
120 |
121 | priority = (priority or "balanced").lower()
122 | if priority not in {"balanced", "cost", "speed", "quality"}:
123 | priority = "balanced"
124 |
125 | provider = resolve_provider(llm, router)
126 |
127 | bug_input = (request or "").strip()
128 | if log:
129 | bug_input += "\n\n--- ERROR LOG ---\n" + Path(log).read_text(encoding="utf-8", errors="ignore")
130 | elif from_tty:
131 | tlog = os.environ.get("LANGCODE_TTY_LOG") or str(tty_log_path(tty_id))
132 | p = Path(tlog)
133 | if p.exists():
134 | recent = tail_bytes(p)
135 | block = extract_error_block(recent).strip()
136 | if block:
137 | bug_input += "\n\n--- ERROR LOG (from TTY) ---\n" + block
138 | console.print(Panel.fit(Text(f"Using error from session log: {p}", style="dim"), border_style="cyan"))
139 | else:
140 | console.print(
141 | Panel.fit(
142 |                     Text("No TTY session log found. Run your failing command via `langcode wrap ...` or `langcode shell`.", style="yellow"),
143 | border_style="yellow",
144 | )
145 | )
146 | bug_input = bug_input.strip() or FIX_FALLBACK_PROMPT
147 |
148 | model_info = None
149 | chosen_llm = None
150 | if router:
151 | model_info = get_model_info(provider, bug_input, priority)
152 | chosen_llm = get_model(provider, bug_input, priority)
153 |
154 | print_session_header(
155 | FIX_SESSION_TITLE,
156 | provider,
157 | project_dir,
158 | interactive=False,
159 | apply=apply,
160 | test_cmd=test_cmd,
161 | model_info=(model_info if (router and verbose) else None),
162 | router_enabled=router,
163 | )
164 | if router and verbose and model_info:
165 | console.print(panel_router_choice(model_info))
166 |
167 | model_key = (model_info or {}).get("langchain_model_name", "default")
168 | cache_key = ("react", provider, model_key, str(project_dir.resolve()), False)
169 | cached = agent_cache_get(cache_key)
170 | if not router and provider in {"openai", "ollama"}:
171 | chosen_llm = get_model(provider)
172 | if cached is None:
173 | agent = build_react_agent_with_optional_llm(
174 | provider=provider,
175 | project_dir=project_dir,
176 | llm=chosen_llm,
177 | apply=apply,
178 | test_cmd=test_cmd,
179 | instruction_seed=BUGFIX_INSTR,
180 | )
181 | agent_cache_put(cache_key, agent)
182 | else:
183 | agent = cached
184 |
185 | with show_loader():
186 | res = agent.invoke({"input": bug_input, "chat_history": []})
187 | output = res.get("output", "") if isinstance(res, dict) else str(res)
188 | console.print(panel_agent_output(output, title="Fix Result"))
189 | pause_if_in_launcher()
190 |
191 |
192 | def analyze(
193 | request: str = typer.Argument(..., help='e.g. "What are the main components of this project?"'),
194 | llm: Optional[str] = typer.Option(None, "--llm", help="anthropic | gemini | openai | ollama"),
195 | project_dir: Path = typer.Option(Path.cwd(), "--project-dir", exists=True, file_okay=False),
196 | router: bool = typer.Option(False, "--router", help="Auto-route to the most efficient LLM for this request."),
197 | priority: str = typer.Option("balanced", "--priority", help="balanced | cost | speed | quality"),
198 | verbose: bool = typer.Option(False, "--verbose", help="Show model selection panel."),
199 | ):
200 | bootstrap_env(project_dir, interactive_prompt_if_missing=True)
201 |
202 | priority = (priority or "balanced").lower()
203 | if priority not in {"balanced", "cost", "speed", "quality"}:
204 | priority = "balanced"
205 |
206 | provider = resolve_provider(llm, router)
207 |
208 | model_info = None
209 | chosen_llm = None
210 | if router:
211 | model_info = get_model_info(provider, request, priority)
212 | chosen_llm = get_model(provider, request, priority)
213 |
214 | print_session_header(
215 | ANALYZE_SESSION_TITLE,
216 | provider,
217 | project_dir,
218 | interactive=False,
219 | apply=False,
220 | model_info=(model_info if (router and verbose) else None),
221 | router_enabled=router,
222 | )
223 | if router and verbose and model_info:
224 | console.print(panel_router_choice(model_info))
225 |
226 | model_key = (model_info or {}).get("langchain_model_name", "default")
227 | cache_key = ("deep", provider, model_key, str(project_dir.resolve()), False)
228 | cached = agent_cache_get(cache_key)
229 | if not router and provider in {"openai", "ollama"}:
230 | chosen_llm = get_model(provider)
231 | if cached is None:
232 | agent = build_deep_agent_with_optional_llm(
233 | provider=provider,
234 | project_dir=project_dir,
235 | llm=chosen_llm,
236 | apply=False,
237 | )
238 | agent_cache_put(cache_key, agent)
239 | else:
240 | agent = cached
241 |
242 | with show_loader():
243 | output = ""
244 | try:
245 | res = agent.invoke(
246 | {"messages": [{"role": "user", "content": request}]},
247 | config={
248 | "recursion_limit": 45,
249 | "configurable": {"thread_id": thread_id_for(project_dir, "analyze")},
250 | },
251 | )
252 | output = (
253 | extract_last_content(res.get("messages", [])).strip()
254 | if isinstance(res, dict) and "messages" in res
255 | else str(res)
256 | )
257 | except Exception as e:
258 | output = f"Analyze error: {e}"
259 | console.print(panel_agent_output(output or "No response generated.", title="Analysis Result"))
260 | pause_if_in_launcher()
261 |
262 |
263 | __all__ = ["feature", "fix", "analyze"]
264 |
265 |
--------------------------------------------------------------------------------
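
All three flow commands above share the same cache discipline: agents are keyed by (kind, provider, model, project path, deep flag), so repeat invocations in one session skip an expensive rebuild. A minimal sketch of that keying, with a plain dict standing in for the real `agent_cache_get`/`agent_cache_put` in cli_components/agents.py:

```python
# Sketch of the agent-cache keying used by feature()/fix()/analyze().
# A plain dict stands in for the repo's actual cache helpers.
from pathlib import Path
from typing import Any, Callable, Dict, Tuple

_CACHE: Dict[Tuple[str, str, str, str, bool], Any] = {}

def get_or_build(
    kind: str,            # "react" or "deep"
    provider: str,
    model_key: str,       # "default" when the router picked no model
    project_dir: Path,
    deep: bool,
    build: Callable[[], Any],
) -> Any:
    key = (kind, provider, model_key, str(project_dir.resolve()), deep)
    agent = _CACHE.get(key)
    if agent is None:
        agent = build()   # expensive: tools + LLM + graph construction
        _CACHE[key] = agent
    return agent
```
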
/src/langchain_code/tools/fs_local.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | from pathlib import Path
3 | from difflib import unified_diff
4 | from langchain_core.tools import tool
5 | from ..hooks import get_hook_runner
6 |
7 |
8 | def _rooted(project_dir: str, path: str) -> Path:
9 | """
10 | Resolve `path` inside the project root and refuse path traversal.
11 | """
12 | p = Path(project_dir).joinpath(path).resolve()
13 | root = Path(project_dir).resolve()
14 |     if p != root and root not in p.parents:
15 | raise ValueError("Path escapes project_dir")
16 | return p
17 |
18 |
19 | def _clip(s: str, n: int = 24000) -> str:
20 | """Trim long strings for readable tool outputs."""
21 | return s if len(s) <= n else s[:n] + "\n...[truncated]..."
22 |
23 |
24 | def _should_apply(apply: bool, safety: str, is_mutation: bool) -> tuple[bool, str | None]:
25 | """
26 | Enforce write/exec safety gates.
27 |
28 | safety:
29 | - 'auto' : allow non-mutations only (block mutations unless apply=True)
30 | - 'require' : require apply=True for any mutation
31 | - 'force' : bypass apply and run anyway
32 | """
33 | safety = (safety or "auto").strip().lower()
34 | if safety not in {"auto", "require", "force"}:
35 | safety = "auto"
36 | if safety == "force":
37 | return True, None
38 | if not is_mutation:
39 | return True, None
40 | if safety == "require" and not apply:
41 | return False, "Execution requires apply=True (explicit consent). Re-run with --apply."
42 | if safety == "auto" and not apply:
43 | return False, ("Declined without apply: this is a file modification. "
44 | "Re-run with --apply, or set safety='force' if you intend to write.")
45 | return True, None
46 |
47 |
48 | def _with_hooks(base: str, *hook_results) -> str:
49 | """Append any hook outputs as a `[hooks]` section."""
50 | chunks = []
51 | for hr in hook_results:
52 | if hr and getattr(hr, "outputs", None):
53 | chunks.extend([o for o in hr.outputs if o])
54 | if not chunks:
55 | return base
56 | return f"{base}\n\n[hooks]\n" + "\n\n".join(chunks)
57 |
58 |
59 | def make_list_dir_tool(project_dir: str):
60 | @tool(
61 | "list_dir",
62 | description="List the contents of a directory relative to the project root. Directories end with '/'.",
63 | return_direct=False,
64 | )
65 | def list_dir(path: str = ".") -> str:
66 | """
67 | List a directory (relative to project root).
68 |
69 | Args:
70 | path: Directory path (default '.').
71 | Returns:
72 | One item per line, with directories suffixed by '/'.
73 | """
74 | p = _rooted(project_dir, path)
75 | if not p.exists():
76 | return f"{path} not found."
77 | if not p.is_dir():
78 | return f"{path} is not a directory."
79 | items = []
80 | for child in sorted(p.iterdir()):
81 | suffix = "/" if child.is_dir() else ""
82 | items.append(str(Path(path) / (child.name + suffix)))
83 | return "\n".join(items)
84 |
85 | return list_dir
86 |
87 |
88 | def make_read_file_tool(project_dir: str):
89 | @tool(
90 | "read_file",
91 | description="Read and return the full text content of a file relative to the project root.",
92 | return_direct=False,
93 | )
94 | def read_file(path: str) -> str:
95 | """
96 | Read a text file.
97 |
98 | Args:
99 | path: File path (relative to project root).
100 | Returns:
101 | Entire file content as UTF-8 text or an error message.
102 | """
103 | p = _rooted(project_dir, path)
104 | if not p.exists() or not p.is_file():
105 | return f"{path} not found or not a file."
106 | try:
107 | return p.read_text(encoding="utf-8")
108 | except Exception as e:
109 | return f"Error reading {path}: {e}"
110 |
111 | return read_file
112 |
113 |
114 | def make_write_file_tool(project_dir: str, apply: bool):
115 | @tool(
116 | "write_file",
117 | description="Create or overwrite a file with given content. Respects --apply and fires pre/post hooks.",
118 | return_direct=False,
119 | )
120 | def write_file(
121 | path: str,
122 | content: str,
123 | *,
124 | safety: str = "require",
125 | report: str = "diff",
126 | ) -> str:
127 | """
128 | Write a file (create/overwrite).
129 |
130 | Args:
131 | path: Target file path (relative to project root).
132 | content: New file contents (UTF-8).
133 | safety: 'auto' | 'require' | 'force' (see safety policy).
134 | report: 'diff' to show unified diff, or 'summary'.
135 |
136 | Returns:
137 | Operation summary (and diff if requested). Appends a [hooks] section if hooks ran.
138 | """
139 | report = (report or "diff").strip().lower()
140 | if report not in {"diff", "summary"}:
141 | report = "diff"
142 |
143 | p = _rooted(project_dir, path)
144 | p.parent.mkdir(parents=True, exist_ok=True)
145 |
146 | old = ""
147 | if p.exists():
148 | try:
149 | old = p.read_text(encoding="utf-8")
150 | except Exception:
151 | old = ""
152 |
153 | diff = "\n".join(
154 | unified_diff(old.splitlines(), content.splitlines(), fromfile=f"a/{path}", tofile=f"b/{path}", lineterm="")
155 | )
156 |
157 | runner = get_hook_runner(project_dir)
158 | pre = runner.fire(
159 | "pre_write_file",
160 | {"path": path, "diff": diff, "size": len(content), "apply": bool(apply), "mutating": True},
161 | )
162 | if not pre.allowed:
163 | return _with_hooks(f"write {path} blocked.\n{pre.message}", pre)
164 |
165 | allowed, msg = _should_apply(apply, safety, is_mutation=True)
166 | if not allowed:
167 | hdr = f"dry-run (apply={apply}): write {path}"
168 | if report == "summary":
169 | base = f"{hdr}\nChange size: {len(content)} chars\nReason: {msg}"
170 | else:
171 | base = f"{hdr}\nDiff:\n{_clip(diff)}\nReason: {msg}"
172 | return _with_hooks(base, pre)
173 |
174 | try:
175 | p.write_text(content, encoding="utf-8")
176 | if report == "summary":
177 | base = f"Wrote {len(content)} chars to {path}."
178 | else:
179 | base = f"Wrote {len(content)} chars to {path}.\nDiff:\n{_clip(diff)}"
180 | post = runner.fire(
181 | "post_write_file",
182 | {"path": path, "diff": diff, "size": len(content), "apply": bool(apply), "mutating": True},
183 | )
184 | return _with_hooks(base, pre, post)
185 | except Exception as e:
186 | return _with_hooks(f"Error writing {path}: {type(e).__name__}: {e}", pre)
187 |
188 | return write_file
189 |
190 |
191 | def make_edit_by_diff_tool(project_dir: str, apply: bool):
192 | @tool(
193 | "edit_by_diff",
194 | description="Replace an exact snippet in a file with a new snippet (single, safe micro-edit). Fires pre/post hooks.",
195 | return_direct=False,
196 | )
197 | def edit_by_diff(
198 | path: str,
199 | original_snippet: str,
200 | replaced_snippet: str,
201 | *,
202 | safety: str = "require",
203 | ) -> str:
204 | """
205 | Edit a file by replacing an exact snippet once.
206 |
207 | Args:
208 | path: File path (relative to project root).
209 | original_snippet: Exact text to find.
210 | replaced_snippet: Replacement text.
211 | safety: 'auto' | 'require' | 'force'.
212 |
213 | Returns:
214 | Operation summary with unified diff, plus [hooks] if any ran.
215 | """
216 | p = _rooted(project_dir, path)
217 | if not p.exists() or not p.is_file():
218 | return f"{path} not found or not a file."
219 | try:
220 | text = p.read_text(encoding="utf-8")
221 | except Exception as e:
222 | return f"Error reading {path}: {e}"
223 |
224 | if original_snippet not in text:
225 | return f"Original snippet not found in {path}."
226 |
227 | new_text = text.replace(original_snippet, replaced_snippet, 1)
228 | diff = "\n".join(
229 | unified_diff(text.splitlines(), new_text.splitlines(), fromfile=f"a/{path}", tofile=f"b/{path}", lineterm="")
230 | )
231 |
232 | runner = get_hook_runner(project_dir)
233 | pre = runner.fire(
234 | "pre_edit_by_diff", {"path": path, "diff": diff, "apply": bool(apply), "mutating": True}
235 | )
236 | if not pre.allowed:
237 | return _with_hooks(f"edit {path} blocked.\n{pre.message}", pre)
238 |
239 | allowed, msg = _should_apply(apply, safety, is_mutation=True)
240 | if not allowed:
241 | base = f"dry-run (apply={apply}): edit {path}\nDiff:\n{_clip(diff)}\nReason: {msg}"
242 | return _with_hooks(base, pre)
243 |
244 | try:
245 | p.write_text(new_text, encoding="utf-8")
246 | base = f"Applied 1 edit to {path}.\nDiff:\n{_clip(diff)}"
247 | post = runner.fire(
248 | "post_edit_by_diff", {"path": path, "diff": diff, "apply": bool(apply), "mutating": True}
249 | )
250 | return _with_hooks(base, pre, post)
251 | except Exception as e:
252 | return _with_hooks(f"Error writing {path}: {type(e).__name__}: {e}", pre)
253 |
254 | return edit_by_diff
255 |
256 |
257 | def make_delete_file_tool(project_dir: str, apply: bool):
258 | @tool(
259 | "delete_file",
260 | description="Delete a file. Respects --apply and fires pre/post hooks.",
261 | return_direct=False,
262 | )
263 | def delete_file(path: str, *, safety: str = "require") -> str:
264 | """
265 | Delete a file.
266 |
267 | Args:
268 | path: File path (relative to project root).
269 | safety: 'auto' | 'require' | 'force'.
270 |
271 | Returns:
272 | Operation summary, plus [hooks] if any ran.
273 | """
274 | p = _rooted(project_dir, path)
275 | if not p.exists():
276 | return f"{path} not found."
277 | if not p.is_file():
278 | return f"{path} is not a file."
279 |
280 | runner = get_hook_runner(project_dir)
281 | pre = runner.fire("pre_delete_file", {"path": path, "apply": bool(apply), "mutating": True})
282 | if not pre.allowed:
283 | return _with_hooks(f"delete {path} blocked.\n{pre.message}", pre)
284 |
285 | allowed, msg = _should_apply(apply, safety, is_mutation=True)
286 | if not allowed:
287 | return _with_hooks(f"dry-run (apply={apply}): delete {path}\nReason: {msg}", pre)
288 |
289 | try:
290 | p.unlink()
291 | base = f"Deleted {path}."
292 | post = runner.fire("post_delete_file", {"path": path, "apply": bool(apply), "mutating": True})
293 | return _with_hooks(base, pre, post)
294 | except Exception as e:
295 | return _with_hooks(f"Error deleting {path}: {type(e).__name__}: {e}", pre)
296 |
297 | return delete_file
298 |
--------------------------------------------------------------------------------
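
The `_should_apply` gate above is small enough to verify by truth table. Restated standalone below, returning only the boolean and dropping the human-readable reason; the cases confirm that 'force' always runs, non-mutations always run, and mutations require apply=True under both 'auto' and 'require'.

```python
# Truth-table check of the _should_apply gate in fs_local.py.
def should_apply(apply: bool, safety: str, is_mutation: bool) -> bool:
    safety = (safety or "auto").strip().lower()
    if safety not in {"auto", "require", "force"}:
        safety = "auto"
    if safety == "force" or not is_mutation:
        return True
    return apply

cases = [
    ((False, "require", True), False),  # mutation without consent: blocked
    ((True,  "require", True), True),   # mutation with --apply: allowed
    ((False, "auto",    True), False),  # 'auto' still blocks mutations
    ((False, "auto",    False), True),  # reads never need consent
    ((False, "force",   True), True),   # 'force' bypasses the gate
]
for args, expected in cases:
    assert should_apply(*args) == expected, args
print("safety gate matches the documented policy")
```
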
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------