├── .gitmodules ├── tests ├── __init__.py ├── stream_responses │ ├── response7.json │ ├── response8.json │ ├── response5.json │ ├── response6.json │ ├── response1.json │ ├── response2.json │ ├── response3.json │ └── response4.json ├── test_prompter.py ├── conftest.py ├── test_cli_topic.py ├── test_utils.py ├── test_config.py ├── test_openai_message.py ├── test_command_parser.py └── test_namespace.py ├── devchat ├── msg │ ├── __init__.py │ ├── schema.py │ ├── util.py │ ├── chatting.py │ ├── user_info.py │ ├── topic_util.py │ └── log_util.py ├── _service │ ├── README.md │ ├── __init__.py │ ├── schema │ │ ├── __init__.py │ │ ├── request.py │ │ └── response.py │ ├── config.py │ ├── route │ │ ├── __init__.py │ │ ├── logs.py │ │ ├── topics.py │ │ └── message.py │ ├── main.py │ ├── uvicorn_logging.py │ └── gunicorn_logging.py ├── workflow │ ├── __init__.py │ ├── command │ │ ├── __init__.py │ │ ├── run.py │ │ ├── config.py │ │ ├── update.py │ │ ├── list.py │ │ └── env.py │ ├── README.md │ ├── envs.py │ ├── cli.py │ ├── user_setting.py │ ├── path.py │ ├── update_flowchart.md │ ├── schema.py │ ├── namespace.py │ └── step.py ├── chatmark │ ├── .gitignore │ ├── README.md │ ├── __init__.py │ ├── chatmark_example │ │ ├── README.md │ │ └── main.py │ ├── step.py │ ├── iobase.py │ └── form.py ├── _cli │ ├── errors.py │ ├── __init__.py │ ├── main.py │ ├── topic.py │ ├── route.py │ ├── prompt.py │ ├── log.py │ ├── utils.py │ └── run.py ├── anthropic │ ├── __init__.py │ └── anthropic_chat.py ├── __main__.py ├── __init__.py ├── memory │ ├── __init__.py │ ├── base.py │ └── fixsize_memory.py ├── ide │ ├── __init__.py │ ├── idea_services.py │ ├── types.py │ ├── rpc.py │ ├── vscode_services.py │ └── service.py ├── path.py ├── openai │ ├── __init__.py │ ├── openai_message.py │ ├── http_openai.py │ └── openai_chat.py ├── engine │ ├── __init__.py │ ├── recursive_prompter.py │ ├── command_parser.py │ ├── router.py │ ├── namespace.py │ └── util.py ├── llm │ ├── __init__.py │ ├── text_confirm.py │ ├── chat.py │ └── pipeline.py ├── message.py ├── workspace_util.py ├── chat.py └── config.py ├── workflow ├── workflows │ ├── code │ │ ├── python.txt │ │ └── instruct.txt │ ├── commit_message │ │ └── instruct.txt │ └── release_note │ │ └── instruct.txt └── commands │ └── git │ └── git.yml ├── Makefile ├── .circleci └── config.yml ├── .github ├── ISSUE_TEMPLATE │ ├── feature_request.md │ └── bug_report.md └── workflows │ └── dev.yaml ├── scripts └── purge_topics.py ├── no_binary_install.sh ├── pyproject.toml ├── README.zh.md ├── .gitignore └── README.md /.gitmodules: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /devchat/msg/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /devchat/_service/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /devchat/_service/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /devchat/workflow/__init__.py: 
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/devchat/chatmark/.gitignore:
--------------------------------------------------------------------------------
1 | tmp/
--------------------------------------------------------------------------------
/devchat/_service/schema/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/devchat/workflow/command/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/devchat/_cli/errors.py:
--------------------------------------------------------------------------------
1 | class MissContentInPromptException(Exception):
2 |     pass
3 | 
--------------------------------------------------------------------------------
/devchat/anthropic/__init__.py:
--------------------------------------------------------------------------------
1 | from .anthropic_chat import AnthropicChatParameters
2 | 
3 | __all__ = ["AnthropicChatParameters"]
4 | 
--------------------------------------------------------------------------------
/devchat/__main__.py:
--------------------------------------------------------------------------------
1 | import sys
2 | 
3 | if __name__ == "__main__":
4 |     from devchat._cli.main import main as _main
5 | 
6 |     sys.exit(_main(windows_expand_args=False))
7 | 
--------------------------------------------------------------------------------
/devchat/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | 
3 | script_dir = os.path.dirname(os.path.realpath(__file__))
4 | os.environ["TIKTOKEN_CACHE_DIR"] = os.path.join(script_dir, "tiktoken_cache")
5 | 
--------------------------------------------------------------------------------
/devchat/memory/__init__.py:
--------------------------------------------------------------------------------
1 | from .base import ChatMemory
2 | from .fixsize_memory import FixSizeChatMemory
3 | 
4 | __all__ = [
5 |     "ChatMemory",
6 |     "FixSizeChatMemory",
7 | ]
8 | 
--------------------------------------------------------------------------------
/devchat/chatmark/README.md:
--------------------------------------------------------------------------------
1 | # ChatMark
2 | 
3 | ChatMark is a markup language for user interaction in chat messages.
4 | 
5 | This module provides a Python implementation of the common widgets in ChatMark.
6 | 
--------------------------------------------------------------------------------
/devchat/ide/__init__.py:
--------------------------------------------------------------------------------
1 | from .service import IDEService
2 | from .types import *  # noqa: F403
3 | from .types import __all__ as types_all
4 | 
5 | __all__ = types_all + [
6 |     "IDEService",
7 | ]
8 | 
--------------------------------------------------------------------------------
/workflow/workflows/code/python.txt:
--------------------------------------------------------------------------------
1 | When writing Python code, include type hints where appropriate and maintain compliance with PEP-8 guidelines, such as providing docstrings for modules, classes, and functions.
2 | 
--------------------------------------------------------------------------------
/devchat/path.py:
--------------------------------------------------------------------------------
1 | import os
2 | 
3 | # -------------------------------
4 | # devchat basic paths
5 | # -------------------------------
6 | USE_DIR = os.path.expanduser("~")
7 | USER_CHAT_DIR = os.path.join(USE_DIR, ".chat")
8 | 
--------------------------------------------------------------------------------
/devchat/workflow/README.md:
--------------------------------------------------------------------------------
1 | # Workflow Engine
2 | 
3 | The Workflow Engine allows users to create, manage, and run workflows in DevChat.
4 | 
5 | This is the refactored and enhanced version of the engine/ module and of some commands in the _cli/ module.
6 | 
--------------------------------------------------------------------------------
/devchat/workflow/command/run.py:
--------------------------------------------------------------------------------
1 | import click
2 | 
3 | 
4 | @click.command(help="Run a workflow.", name="run")
5 | @click.argument("workflow_name")
6 | @click.argument("user_input")
7 | def run_workflow(workflow_name: str, user_input: str):
8 |     # TODO: Replace `devchat route` with this command (`devchat workflow run`) later
9 |     pass
10 | 
--------------------------------------------------------------------------------
/devchat/chatmark/__init__.py:
--------------------------------------------------------------------------------
1 | from .form import Form
2 | from .step import Step
3 | from .widgets import Button, Checkbox, Radio, TextEditor
4 | 
5 | __all__ = [
6 |     "Checkbox",
7 |     "TextEditor",
8 |     "Radio",
9 |     "Button",
10 |     "Form",
11 |     "Step",
12 | ]
13 | 
--------------------------------------------------------------------------------
/devchat/openai/__init__.py:
--------------------------------------------------------------------------------
1 | from .openai_chat import OpenAIChat, OpenAIChatConfig, OpenAIChatParameters
2 | from .openai_message import OpenAIMessage
3 | from .openai_prompt import OpenAIPrompt
4 | 
5 | __all__ = [
6 |     "OpenAIChat",
7 |     "OpenAIChatConfig",
8 |     "OpenAIChatParameters",
9 |     "OpenAIMessage",
10 |     "OpenAIPrompt",
11 | ]
12 | 
--------------------------------------------------------------------------------
/devchat/workflow/envs.py:
--------------------------------------------------------------------------------
1 | """
2 | Explicitly define the environment variables used in the workflow engine.
3 | """ 4 | 5 | import os 6 | 7 | PYTHON_PATH = os.environ.get("PYTHONPATH", "") 8 | DEVCHAT_PYTHON_PATH = os.environ.get("DEVCHAT_PYTHONPATH", PYTHON_PATH) 9 | 10 | # the path to the mamba binary 11 | MAMBA_BIN_PATH = os.environ.get("MAMBA_BIN_PATH", "") 12 | -------------------------------------------------------------------------------- /tests/stream_responses/response7.json: -------------------------------------------------------------------------------- 1 | { 2 | "choices": [ 3 | { 4 | "delta": {}, 5 | "finish_reason": "stop", 6 | "index": 0 7 | } 8 | ], 9 | "created": 1682163460, 10 | "id": "chatcmpl-785qeSHlUo3lH4OnLuovtk3NqLcD6", 11 | "model": "gpt-3.5-turbo-0301", 12 | "object": "chat.completion.chunk" 13 | } -------------------------------------------------------------------------------- /tests/stream_responses/response8.json: -------------------------------------------------------------------------------- 1 | { 2 | "choices": [ 3 | { 4 | "delta": {}, 5 | "finish_reason": "stop", 6 | "index": 1 7 | } 8 | ], 9 | "created": 1682163460, 10 | "id": "chatcmpl-785qeSHlUo3lH4OnLuovtk3NqLcD6", 11 | "model": "gpt-3.5-turbo-0301", 12 | "object": "chat.completion.chunk" 13 | } -------------------------------------------------------------------------------- /tests/stream_responses/response5.json: -------------------------------------------------------------------------------- 1 | { 2 | "choices": [ 3 | { 4 | "delta": { 5 | "content": "." 6 | }, 7 | "finish_reason": null, 8 | "index": 0 9 | } 10 | ], 11 | "created": 1682163460, 12 | "id": "chatcmpl-785qeSHlUo3lH4OnLuovtk3NqLcD6", 13 | "model": "gpt-3.5-turbo-0301", 14 | "object": "chat.completion.chunk" 15 | } -------------------------------------------------------------------------------- /tests/stream_responses/response6.json: -------------------------------------------------------------------------------- 1 | { 2 | "choices": [ 3 | { 4 | "delta": { 5 | "content": "!" 
6 | }, 7 | "finish_reason": null, 8 | "index": 1 9 | } 10 | ], 11 | "created": 1682163460, 12 | "id": "chatcmpl-785qeSHlUo3lH4OnLuovtk3NqLcD6", 13 | "model": "gpt-3.5-turbo-0301", 14 | "object": "chat.completion.chunk" 15 | } -------------------------------------------------------------------------------- /tests/stream_responses/response1.json: -------------------------------------------------------------------------------- 1 | { 2 | "choices": [ 3 | { 4 | "delta": { 5 | "role": "assistant" 6 | }, 7 | "finish_reason": null, 8 | "index": 0 9 | } 10 | ], 11 | "created": 1682163460, 12 | "id": "chatcmpl-785qeSHlUo3lH4OnLuovtk3NqLcD6", 13 | "model": "gpt-3.5-turbo-0301", 14 | "object": "chat.completion.chunk" 15 | } -------------------------------------------------------------------------------- /tests/stream_responses/response2.json: -------------------------------------------------------------------------------- 1 | { 2 | "choices": [ 3 | { 4 | "delta": { 5 | "content": "Tomorrow" 6 | }, 7 | "finish_reason": null, 8 | "index": 0 9 | } 10 | ], 11 | "created": 1682163460, 12 | "id": "chatcmpl-785qeSHlUo3lH4OnLuovtk3NqLcD6", 13 | "model": "gpt-3.5-turbo-0301", 14 | "object": "chat.completion.chunk" 15 | } -------------------------------------------------------------------------------- /tests/stream_responses/response3.json: -------------------------------------------------------------------------------- 1 | { 2 | "choices": [ 3 | { 4 | "delta": { 5 | "role": "assistant" 6 | }, 7 | "finish_reason": null, 8 | "index": 1 9 | } 10 | ], 11 | "created": 1682163460, 12 | "id": "chatcmpl-785qeSHlUo3lH4OnLuovtk3NqLcD6", 13 | "model": "gpt-3.5-turbo-0301", 14 | "object": "chat.completion.chunk" 15 | } -------------------------------------------------------------------------------- /tests/stream_responses/response4.json: -------------------------------------------------------------------------------- 1 | { 2 | "choices": [ 3 | { 4 | "delta": { 5 | "content": "Tomorrow" 6 | }, 7 | "finish_reason": null, 8 | "index": 1 9 | } 10 | ], 11 | "created": 1682163460, 12 | "id": "chatcmpl-785qeSHlUo3lH4OnLuovtk3NqLcD6", 13 | "model": "gpt-3.5-turbo-0301", 14 | "object": "chat.completion.chunk" 15 | } -------------------------------------------------------------------------------- /devchat/_cli/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from .log import log 4 | from .prompt import prompt 5 | from .route import route 6 | from .run import run 7 | from .topic import topic 8 | 9 | script_dir = os.path.dirname(os.path.realpath(__file__)) 10 | os.environ["TIKTOKEN_CACHE_DIR"] = os.path.join(script_dir, "..", "tiktoken_cache") 11 | 12 | __all__ = [ 13 | "log", 14 | "prompt", 15 | "run", 16 | "topic", 17 | "route", 18 | ] 19 | -------------------------------------------------------------------------------- /devchat/engine/__init__.py: -------------------------------------------------------------------------------- 1 | from .command_parser import Command, CommandParser, parse_command 2 | from .namespace import Namespace 3 | from .recursive_prompter import RecursivePrompter 4 | from .router import load_workflow_instruction, run_command 5 | 6 | __all__ = [ 7 | "parse_command", 8 | "Command", 9 | "CommandParser", 10 | "Namespace", 11 | "RecursivePrompter", 12 | "run_command", 13 | "load_workflow_instruction", 14 | ] 15 | -------------------------------------------------------------------------------- /devchat/_service/config.py: 
--------------------------------------------------------------------------------
1 | from typing import Optional
2 | 
3 | from pydantic import BaseSettings
4 | 
5 | 
6 | class Settings(BaseSettings):
7 |     PORT: int = 22222
8 |     WORKERS: int = 2
9 |     WORKSPACE: Optional[str] = None
10 |     LOG_LEVEL: str = "INFO"
11 |     LOG_FILE: Optional[str] = "dc_svc.log"
12 |     JSON_LOGS: bool = False
13 | 
14 |     class Config:
15 |         env_prefix = "DC_SVC_"
16 |         case_sensitive = True
17 | 
18 | 
19 | config = Settings()
20 | 
--------------------------------------------------------------------------------
/devchat/ide/idea_services.py:
--------------------------------------------------------------------------------
1 | from .rpc import rpc_method
2 | from .types import LocationWithText
3 | 
4 | 
5 | class IdeaIDEService:
6 |     def __init__(self):
7 |         self._result = None
8 | 
9 |     @rpc_method
10 |     def get_visible_range(self) -> LocationWithText:
11 |         return LocationWithText.parse_obj(self._result)
12 | 
13 |     @rpc_method
14 |     def get_selected_range(self) -> LocationWithText:
15 |         return LocationWithText.parse_obj(self._result)
16 | 
--------------------------------------------------------------------------------
/devchat/llm/__init__.py:
--------------------------------------------------------------------------------
1 | from .chat import chat, chat_json
2 | from .openai import chat_completion_no_stream_return_json, chat_completion_stream
3 | from .text_confirm import llm_edit_confirm
4 | from .tools_call import chat_tools, llm_func, llm_param
5 | 
6 | __all__ = [
7 |     "chat_completion_stream",
8 |     "chat_completion_no_stream_return_json",
9 |     "chat_json",
10 |     "chat",
11 |     "llm_edit_confirm",
12 |     "llm_func",
13 |     "llm_param",
14 |     "chat_tools",
15 | ]
16 | 
--------------------------------------------------------------------------------
/devchat/anthropic/anthropic_chat.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict, List, Optional
2 | 
3 | from pydantic import BaseModel, Field
4 | 
5 | 
6 | class AnthropicChatParameters(BaseModel, extra="ignore"):
7 |     max_tokens_to_sample: int = Field(1024, ge=1)
8 |     stop_sequences: Optional[List[str]]
9 |     temperature: Optional[float] = Field(0.2, ge=0, le=1)
10 |     top_p: Optional[float]
11 |     top_k: Optional[int]
12 |     metadata: Optional[Dict[str, Any]]
13 |     stream: Optional[bool] = Field(True)
14 | 
--------------------------------------------------------------------------------
/devchat/chatmark/chatmark_example/README.md:
--------------------------------------------------------------------------------
1 | # chatmark_example
2 | 
3 | This is an example of how to use the chatmark module.
4 | 
5 | Usage:
6 | 
7 | 1. Copy the `chatmark_example` folder under `~/.chat/workflow/org`
8 | 2. Create `command.yml` under `~/.chat/workflow/org/chatmark_example` with the following content:
9 |    ```yaml
10 |    description: chatmark examples
11 |    steps:
12 |      - run: $command_python $command_path/main.py
13 | 
14 |    ```
15 | 3. Use the command `/chatmark_example` in the DevChat VS Code plugin.
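16 | 
17 | A minimal, illustrative sketch of a ChatMark script (the bundled `main.py` is the complete example; the snippet below only shows the general shape, using the widgets this package exports):
18 | 
19 | ```python
20 | from devchat.chatmark import Checkbox, Form, TextEditor
21 | 
22 | # Render a form, wait for the user to submit it, then read the input back.
23 | confirm = Checkbox(["Proceed"])
24 | note = TextEditor("initial text")
25 | Form(["Confirm?", confirm, "Notes:", note]).render()
26 | 
27 | if confirm.selections:
28 |     print(f"Confirmed with note: {note.new_text}")
29 | ```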
30 | 
31 | 
--------------------------------------------------------------------------------
/devchat/workflow/cli.py:
--------------------------------------------------------------------------------
1 | import click
2 | 
3 | from devchat.workflow.command.config import config_cmd
4 | from devchat.workflow.command.env import env
5 | from devchat.workflow.command.list import list_cmd
6 | from devchat.workflow.command.update import update
7 | 
8 | 
9 | @click.group(help="CLI for devchat workflow engine.")
10 | def workflow():
11 |     pass
12 | 
13 | 
14 | workflow.add_command(update)
15 | workflow.add_command(list_cmd)
16 | workflow.add_command(env)
17 | workflow.add_command(config_cmd)
18 | 
19 | 
20 | if __name__ == "__main__":
21 |     workflow()
22 | 
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: check fix
2 | 
3 | div = $(shell printf '=%.0s' {1..120})
4 | 
5 | DIR="."
6 | check:
7 | 	@echo ${div}
8 | 	poetry run ruff check $(DIR)
9 | 	poetry run ruff format $(DIR) --check
10 | 	@echo "Done!"
11 | 
12 | fix:
13 | 	@echo ${div}
14 | 	poetry run ruff format $(DIR)
15 | 	@echo ${div}
16 | 	poetry run ruff check $(DIR) --fix
17 | 	@echo "Done!"
18 | 
19 | 
20 | run-dev-svc:
21 | 	@echo "Running dev service on port 22222..."
22 | 	@uvicorn devchat._service.main:api_app --reload --port 22222
23 | 
24 | run-svc:
25 | 	@echo "Running service..."
26 | 	@python devchat/_service/main.py
27 | 
--------------------------------------------------------------------------------
/devchat/_cli/main.py:
--------------------------------------------------------------------------------
1 | """
2 | This module contains the main function for the DevChat CLI.
3 | """
4 | 
5 | import click
6 | 
7 | from devchat._cli import log, prompt, route, run, topic
8 | from devchat.utils import get_logger
9 | from devchat.workflow.cli import workflow
10 | 
11 | logger = get_logger(__name__)
12 | 
13 | 
14 | @click.group()
15 | def main():
16 |     """DevChat CLI: A command-line interface for DevChat."""
17 | 
18 | 
19 | main.add_command(prompt)
20 | main.add_command(log)
21 | main.add_command(run)
22 | main.add_command(topic)
23 | main.add_command(route)
24 | 
25 | main.add_command(workflow)
26 | 
--------------------------------------------------------------------------------
/.circleci/config.yml:
--------------------------------------------------------------------------------
1 | version: 2.1
2 | 
3 | jobs:
4 |   build:
5 |     docker:
6 |       - image: circleci/python:3.9
7 |     steps:
8 |       - checkout
9 |       - run:
10 |           name: Install Poetry
11 |           command: |
12 |             curl -sSL https://install.python-poetry.org | python3 -
13 |       - run:
14 |           name: Setup Python Environment
15 |           command: |
16 |             poetry install
17 |       - run:
18 |           name: Run linter
19 |           command: |
20 |             make check
21 |       - run:
22 |           name: Run Pytest
23 |           command: |
24 |             poetry run pytest
25 | 
26 | workflows:
27 |   version: 2
28 |   build:
29 |     jobs:
30 |       - build
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 | 
8 | ---
9 | 
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 | 
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 | 
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 | 
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 | 
--------------------------------------------------------------------------------
/devchat/chatmark/step.py:
--------------------------------------------------------------------------------
1 | from contextlib import AbstractContextManager
2 | 
3 | 
4 | class Step(AbstractContextManager):
5 |     """
6 |     Show a running step in the TUI.
7 | 
8 |     ChatMark syntax:
9 | 
10 |     ```Step
11 |     # Something is running...
12 |     some details...
13 |     ```
14 | 
15 |     Usage:
16 |         with Step("Something is running..."):
17 |             print("some details...")
18 |     """
19 | 
20 |     def __init__(self, title: str):
21 |         self.title = title
22 | 
23 |     def __enter__(self):
24 |         print(f"\n```Step\n# {self.title}", flush=True)
25 | 
26 |     def __exit__(self, exc_type, exc_val, exc_tb):
27 |         # close the step
28 |         print("\n```", flush=True)
29 | 
--------------------------------------------------------------------------------
/devchat/_service/route/__init__.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter
2 | 
3 | from .logs import router as log_router
4 | from .message import router as message_router
5 | from .topics import router as topic_router
6 | from .workflows import router as workflow_router
7 | 
8 | router = APIRouter()
9 | 
10 | 
11 | @router.get("/ping")
12 | async def ping():
13 |     return {"message": "pong"}
14 | 
15 | 
16 | router.include_router(workflow_router, prefix="/workflows", tags=["WorkflowManagement"])
17 | router.include_router(message_router, prefix="/message", tags=["Message"])
18 | router.include_router(log_router, prefix="/logs", tags=["LogManagement"])
19 | router.include_router(topic_router, prefix="/topics", tags=["TopicManagement"])
20 | 
--------------------------------------------------------------------------------
/devchat/memory/base.py:
--------------------------------------------------------------------------------
1 | class ChatMemory:
2 |     """
3 |     ChatMemory is the base class for all chat memory classes.
4 |     """
5 | 
6 |     def __init__(self):
7 |         pass
8 | 
9 |     def append(self, request, response):
10 |         """
11 |         Append a request and response to the memory.
12 |         """
13 |         # must be implemented in subclasses
14 | 
15 |     def append_request(self, request):
16 |         """
17 |         Append a request to the memory.
18 |         """
19 | 
20 |     def append_response(self, response):
21 |         """
22 |         Append a response to the memory.
23 |         """
24 | 
25 |     def contexts(self):
26 |         """
27 |         Return the contexts of the memory.
28 |         """
29 | 
--------------------------------------------------------------------------------
/scripts/purge_topics.py:
--------------------------------------------------------------------------------
1 | import sys
2 | 
3 | from tinydb import TinyDB
4 | 
5 | 
6 | def remove_topic_table(file_path: str):
7 |     try:
8 |         db = TinyDB(file_path)
9 |         if "topics" in db.tables():
10 |             db.drop_table("topics")
11 |             print("The 'topics' table has been removed.")
12 |         else:
13 |             print("The file does not contain a 'topics' table.")
14 |     except Exception as exc:
15 |         print(f"Error: {exc}. The file is not a valid TinyDB file or could not be processed.")
16 | 
17 | 
18 | if __name__ == "__main__":
19 |     if len(sys.argv) != 2:
20 |         print("Usage: python purge_topics.py <file_path>")
21 |         sys.exit(1)
22 | 
23 |     remove_topic_table(sys.argv[1])
24 | 
--------------------------------------------------------------------------------
/devchat/workflow/user_setting.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | 
3 | import oyaml as yaml
4 | 
5 | from .path import USER_SETTINGS_FILENAME, WORKFLOWS_BASE
6 | from .schema import UserSettings
7 | 
8 | 
9 | def _load_user_settings() -> UserSettings:
10 |     """
11 |     Load the user settings from the settings.yml file.
12 |     """
13 |     settings_path = Path(WORKFLOWS_BASE) / USER_SETTINGS_FILENAME
14 |     if not settings_path.exists():
15 |         return UserSettings()
16 | 
17 |     with open(settings_path, "r", encoding="utf-8") as file:
18 |         content = yaml.safe_load(file.read())
19 | 
20 |     if content:
21 |         return UserSettings.parse_obj(content)
22 | 
23 |     return UserSettings()
24 | 
25 | 
26 | USER_SETTINGS = _load_user_settings()
27 | 
--------------------------------------------------------------------------------
/.github/workflows/dev.yaml:
--------------------------------------------------------------------------------
1 | name: Dev
2 | 
3 | on:
4 |   pull_request:
5 |     branches: [main]
6 |   push:
7 |     branches: [main]
8 | 
9 | jobs:
10 |   lint-and-test:
11 |     name: Run linter and test
12 |     runs-on: ubuntu-latest
13 |     steps:
14 |       - uses: actions/checkout@v4
15 |       - uses: actions/setup-python@v4
16 |         with:
17 |           python-version: "3.8"
18 |       - uses: abatilo/actions-poetry@v2
19 |       - name: Install Python dependencies
20 |         run: |
21 |           poetry install
22 | 
23 |       - name: Run linter
24 |         run: |
25 |           make check
26 | 
27 |       - name: Run Pytest
28 |         run: |
29 |           poetry run pytest
30 |         env:
31 |           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY_FOR_TEST }}
32 | 
--------------------------------------------------------------------------------
/devchat/workflow/command/config.py:
--------------------------------------------------------------------------------
1 | import json
2 | from pathlib import Path
3 | 
4 | import click
5 | import oyaml as yaml
6 | 
7 | from devchat.workflow.path import WORKFLOWS_BASE, WORKFLOWS_CONFIG_FILENAME
8 | 
9 | 
10 | @click.command(help="Workflow configuration.", name="config")
11 | @click.option("--json", "in_json", is_flag=True, help="Output in json format.")
12 | def config_cmd(in_json: bool):
13 |     config_path = Path(WORKFLOWS_BASE) / WORKFLOWS_CONFIG_FILENAME
14 |     config_content = {}
15 |     if config_path.exists():
16 |         with open(config_path, "r", encoding="utf-8") as file:
17 |             config_content = yaml.safe_load(file.read())
18 | 
19 |     if not in_json:
20 |         click.echo(config_content)
21 | 
22 |     else:
23 |         json_format = json.dumps(config_content)
24 |         click.echo(json_format)
25 | 
--------------------------------------------------------------------------------
/devchat/workflow/command/update.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | 
3 | import click
4 | 
5 | from devchat.workflow.path import (
6 |     WORKFLOWS_BASE,
7 | )
8 | from devchat.workflow.update_util import (
9 |     HAS_GIT,
10 |     copy_workflows_usr,
11 |     update_by_git,
12 |     update_by_zip,
13 | )
14 | 
15 | 
16 | @click.command(help="Update the workflow_base dir.")
17 | @click.option("-f", "--force", is_flag=True, help="Force update the workflows to the latest main.")
18 | def update(force: bool):
19 |     click.echo(f"Updating workflow repo... force: {force}")
20 |     click.echo(f"WORKFLOWS_BASE: {WORKFLOWS_BASE}")
21 | 
22 |     base_path = Path(WORKFLOWS_BASE)
23 | 
24 |     if HAS_GIT:
25 |         updated, message = update_by_git(base_path)
26 |     else:
27 |         updated, message = update_by_zip(base_path)
28 | 
29 |     click.echo(f"- Updated: {updated}\n- Message: {message}")
30 |     copy_workflows_usr()
31 | 
--------------------------------------------------------------------------------
/devchat/message.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from dataclasses import dataclass
3 | 
4 | 
5 | @dataclass
6 | class Message(ABC):
7 |     """
8 |     The basic unit of information in a prompt.
9 |     """
10 | 
11 |     content: str = ""
12 | 
13 |     INSTRUCT = "instruct"
14 |     CONTEXT = "context"
15 |     FUNCTION = "function"
16 |     CHAT = "chat"
17 | 
18 |     @abstractmethod
19 |     def to_dict(self) -> dict:
20 |         """
21 |         Convert the message to a dictionary.
22 |         """
23 | 
24 |     @classmethod
25 |     @abstractmethod
26 |     def from_dict(cls, message_data: dict) -> "Message":
27 |         """
28 |         Convert the message from a dictionary.
29 |         """
30 | 
31 |     @abstractmethod
32 |     def stream_from_dict(self, message_data: dict) -> str:
33 |         """
34 |         Append to the message from a dictionary returned from a streaming chat API.
35 |         """
36 | 
--------------------------------------------------------------------------------
/tests/test_prompter.py:
--------------------------------------------------------------------------------
1 | import os
2 | 
3 | from devchat.engine import Namespace, RecursivePrompter
4 | 
5 | 
6 | def test_prompter(tmp_path):
7 |     namespace = Namespace(tmp_path)
8 |     prompter = RecursivePrompter(namespace)
9 | 
10 |     # Test when there are no 'prompt.txt' files
11 |     os.makedirs(os.path.join(tmp_path, "usr", "a", "b", "c"))
12 |     assert prompter.run("a.b.c") == ""
13 | 
14 |     # Test when there is a 'prompt.txt' file in one ancestor
15 |     os.makedirs(os.path.join(tmp_path, "sys", "a", "b", "c"))
16 |     with open(os.path.join(tmp_path, "sys", "a", "prompt.txt"), "w", encoding="utf-8") as file:
17 |         file.write("prompt a")
18 |     assert prompter.run("a.b.c") == "prompt a\n"
19 | 
20 |     # Test when there are 'prompt.txt' files in multiple ancestors
21 |     with open(os.path.join(tmp_path, "usr", "a", "b", "prompt.txt"), "w", encoding="utf-8") as file:
22 |         file.write("prompt b")
23 |     assert prompter.run("a.b.c") == "prompt a\nprompt b\n"
24 | 
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 | 
8 | ---
9 | 
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 | 
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. Go to '...'
16 | 2. Click on '....'
17 | 3. Scroll down to '....'
18 | 4. See error
19 | 
20 | **Expected behavior**
21 | A clear and concise description of what you expected to happen.
22 | 
23 | **Screenshots**
24 | If applicable, add screenshots to help explain your problem.
25 | 
26 | **Desktop (please complete the following information):**
27 |  - OS: [e.g. iOS]
28 |  - Browser [e.g. chrome, safari]
29 |  - Version [e.g. 22]
30 | 
31 | **Smartphone (please complete the following information):**
32 |  - Device: [e.g. iPhone6]
33 |  - OS: [e.g. iOS8.1]
34 |  - Browser [e.g. stock browser, safari]
35 |  - Version [e.g. 22]
36 | 
37 | **Additional context**
38 | Add any other context about the problem here.
39 | 
--------------------------------------------------------------------------------
/workflow/commands/git/git.yml:
--------------------------------------------------------------------------------
1 | version: 2.1
2 | commands:
3 |   git_diff:
4 |     description: "Runs the 'git diff' command with optional flags and a specified path."
5 |     parameters:
6 |       option_string:
7 |         type: string
8 |         default: ""
9 |         description: "Optional flags or arguments to pass to the 'git diff' command."
10 |       path:
11 |         type: string
12 |         description: "The file or directory to run the 'git diff' command on."
13 |     steps:
14 |       - run: git diff << parameters.option_string >> << parameters.path >>
15 |   git_log:
16 |     description: "Runs the 'git log' command with optional flags and a specified path."
17 |     parameters:
18 |       option_string:
19 |         type: string
20 |         default: ""
21 |         description: "Optional flags or arguments to pass to the 'git log' command."
22 |       path:
23 |         type: string
24 |         description: "The file or directory to run the 'git log' command on."
25 |     steps:
26 |       - run: git log << parameters.option_string >> << parameters.path >>
--------------------------------------------------------------------------------
/devchat/chatmark/iobase.py:
--------------------------------------------------------------------------------
1 | import yaml
2 | 
3 | 
4 | def _send_message(message):
5 |     out_data = f"""\n{message}\n"""
6 |     print(out_data, flush=True)
7 | 
8 | 
9 | def _parse_chatmark_response(response):
10 |     # The response looks like:
11 |     """
12 |     ```yaml
13 |     some key name 1: value1
14 |     some key name 2: value2
15 |     ```
16 |     """
17 |     # parse key values
18 |     lines = response.strip().split("\n")
19 |     if len(lines) <= 2:
20 |         return {}
21 | 
22 |     data = yaml.safe_load("\n".join(lines[1:-1]))
23 |     return data
24 | 
25 | 
26 | def pipe_interaction(message: str):
27 |     _send_message(message)
28 | 
29 |     lines = []
30 |     while True:
31 |         try:
32 |             line = input()
33 |             if line.strip().startswith("```yaml"):
34 |                 lines = []
35 |             elif line.strip() == "```":
36 |                 lines.append(line)
37 |                 break
38 |             lines.append(line)
39 |         except EOFError:
40 |             # stdin closed before the response finished; stop waiting
41 |             # instead of looping forever on repeated EOFError
42 |             break
43 | 
44 |     response = "\n".join(lines)
45 |     return _parse_chatmark_response(response)
46 | 
--------------------------------------------------------------------------------
/devchat/_cli/topic.py:
--------------------------------------------------------------------------------
1 | import click
2 | 
3 | 
4 | @click.command(help="Manage topics")
5 | @click.option(
6 |     "--list", "-l", "list_topics", is_flag=True, help="List topics in reverse chronological order."
7 | )
8 | @click.option("--skip", default=0, help="Skip number of topics before showing the list.")
9 | @click.option("-n", "--max-count", default=100, help="Limit the number of topics to output.")
10 | def topic(list_topics: bool, skip: int, max_count: int):
11 |     """
12 |     Manage topics.
13 |     """
14 |     import json
15 | 
16 |     from devchat._cli.utils import get_model_config, handle_errors, init_dir
17 |     from devchat.openai import OpenAIChat, OpenAIChatConfig
18 |     from devchat.store import Store
19 | 
20 |     repo_chat_dir, user_chat_dir = init_dir()
21 | 
22 |     with handle_errors():
23 |         model, config = get_model_config(user_chat_dir)
24 |         parameters_data = config.dict(exclude_unset=True)
25 |         openai_config = OpenAIChatConfig(model=model, **parameters_data)
26 | 
27 |         chat = OpenAIChat(openai_config)
28 |         store = Store(repo_chat_dir, chat)
29 | 
30 |         if list_topics:
31 |             topics = store.select_topics(skip, skip + max_count)
32 |             print(json.dumps(topics, indent=2))
33 | 
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | import os
2 | import shutil
3 | import tempfile
4 | from pathlib import Path
5 | 
6 | import pytest
7 | from git import Repo
8 | 
9 | 
10 | @pytest.fixture(scope="function")
11 | def git_repo(request):
12 |     # Create a temporary directory
13 |     repo_dir = tempfile.mkdtemp()
14 | 
15 |     # Initialize a new Git repository in the temporary directory
16 |     Repo.init(repo_dir)
17 | 
18 |     # Change the current working directory to the temporary directory
19 |     prev_cwd = os.getcwd()
20 |     os.chdir(repo_dir)
21 | 
22 |     # Add a cleanup function to remove the temporary directory after the test
23 |     def cleanup():
24 |         os.chdir(prev_cwd)
25 |         shutil.rmtree(repo_dir)
26 | 
27 |     request.addfinalizer(cleanup)
28 |     return repo_dir
29 | 
30 | 
31 | @pytest.fixture(scope="function")
32 | def mock_home_dir(tmp_path, request):
33 |     home_dir = Path(tmp_path / "home")
34 |     home_dir.mkdir()
35 | 
36 |     original_home = os.environ.get("HOME")
37 |     os.environ["HOME"] = str(home_dir)
38 | 
39 |     def cleanup():
40 |         if original_home is not None:
41 |             os.environ["HOME"] = original_home
42 |         else:
43 |             del os.environ["HOME"]
44 |         shutil.rmtree(home_dir)
45 | 
46 |     request.addfinalizer(cleanup)
47 |     return home_dir
48 | 
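49 | # Illustrative usage sketch (not a test in this repository): any test module
50 | # under tests/ can request the fixtures above by name, e.g.
51 | #
52 | #     def test_inside_fresh_repo(git_repo, mock_home_dir):
53 | #         # cwd is the temporary repo; HOME points at the temporary home dir,
54 | #         # so nothing leaks into the real ~/.chat during the test.
55 | #         assert os.getcwd() == git_repo
56 | #         assert os.environ["HOME"] == str(mock_home_dir)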
--------------------------------------------------------------------------------
/tests/test_cli_topic.py:
--------------------------------------------------------------------------------
1 | import json
2 | import sys
3 | import time
4 | 
5 | import pytest
6 | from click.testing import CliRunner
7 | 
8 | from devchat._cli.main import main
9 | from devchat.utils import get_prompt_hash
10 | 
11 | runner = CliRunner()
12 | 
13 | 
14 | @pytest.mark.skip(reason="Not working as expected. However, the CLI is outdated and not necessary.")
15 | def test_topic_list(git_repo):
16 |     request = "Complete the sequence 1, 1, 3, 5, 9, ( ). Reply the number only."
17 |     sys.argv = ["prompt", "--model=gpt-3.5-turbo", request]
18 |     result = runner.invoke(main, ["prompt", "--model=gpt-3.5-turbo", request])
19 |     assert result.exit_code == 0
20 |     topic1 = get_prompt_hash(result.output)
21 | 
22 |     time.sleep(3)
23 | 
24 |     result = runner.invoke(main, ["prompt", "--model=gpt-4", request])
25 |     assert result.exit_code == 0
26 |     topic2 = get_prompt_hash(result.output)
27 | 
28 |     result = runner.invoke(main, ["topic", "--list"])
29 |     assert result.exit_code == 0
30 |     topics = json.loads(result.output)
31 |     assert len(topics) >= 2
32 |     assert topics[0]["root_prompt"]["responses"][0] == "15"
33 |     assert topics[1]["root_prompt"]["responses"][0] == "17"
34 |     assert topics[0]["root_prompt"]["hash"] == topic2
35 |     assert topics[1]["root_prompt"]["hash"] == topic1
36 | 
--------------------------------------------------------------------------------
/devchat/workspace_util.py:
--------------------------------------------------------------------------------
1 | import os
2 | from typing import Optional
3 | 
4 | from .path import USER_CHAT_DIR
5 | 
6 | 
7 | def _ensure_workspace_chat_dir(workspace_path: str) -> str:
8 |     """
9 |     Ensure the workspace chat directory exists and is ignored by git.
10 | 
11 |     Return the chat directory path.
12 |     """
13 |     assert workspace_path, "workspace path is required to create .chat directory"
14 |     chat_dir = os.path.join(workspace_path, ".chat")
15 | 
16 |     if not os.path.exists(chat_dir):
17 |         try:
18 |             os.makedirs(chat_dir, exist_ok=True)
19 |         except FileExistsError:
20 |             pass
21 | 
22 |     # ignore the .chat dir in the user's workspace
23 |     ignore_file = os.path.join(chat_dir, ".gitignore")
24 |     ignore_content = "*\n"
25 |     if not os.path.exists(ignore_file):
26 |         with open(ignore_file, "w") as f:
27 |             f.write(ignore_content)
28 | 
29 |     return chat_dir
30 | 
31 | 
32 | def get_workspace_chat_dir(workspace_path: Optional[str]) -> str:
33 |     """
34 |     Get the chat directory for a workspace.
35 |     Return the user chat directory if workspace is None.
36 |     """
37 |     workspace_chat_dir = USER_CHAT_DIR
38 |     if workspace_path:
39 |         workspace_chat_dir = _ensure_workspace_chat_dir(workspace_path)
40 | 
41 |     return workspace_chat_dir
42 | 
--------------------------------------------------------------------------------
/devchat/ide/types.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 | 
3 | from pydantic import BaseModel
4 | 
5 | __all__ = ["Position", "Range", "Location", "SymbolNode", "LocationWithText"]
6 | 
7 | 
8 | class Position(BaseModel):
9 |     line: int  # 0-based
10 |     character: int  # 0-based
11 | 
12 |     def __repr__(self):
13 |         return f"Ln{self.line}:Col{self.character}"
14 | 
15 |     def __hash__(self):
16 |         return hash(self.__repr__())
17 | 
18 | 
19 | class Range(BaseModel):
20 |     start: Position
21 |     end: Position
22 | 
23 |     def __repr__(self):
24 |         return f"{self.start} - {self.end}"
25 | 
26 |     def __hash__(self):
27 |         return hash(self.__repr__())
28 | 
29 | 
30 | class Location(BaseModel):
31 |     abspath: str
32 |     range: Range
33 | 
34 |     def __repr__(self):
35 |         return f"{self.abspath}::{self.range}"
36 | 
37 |     def __hash__(self):
38 |         return hash(self.__repr__())
39 | 
40 | 
41 | class SymbolNode(BaseModel):
42 |     name: str
43 |     kind: str
44 |     range: Range
45 |     children: List["SymbolNode"]
46 | 
47 | 
48 | class LocationWithText(BaseModel):
49 |     abspath: str
50 |     range: Range
51 |     text: str
52 | 
53 |     def __repr__(self):
54 |         return f"{self.abspath}::{self.range}::{self.text}"
55 | 
56 |     def __hash__(self):
57 |         return hash(self.__repr__())
58 | 
--------------------------------------------------------------------------------
/devchat/workflow/command/list.py:
--------------------------------------------------------------------------------
1 | import json
2 | from typing import List
3 | 
4 | import click
5 | 
6 | from devchat.utils import get_logger
7 | from devchat.workflow.namespace import (
8 |     WorkflowMeta,
9 |     get_prioritized_namespace_path,
10 |     iter_namespace,
11 | )
12 | 
13 | logger = get_logger(__name__)
14 | 
15 | 
16 | @click.command(help="List all local workflows.", name="list")
17 | @click.option("--json", "in_json", is_flag=True, help="Output in json format.")
18 | def list_cmd(in_json: bool):
19 |     namespace_paths = get_prioritized_namespace_path()
20 | 
21 |     workflows: List[WorkflowMeta] = []
22 |     visited_names = set()
23 |     for ns_path in namespace_paths:
24 |         ws_names, visited_names = iter_namespace(ns_path, visited_names)
25 |         workflows.extend(ws_names)
26 | 
27 |     if not in_json:
28 |         # print basic info
29 |         active_count = len([workflow for workflow in workflows if workflow.active])
30 |         total_count = len(workflows)
31 |         click.echo(f"workflows (active/total): {active_count}/{total_count}")
32 |         for workflow in workflows:
33 |             click.echo(workflow)
34 | 
35 |     else:
36 |         # convert workflows to json
37 |         # data = [asdict(workflow) for workflow in workflows]
38 |         data = [workflow.dict() for workflow in workflows]
39 |         json_format = json.dumps(data)
40 |         click.echo(json_format)
41 | 
--------------------------------------------------------------------------------
/workflow/workflows/commit_message/instruct.txt:
--------------------------------------------------------------------------------
1 | As a software developer assistant, your task is to provide clear and concise responses and write commit messages based on given code, requirements, or conversations.
2 | Follow these guidelines:
3 | 
4 | 1. A commit message should include a title and multiple body lines.
5 | 2. Adhere to best practices, such as keeping titles under 50 characters and limiting body lines to under 72 characters.
6 | 3. Format all commit messages by enclosing each message within a block of triple backticks (```), and include 'commitmsg' alongside the beginning backticks.
7 |    For example:
8 |    ```commitmsg
9 |    The title
10 | 
11 |    - The first bullet point.
12 |    - The second bullet point.
13 |    ```
14 | 4. Utilize the diff output in the <context> to create the summary.
15 | 5. Utilize the previous messages, if provided at the end of this prompt, to improve the title or bullet points by clearly conveying the intention of code changes.
16 |    Note that not all previous messages are necessarily relevant.
17 |    For example, disregard any previous commit messages you have written.
18 |    You may encounter duplicate or conflicting messages, and the later messages should be considered as the most accurate.
19 | 6. Prioritize the diff output in the given <context> and focus on actual code changes.
20 |    Disregard any previous messages unrelated to the diff output.
21 | 
22 | If you need more information, ask for it.
23 | 
--------------------------------------------------------------------------------
/tests/test_utils.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | from devchat.utils import parse_files
4 | 
5 | 
6 | def test_parse_files_empty_input():
7 |     assert not parse_files([])
8 | 
9 | 
10 | def test_parse_files_nonexistent_file():
11 |     with pytest.raises(ValueError, match="File .* does not exist."):
12 |         parse_files(["nonexistent_file.txt"])
13 | 
14 | 
15 | def test_parse_files_empty_file(tmpdir):
16 |     empty_file = tmpdir.join("empty_file.txt")
17 |     empty_file.write("")
18 | 
19 |     with pytest.raises(ValueError, match="File .* is empty."):
20 |         parse_files([str(empty_file)])
21 | 
22 | 
23 | def test_parse_files_single_file(tmpdir):
24 |     file1 = tmpdir.join("file1.txt")
25 |     file1.write("Hello, World!")
26 | 
27 |     assert parse_files([str(file1)]) == ["Hello, World!"]
28 | 
29 | 
30 | def test_parse_files_multiple_files(tmpdir):
31 |     file1 = tmpdir.join("file1.txt")
32 |     file1.write("Hello, World!")
33 |     file2 = tmpdir.join("file2.txt")
34 |     file2.write("This is a test.")
35 | 
36 |     assert parse_files([str(file1), str(file2)]) == ["Hello, World!", "This is a test."]
37 | 
38 | 
39 | def test_parse_files_invalid_path(tmpdir):
40 |     file1 = tmpdir.join("file1.txt")
41 |     file1.write("Hello, World!")
42 |     invalid_path = "invalid/path/file2.txt"
43 | 
44 |     with pytest.raises(ValueError, match="File .* does not exist."):
45 |         parse_files(f"{file1},{invalid_path}")
46 | 
--------------------------------------------------------------------------------
/devchat/msg/schema.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, List, Optional
2 | 
3 | from pydantic import BaseModel, Field
4 | 
5 | 
6 | class MessageRequest(BaseModel):
7 |     content: str = Field(..., description="message content")
8 |     model_name: str = Field(..., description="LLM model name")
9 |     workspace: Optional[str] = Field(None, description="absolute path to the workspace/repository")
10 |     api_key: Optional[str] = Field(None, description="API key (OpenAI API key or DevChat Key)")
11 |     api_base: Optional[str] = Field(None, description="API base url")
12 |     parent: Optional[str] = Field(None, description="parent message hash in a thread")
13 |     context: Optional[List[str]] = Field(None, description="paths to context files")
14 | 
15 | 
16 | class MessageResponseChunk(BaseModel):
17 |     # TODO: add response hash
18 |     # response_hash: str = Field(
19 |     #     ...,
20 |     #     description="response hash, all chunks in a response should have the same hash",
21 |     # )
22 |     user: str = Field(..., description="user info")
23 |     date: str = Field(..., description="date time")
24 |     content: str = Field(..., description="chunk content")
25 |     finish_reason: str = Field(default="", description="finish reason")
26 |     # TODO: should handle isError in another way?
27 | isError: bool = Field(default=False, description="is error") 28 | extra: Dict = Field(default_factory=dict, description="extra data") 29 | -------------------------------------------------------------------------------- /devchat/_service/route/logs.py: -------------------------------------------------------------------------------- 1 | from fastapi import APIRouter, HTTPException, status 2 | 3 | from devchat._service.schema import request, response 4 | from devchat.msg.log_util import delete_log_prompt, gen_log_prompt, insert_log_prompt 5 | 6 | router = APIRouter() 7 | 8 | 9 | @router.post("/insert", response_model=response.InsertLog) 10 | def insert( 11 | item: request.InsertLog, 12 | ): 13 | try: 14 | prompt = gen_log_prompt(item.jsondata, item.filepath) 15 | prompt_hash = insert_log_prompt(prompt, item.workspace) 16 | except Exception as e: 17 | detail = f"Failed to insert log: {str(e)}" 18 | raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=detail) 19 | return response.InsertLog(hash=prompt_hash) 20 | 21 | 22 | @router.post("/delete", response_model=response.DeleteLog) 23 | def delete( 24 | item: request.DeleteLog, 25 | ): 26 | try: 27 | success = delete_log_prompt(item.hash, item.workspace) 28 | if not success: 29 | detail = f"Failed to delete log <{item.hash}>. Log not found or is not a leaf." 30 | raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=detail) 31 | 32 | except Exception as e: 33 | detail = f"Failed to delete log <{item.hash}>: {str(e)}" 34 | raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=detail) 35 | 36 | return response.DeleteLog(success=success) 37 | -------------------------------------------------------------------------------- /tests/test_config.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from click.testing import CliRunner 4 | 5 | from devchat.config import ChatConfig, ConfigManager, GeneralModelConfig 6 | 7 | runner = CliRunner() 8 | 9 | 10 | def test_create_sample_config(tmp_path): 11 | ConfigManager(tmp_path) 12 | assert os.path.exists(os.path.join(tmp_path, "config.yml")) 13 | 14 | 15 | def test_load_and_validate_config(tmp_path): 16 | config_manager = ConfigManager(tmp_path) 17 | assert isinstance(config_manager.config, ChatConfig) 18 | 19 | 20 | def test_get_model_config(tmp_path): 21 | config_manager = ConfigManager(tmp_path) 22 | _, config = config_manager.model_config("gpt-4") 23 | assert config.max_input_tokens == 6000 24 | assert config.temperature == 0 25 | assert config.stream is True 26 | 27 | 28 | def test_update_model_config(tmp_path): 29 | model = "gpt-4" 30 | config_manager = ConfigManager(tmp_path) 31 | config = GeneralModelConfig(max_input_tokens=7000, temperature=0.5) 32 | updated_model_config = config_manager.update_model_config(model, config) 33 | assert updated_model_config == config_manager.model_config(model)[1] 34 | assert updated_model_config.max_input_tokens == 7000 35 | assert updated_model_config.temperature == 0.5 36 | assert updated_model_config.stream is True 37 | 38 | config_manager.sync() 39 | fresh_config_manager = ConfigManager(tmp_path) 40 | assert config_manager.config == fresh_config_manager.config 41 | -------------------------------------------------------------------------------- /devchat/_service/schema/request.py: -------------------------------------------------------------------------------- 1 | from typing import List, Optional 2 | 3 | from pydantic import BaseModel, Field 4 | 
5 | 6 | class UserMessage(BaseModel): 7 | content: str = Field(..., description="message content") 8 | model_name: str = Field(..., description="LLM model name") 9 | workspace: Optional[str] = Field(None, description="absolute path to the workspace/repository") 10 | api_key: Optional[str] = Field(None, description="API key (OpenAI API key or DevChat Key)") 11 | api_base: Optional[str] = Field(None, description="API base url") 12 | parent: Optional[str] = Field(None, description="parent message hash in a thread") 13 | context: Optional[List[str]] = Field(None, description="paths to context files") 14 | 15 | 16 | class InsertLog(BaseModel): 17 | workspace: Optional[str] = Field(None, description="absolute path to the workspace/repository") 18 | jsondata: Optional[str] = Field(None, description="data to insert in json format") 19 | filepath: Optional[str] = Field(None, description="file path to insert data in json format") 20 | 21 | 22 | class DeleteLog(BaseModel): 23 | hash: str = Field(..., description="hash of the prompt to delete") 24 | workspace: Optional[str] = Field(None, description="absolute path to the workspace/repository") 25 | 26 | 27 | class DeleteTopic(BaseModel): 28 | topic_hash: str = Field(..., description="hash of the topic to delete") 29 | workspace: Optional[str] = Field(None, description="absolute path to the workspace/repository") 30 | -------------------------------------------------------------------------------- /devchat/_service/main.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI 2 | from fastapi.middleware.cors import CORSMiddleware 3 | 4 | from devchat._service.config import config 5 | from devchat._service.route import router 6 | from devchat._service.uvicorn_logging import setup_logging 7 | 8 | api_app = FastAPI( 9 | title="DevChat Local Service", 10 | ) 11 | # 配置 CORS 12 | api_app.add_middleware( 13 | CORSMiddleware, 14 | allow_origins=["*"], # 允许所有源进行跨域请求 15 | allow_credentials=True, 16 | allow_methods=["*"], # 允许所有 HTTP 方法(如 GET、POST 等) 17 | allow_headers=["*"], # 允许所有请求头 18 | ) 19 | 20 | api_app.include_router(router) 21 | 22 | 23 | # app = socketio.ASGIApp(sio_app, api_app, socketio_path="devchat.socket") 24 | 25 | # NOTE: some references if we want to use socketio with FastAPI in the future 26 | 27 | # https://www.reddit.com/r/FastAPI/comments/170awhx/mount_socketio_to_fastapi/ 28 | # https://github.com/miguelgrinberg/python-socketio/blob/main/examples/server/asgi/fastapi-fiddle.py 29 | 30 | 31 | def main(): 32 | # Use uvicorn to run the app because gunicorn doesn't support Windows 33 | from uvicorn import Config, Server 34 | 35 | server = Server( 36 | Config( 37 | api_app, 38 | host="0.0.0.0", 39 | port=config.PORT, 40 | ), 41 | ) 42 | 43 | # setup logging last, to make sure no library overwrites it 44 | # (they shouldn't, but it happens) 45 | setup_logging() 46 | server.run() 47 | 48 | 49 | if __name__ == "__main__": 50 | main() 51 | -------------------------------------------------------------------------------- /devchat/msg/util.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from enum import Enum 3 | from typing import Any, Tuple 4 | 5 | from devchat.utils import unix_to_local_datetime, user_id 6 | from devchat.workflow.workflow import Workflow 7 | 8 | from .user_info import user_info 9 | 10 | 11 | class MessageType(Enum): 12 | """ 13 | Enum for message types 14 | """ 15 | 16 | CHATTING = "chatting" # chat 
with LLM directly 17 | WORKFLOW = "workflow" # trigger a workflow 18 | 19 | 20 | def mk_meta() -> Tuple[str, str]: 21 | """ 22 | Make metadata for a response 23 | """ 24 | name = user_info.name 25 | email = user_info.email 26 | user_str, _ = user_id(name, email) 27 | 28 | _timestamp = datetime.timestamp(datetime.now()) 29 | _local_time = unix_to_local_datetime(_timestamp) 30 | date_str = _local_time.strftime("%a %b %d %H:%M:%S %Y %z") 31 | 32 | return user_str, date_str 33 | 34 | 35 | def route_message_by_content(message_content: str) -> Tuple[MessageType, Any]: 36 | """ 37 | Route the message to the correct handler 38 | 1. trigger a workflow 39 | 2. chat with LLM directly 40 | """ 41 | content = message_content 42 | 43 | wf_name, wf_input = Workflow.parse_trigger(content) 44 | workflow = Workflow.load(wf_name) if wf_name else None 45 | 46 | if workflow: 47 | # the message should be handled by the workflow engine 48 | return MessageType.WORKFLOW, (workflow, wf_name, wf_input) 49 | 50 | else: 51 | # chat with LLM directly 52 | return MessageType.CHATTING, None 53 | -------------------------------------------------------------------------------- /devchat/workflow/path.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | # ------------------------------- 4 | # devchat basic paths 5 | # ------------------------------- 6 | USE_DIR = os.path.expanduser("~") 7 | CHAT_DIR = os.path.join(USE_DIR, ".chat") 8 | CHAT_CONFIG_FILENAME = "config.yml" 9 | 10 | 11 | # ------------------------------- 12 | # workflow scripts paths 13 | # ------------------------------- 14 | WORKFLOWS_BASE_NAME = "scripts" 15 | WORKFLOWS_BASE = os.path.join(CHAT_DIR, WORKFLOWS_BASE_NAME) # TODO: a temporary name 16 | WORKFLOWS_CONFIG_FILENAME = "config.yml" # Under WORKFLOWS_BASE 17 | 18 | MERICO_WORKFLOWS = os.path.join(WORKFLOWS_BASE, "merico") 19 | COMMUNITY_WORKFLOWS = os.path.join(WORKFLOWS_BASE, "community") 20 | 21 | COMMAND_FILENAMES = ["command.yml", "command.yaml"] 22 | 23 | 24 | # ------------------------------- 25 | # workflow related cache data 26 | # ------------------------------- 27 | CACHE_DIR = os.path.join(WORKFLOWS_BASE, "cache") 28 | ENV_CACHE_DIR = os.path.join(CACHE_DIR, "env_cache") 29 | os.makedirs(ENV_CACHE_DIR, exist_ok=True) 30 | 31 | 32 | # ------------------------------- 33 | # config & settings files paths 34 | # ------------------------------- 35 | CUSTOM_BASE = os.path.join(WORKFLOWS_BASE, "custom") 36 | CUSTOM_CONFIG_FILE = os.path.join(CUSTOM_BASE, "config.yml") 37 | USER_SETTINGS_FILENAME = "user_settings.yml" # Under WORKFLOWS_BASE 38 | 39 | 40 | # ------------------------------- 41 | # Python environments paths 42 | # ------------------------------- 43 | MAMBA_ROOT = os.path.join(CHAT_DIR, "mamba") 44 | MAMBA_PY_ENVS = os.path.join(MAMBA_ROOT, "envs") 45 | -------------------------------------------------------------------------------- /devchat/llm/text_confirm.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa: E402 2 | from functools import wraps 3 | 4 | from devchat.chatmark import Checkbox, Form, TextEditor 5 | 6 | 7 | class MissEditConfirmFieldException(Exception): 8 | pass 9 | 10 | 11 | def edit_confirm(response): 12 | need_regenerate = Checkbox(["Need Regenerate"]) 13 | edit_text = TextEditor(response) 14 | feedback_text = TextEditor("") 15 | confirmation_form = Form( 16 | [ 17 | "Edit AI Response:", 18 | edit_text, 19 | "Need Regenerate?", 20 | need_regenerate, 21 | "Feedback 
if Regenerate:", 22 | feedback_text, 23 | ] 24 | ) 25 | confirmation_form.render() 26 | if len(need_regenerate.selections) > 0: 27 | return True, feedback_text.new_text 28 | return False, edit_text.new_text 29 | 30 | 31 | def llm_edit_confirm(edit_confirm_fun=edit_confirm): 32 | def decorator(func): 33 | @wraps(func) 34 | def wrapper(*args, **kwargs): 35 | nonlocal edit_confirm_fun 36 | if not edit_confirm_fun: 37 | raise MissEditConfirmFieldException() 38 | 39 | while True: 40 | response = func(*args, **kwargs) 41 | if not response: 42 | return response 43 | 44 | do_regenerate, new_response = edit_confirm_fun(response) 45 | if do_regenerate: 46 | kwargs["__user_request__"] = {"role": "user", "content": new_response} 47 | else: 48 | return new_response if new_response else response 49 | 50 | return wrapper 51 | 52 | return decorator 53 | -------------------------------------------------------------------------------- /devchat/engine/recursive_prompter.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | 4 | from .namespace import Namespace 5 | 6 | 7 | class RecursivePrompter: 8 | def __init__(self, namespace: Namespace): 9 | self.namespace = namespace 10 | 11 | def run(self, name: str) -> str: 12 | ancestors = name.split(".") 13 | merged_content = "" 14 | for index in range(len(ancestors)): 15 | ancestor_name = ".".join(ancestors[: index + 1]) 16 | file_path = self.namespace.get_file(ancestor_name, "prompt.txt") 17 | if file_path: 18 | with open(file_path, "r", encoding="utf-8") as file: 19 | prompt_content = file.read() 20 | # replace @file@ with the content of the file 21 | prompt_content = self._replace_file_references(file_path, prompt_content) 22 | merged_content += prompt_content 23 | merged_content += "\n" 24 | 25 | return merged_content 26 | 27 | def _replace_file_references(self, prompt_file_path: str, content: str) -> str: 28 | # prompt_file_path is the path to the file that contains the content 29 | # @relative file path@: file is relative to the prompt_file_path 30 | pattern = re.compile(r"@(.+?)@") 31 | matches = pattern.findall(content) 32 | for match in matches: 33 | file_path = os.path.join(os.path.dirname(prompt_file_path), match) 34 | if os.path.exists(file_path): 35 | with open(file_path, "r", encoding="utf-8") as file: 36 | file_content = file.read() 37 | content = content.replace(f"@{match}@", file_content) 38 | return content 39 | -------------------------------------------------------------------------------- /workflow/workflows/code/instruct.txt: -------------------------------------------------------------------------------- 1 | As a software developer assistant, your tasks are to: 2 | 3 | - Provide a clear and concise response to address the user's requirements. 4 | - Write code and give advice based on given code or information in the if provided. 5 | - Follow language-specific best practices and common coding standards. 6 | 7 | When responding: 8 | 9 | 1. First summarize the requirements or provided information in your own words. 10 | The summary should better be written in bullet points (excluding code). 11 | 2. When modifying the provided code, include the entire modified functions, but exclude any unmodified functions. 12 | If any global statements are changed, include the full global statements; otherwise, do not include them. 13 | 3. Enclose code or changes within blocks using triple backticks (```), and include the programming language and the file path. 
14 | For example:
15 | ```python path=./path/to/file.py
16 | print("Hello, World!")
17 | ```
18 | Do your best to deduce the file path based on the given or previous messages.
19 | If you are still uncertain about the file path of the code, feel free to omit it.
20 | 4. Use separate code blocks for different files.
21 | 5. When providing a suggestion or instruction, begin by explaining the reason behind it.
22 | 6. You may not receive all the direct information needed for your task.
23 | Analyze the given context to understand how existing code was written, and use this knowledge for your task.
24 | 7. Note that not all previous messages or contexts are necessarily relevant.
25 | 8. Respond in the language of the request.
26 | You may encounter duplicate or conflicting messages or contexts, and the later ones should be considered as the most accurate.
27 | 
28 | If you need more information, ask for it.
29 | 
-------------------------------------------------------------------------------- /devchat/memory/fixsize_memory.py: --------------------------------------------------------------------------------
1 | from .base import ChatMemory
2 | 
3 | 
4 | class FixSizeChatMemory(ChatMemory):
5 |     """
6 |     FixSizeChatMemory is a memory class that stores up
7 |     to a fixed number of requests and responses.
8 |     """
9 | 
10 |     def __init__(self, max_size: int = 5, messages=None, system_prompt=None):
11 |         """
12 |         Initialize the memory.
13 |         """
14 |         super().__init__()
15 |         self._max_size = max_size
16 |         # store last max_size messages
17 |         if messages is None:
18 |             messages = []
19 | 
20 |         self._messages = messages[-max_size * 2 :]
21 |         self._system_prompt = system_prompt
22 | 
23 |     def append(self, request, response):
24 |         """
25 |         Append a request and response to the memory.
26 |         """
27 |         self._messages.append(request)
28 |         self._messages.append(response)
29 |         if len(self._messages) > self._max_size * 2:
30 |             self._messages = self._messages[-self._max_size * 2 :]
31 | 
32 |     def append_request(self, request):
33 |         """
34 |         Append a request to the memory.
35 |         """
36 |         self._messages.append(request)
37 | 
38 |     def append_response(self, response):
39 |         """
40 |         Append a response to the memory.
41 |         """
42 |         self._messages.append(response)
43 |         if len(self._messages) > self._max_size * 2:
44 |             self._messages = self._messages[-self._max_size * 2 :]
45 | 
46 |     def contexts(self):
47 |         """
48 |         Return the contexts of the memory.
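        If a system prompt is set, it is prepended as a system message.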
49 | """ 50 | messages = self._messages.copy() 51 | # insert system prompt at the beginning 52 | if self._system_prompt: 53 | messages = [{"role": "system", "content": self._system_prompt}] + messages 54 | return messages 55 | -------------------------------------------------------------------------------- /devchat/_service/uvicorn_logging.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | 5 | from loguru import logger 6 | 7 | from devchat._service.config import config 8 | from devchat.workspace_util import get_workspace_chat_dir 9 | 10 | 11 | class InterceptHandler(logging.Handler): 12 | def emit(self, record): 13 | # get corresponding Loguru level if it exists 14 | try: 15 | level = logger.level(record.levelname).name 16 | except ValueError: 17 | level = record.levelno 18 | 19 | # find caller from where originated the logged message 20 | frame, depth = sys._getframe(6), 6 21 | while frame and frame.f_code.co_filename == logging.__file__: 22 | frame = frame.f_back 23 | depth += 1 24 | 25 | logger.opt(depth=depth, exception=record.exc_info).log(level, record.getMessage()) 26 | 27 | 28 | def setup_logging(): 29 | # intercept everything at the root logger 30 | logging.root.handlers = [InterceptHandler()] 31 | logging.root.setLevel(config.LOG_LEVEL) 32 | 33 | # remove every other logger's handlers 34 | # and propagate to root logger 35 | for name in logging.root.manager.loggerDict.keys(): 36 | logging.getLogger(name).handlers = [] 37 | logging.getLogger(name).propagate = True 38 | 39 | workspace_chat_dir = get_workspace_chat_dir(config.WORKSPACE) 40 | log_file = os.path.join(workspace_chat_dir, config.LOG_FILE) 41 | 42 | # configure loguru 43 | logger.configure( 44 | handlers=[ 45 | {"sink": sys.stdout, "serialize": config.JSON_LOGS}, 46 | { 47 | "sink": log_file, 48 | "serialize": config.JSON_LOGS, 49 | "rotation": "10 days", 50 | "retention": "30 days", 51 | "enqueue": True, 52 | }, 53 | ] 54 | ) 55 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "devchat" 3 | version = "0.3.0" 4 | description = "DevChat is an open-source tool that helps developers write prompts to generate code and documentation." 
5 | authors = ["DevChat Team "]
6 | license = "Apache License 2.0"
7 | readme = "README.md"
8 | homepage = "https://github.com/devchat-ai/devchat"
9 | classifiers = [
10 |     "Development Status :: 4 - Beta",
11 |     "License :: OSI Approved :: Apache Software License",
12 |     "Programming Language :: Python :: 3",
13 |     "Intended Audience :: Developers",
14 |     "Intended Audience :: Education",
15 |     "Intended Audience :: Information Technology",
16 |     "Topic :: Scientific/Engineering :: Artificial Intelligence",
17 |     "Topic :: Software Development"
18 | ]
19 | 
20 | [tool.poetry.dependencies]
21 | python = "^3.8"
22 | pydantic = "1.10.16"
23 | networkx = "^3.1"
24 | openai = "1.35.15"
25 | rich_click = "^1.6.1"
26 | tiktoken = ">0.4.0"
27 | tinydb = "^4.7.1"
28 | urllib3 = "<2.0"
29 | importlib-metadata = "^6.8.0"
30 | gitpython = "^3.1.32"
31 | oyaml = "^1.0"
32 | colorama = "^0.4.6"
33 | tenacity = "^8.2.3"
34 | pathspec = "^0.12.1"
35 | importlib-resources = "^6.1.1"
36 | fastapi = "^0.111.0"
37 | uvicorn = {extras = ["standard"], version = "^0.30.1"}
38 | gunicorn = "^22.0.0"
39 | loguru = "^0.7.2"
40 | win32-setctime = "^1.1.0"
41 | virtualenv = "^20.27.1"
42 | 
43 | [tool.poetry.scripts]
44 | devchat = "devchat._cli.main:main"
45 | 
46 | [tool.poetry.group.dev.dependencies]
47 | pytest = "^7.4.0"
48 | ruff = "^0.4.4"
49 | 
50 | [build-system]
51 | requires = ["poetry-core>=1.0.0"]
52 | build-backend = "poetry.core.masonry.api"
53 | 
54 | [tool.ruff]
55 | target-version = "py38"
56 | line-length = 100
57 | 
58 | [tool.ruff.lint]
59 | select = [
60 |     "E",  # Error
61 |     "W",  # Warning
62 |     "F",  # pyflakes
63 |     "I",  # isort
64 | ]
65 | fixable = ["ALL"]
66 | 
67 | 
-------------------------------------------------------------------------------- /devchat/chat.py: --------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from typing import Iterator
3 | 
4 | from devchat.prompt import Prompt
5 | 
6 | 
7 | class Chat(ABC):
8 |     """
9 |     Chat interface for managing chat-related interactions.
10 | 
11 |     This interface defines methods for prompting a chat system with
12 |     a list of Message objects and retrieving responses, either as a
13 |     complete response or as a streaming response.
14 |     """
15 | 
16 |     @abstractmethod
17 |     def init_prompt(self, request: str) -> Prompt:
18 |         """
19 |         Initialize a prompt for the chat system.
20 | 
21 |         Args:
22 |             request (str): The basic request of the prompt.
23 |                 The returned prompt can be combined with more instructions and context.
24 |         """
25 | 
26 |     @abstractmethod
27 |     def load_prompt(self, data: dict) -> Prompt:
28 |         """
29 |         Load a prompt from a dictionary.
30 | 
31 |         Args:
32 |             data (dict): The dictionary containing the prompt data.
33 |         """
34 | 
35 |     @abstractmethod
36 |     def complete_response(self, prompt: Prompt) -> str:
37 |         """
38 |         Retrieve a complete response JSON string from the chat system.
39 | 
40 |         Args:
41 |             prompt (Prompt): A prompt of messages representing the conversation.
42 |         Returns:
43 |             str: A JSON string representing the complete response.
44 |         """
45 | 
46 |     @abstractmethod
47 |     def stream_response(self, prompt: Prompt) -> Iterator:
48 |         """
49 |         Retrieve a streaming response as an iterator of JSON strings from the chat system.
50 | 
51 |         Args:
52 |             prompt (Prompt): A prompt of messages representing the conversation.
53 |         Returns:
54 |             Iterator: An iterator over JSON strings, each representing a chunk of the response.
55 | """ 56 | -------------------------------------------------------------------------------- /devchat/workflow/update_flowchart.md: -------------------------------------------------------------------------------- 1 | 2 | ```mermaid 3 | flowchart TD 4 | A([Start]) --> B{is git installed?} 5 | B -->|No| C(Update by downloading zip file) 6 | C --> E{is workflow_base/ dir exists?} 7 | E -->|No| F(Try to download latest zip \nand extract it to workflow_base/ ) 8 | F --> H{downloaded and extracted successfully?} 9 | H -->|No| I([Failed to update. \nWorkflow is unavailable.\nShould try again later.]) 10 | H -->|Yes| J([Update completed]) 11 | E -->|Yes| G(Try to download latest zip \nand extract it to workflow_base_new/) 12 | G --> K{downloaded and extracted successfully?} 13 | K -->|No| L([Skip update this time.]) 14 | K -->|Yes| M(1. Archive workflow_base/ to the backup dir\n2. Rename workflow_base_new/ to workflow_base/) 15 | M --> N([Update completed]) 16 | 17 | 18 | B -->|Yes| D(Update by git command) 19 | D --> D1{is workflow_base/ dir exists?} 20 | D1 -->|No| D2(Try to clone the repo to workflow_base/) 21 | D2 --> D3{cloned successfully?} 22 | D3 -->|No| D4([Failed to update. \nWorkflow is unavailable.\nShould try again later.]) 23 | D3 -->|Yes| D5([Update completed]) 24 | 25 | D1 -->|Yes| D6{is workflow_base/ a git repo?} 26 | D6 -->|No| D7(Try to clone the repo to workflow_base_new/) 27 | D7 --> D8{cloned successfully?} 28 | D8 -->|No| D9([Skip update this time.]) 29 | D8 -->|Yes| D10(1. Archive workflow_base/ to the backup dir\n2. Rename workflow_base_new/ to workflow_base/) 30 | D10 --> D11([Update completed]) 31 | 32 | D6 -->|Yes| D12{is the repo on main branch?} 33 | D12 -->|No| D13([Skip update because it is on dev branch.]) 34 | D12 -->|Yes| D14(1. Archive current workflow_base/ to the backup dir\n2. 
35 |     D14 --> D15{pulled successfully?}
36 |     D15 -->|No| D16([Skip update this time.])
37 |     D15 -->|Yes| D17([Update completed])
38 | ```
39 | 
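The chart above can be read as a single decision function. Below is a minimal Python sketch of that logic, for illustration only: the repository URL, the backup naming, and the `download_and_extract` callable are assumptions, not this module's actual implementation.

```python
import os
import shutil
import subprocess
import time

WORKFLOWS_REPO_URL = "https://github.com/devchat-ai/workflows.git"  # assumed URL


def _git_ok(*args: str) -> bool:
    # run a git command and report whether it succeeded
    return subprocess.run(["git", *args], capture_output=True).returncode == 0


def _archive_then_swap(base: str, new: str) -> None:
    # back up the old directory, then promote the freshly prepared one
    shutil.move(base, f"{base}.bak.{int(time.time())}")
    shutil.move(new, base)


def update_workflows(base: str, download_and_extract) -> str:
    """Follow the chart: return 'updated', 'skipped', or 'failed'."""
    new = base.rstrip(os.sep) + "_new"
    if shutil.which("git") is None:
        # no git available: update by downloading a zip file
        if not os.path.isdir(base):
            return "updated" if download_and_extract(base) else "failed"
        if not download_and_extract(new):
            return "skipped"
        _archive_then_swap(base, new)
        return "updated"
    # git available: update by git commands
    if not os.path.isdir(base):
        return "updated" if _git_ok("clone", WORKFLOWS_REPO_URL, base) else "failed"
    if not _git_ok("-C", base, "rev-parse", "--git-dir"):
        # base exists but is not a git repo: clone aside, then swap
        if not _git_ok("clone", WORKFLOWS_REPO_URL, new):
            return "skipped"
        _archive_then_swap(base, new)
        return "updated"
    branch = subprocess.run(
        ["git", "-C", base, "rev-parse", "--abbrev-ref", "HEAD"],
        capture_output=True,
        text=True,
    ).stdout.strip()
    if branch != "main":
        return "skipped"  # on a dev branch; leave it alone
    shutil.copytree(base, f"{base}.bak.{int(time.time())}")  # archive before pulling
    return "updated" if _git_ok("-C", base, "pull", "origin", "main") else "skipped"
```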
-------------------------------------------------------------------------------- /README.zh.md: --------------------------------------------------------------------------------
1 | DevChat Open-Source AI IDE Plugin
2 | =
3 | Generate intelligent workflows in natural language, and use knowledge engineering to complete the last mile of AI adoption
4 | -
5 | 
6 | Although GitHub Copilot, Cursor, and Cline keep making the coding step smarter, and Dify, Flowise, and Coze let workflows be built by drag-and-drop, every day we still wade into one "AI-free" sea after another, **wearily paddling through the many tedious processes of software development.**
7 | 
8 | Some say that companies' motley customization requirements turn development-tool adaptation into a quagmire; meanwhile, hands-on tinkerers keep themselves busy with personalizations large and small.
9 | **Every development team has its own character and deserves AI tailored to it, and putting that into practice should not be painful.**
10 | 
11 | We are building the DevChat open-source community to **help every developer easily cross the last mile of bringing AI into development!**
12 | 
13 | ## Core Features
14 | 
15 | We have given DevChat two cores.
16 | 
17 | ### ❤️ Minimal personal customization: create your own workflows with a few sentences
18 | 
19 | - Say goodbye to the rigidity and learning cost of hand-building workflows by drag-and-drop. With just a few sentences of description, you can easily generate **intelligent workflows** that assist programmers or do the work for them, whether that is submitting a well-formed GitLab MR, generating executable automated API test cases, or details like having the AI announce its progress by voice while it works.
20 | - Built jointly by an open community, a rich library of intelligent workflows accumulates over time: from plugins that obtain **rich IDE context** to all kinds of unexpected **autonomous agents**, there is always a "magic tool" that suits you.
21 | - Multi-level customization supports enterprise-wide requirements (such as coding standards) while also adapting to the tools, processes, and habits of teams or individuals. It builds on existing infrastructure such as directories and Git, follows a minimalist design, and introduces no redundant management systems.
22 | 
23 | ### ❤️ Best with private-domain knowledge: understanding software development through knowledge engineering
24 | 
25 | - **Integrated knowledge-graph capabilities** support diverse semantic queries, combining static construction before queries with dynamic construction at query time, balancing best results with performance.
26 | - Knowledge is categorized for concrete scenarios to **enhance the quality of AI generation.**
27 | - For example, by analyzing the relationships among all endpoints, parameters, and features in API documentation, autonomous AI testing can combine multiple APIs to generate test cases, reducing exploration steps and improving the accuracy of the final test scripts.
28 | 
29 | ## Design Choices
30 | 
31 | Starting from first principles, DevChat is your **best long-term choice**.
32 | 
33 | - Simply letting agents improvise is not enough. This is not a question of base-model capability but of how individual and organizational experience is passed on. In the long run we can assume large models reach human-level intelligence, yet how do humans achieve high productivity? Individuals gradually accumulate experience, and organizations gradually form processes. Only by combining this tacit knowledge with AI can development effectiveness actually improve. Workflows are exactly the way to **make tacit knowledge explicit**. We believe that **accumulating workflows is the main form in which AI productivity tools deposit lasting value**.
34 | - Merely connecting agents to tools is not enough either. We view an agent as a workflow that can plan and act autonomously. Although access to the required tools is a necessary condition for an agent to act, what matters more is **providing the agent with the high-quality private-domain knowledge needed to complete its tasks**. Unlike IDEs that center on tool ecosystems, we are committed to building advanced knowledge-engineering capabilities so that DevChat becomes the personal assistant that understands you best.
35 | 
36 | 🤝 We sincerely invite developers who share our technical vision to join our community: [GitHub](https://github.com/devchat-ai/workflows) & [Discord](https://discord.com/invite/JNyVGz8y)!
37 | 
38 | ## Other Features
39 | 
40 | - Basic IDE plugin features:
41 |   - code generation and autocompletion;
42 |   - assisted code understanding and editing;
43 |   - efficient AI Q&A in the project context.
44 | - Support for the latest large models worldwide, with one top-up covering them all:
45 |   - GPT-4o/o1;
46 |   - Claude 3.5/3.7 Sonnet;
47 |   - DeepSeek-V3/R1;
48 |   - Llama 3.3: 70B;
49 |   - Qwen2.5-Turbo, and more.
50 | - Integrated autonomous API testing:
51 |   - upload API documentation and get executable test cases and scripts with one click;
52 |   - the AI completes the whole process autonomously, with minimal human intervention;
53 |   - supports multi-API orchestration and data validation, constructing complex scenario cases from multiple angles.
54 | 
55 | 
-------------------------------------------------------------------------------- /devchat/workflow/schema.py: --------------------------------------------------------------------------------
1 | import re
2 | from typing import Dict, List, Optional, Union
3 | 
4 | from pydantic import BaseModel, Extra, validator
5 | 
6 | 
7 | class WorkflowPyConf(BaseModel):
8 |     version: Optional[str]  # python version
9 |     dependencies: str  # absolute path to the requirements file
10 |     env_name: Optional[str]  # python env name, will use the workflow name if not set
11 | 
12 |     @validator("version")
13 |     def validate_version(cls, value):
14 |         pattern = r"^\d+\.\d+(\.\d+)?$"
15 |         if value is not None and not re.match(pattern, value):
16 |             raise ValueError(
17 |                 f"Invalid version format: {value}. Expected format is x.y or x.y.z"
18 |             )
19 |         return value
20 | 
21 | 
22 | class ExternalPyConf(BaseModel):
23 |     env_name: str  # the env_name of workflow python to act as
24 |     python_bin: str  # the python executable path
25 | 
26 | 
27 | class UserSettings(BaseModel):
28 |     external_workflow_python: List[ExternalPyConf] = []
29 | 
30 |     class Config:
31 |         extra = Extra.ignore
32 | 
33 | 
34 | class WorkflowConfig(BaseModel):
35 |     description: str
36 |     root_path: str  # the root path of the workflow
37 |     steps: List[Dict]
38 |     input_required: bool = False  # True for required
39 |     hint: Optional[str] = None
40 |     workflow_python: Optional[WorkflowPyConf] = None
41 |     help: Optional[Union[str, Dict[str, str]]] = None
42 | 
43 |     @validator("input_required", pre=True)
44 |     def to_boolean(cls, value):
45 |         return value.lower() == "required"
46 | 
47 |     class Config:
48 |         extra = Extra.ignore
49 | 
50 | 
51 | class RuntimeParameter(BaseModel):
52 |     model_name: str
53 |     devchat_python: str
54 |     workflow_python: str = ""
55 |     user_input: Optional[str]
56 |     history_messages: Optional[Dict]
57 |     parent_hash: Optional[str]
58 | 
59 |     class Config:
60 |         extra = Extra.allow
61 | 
-------------------------------------------------------------------------------- /devchat/engine/command_parser.py: --------------------------------------------------------------------------------
1 | import os
2 | from typing import Dict, List, Optional
3 | 
4 | import oyaml as yaml
5 | from pydantic import BaseModel
6 | 
7 | from .namespace import Namespace
8 | 
9 | 
10 | class Parameter(BaseModel):
11 |     type: str = "string"
12 |     description: Optional[str] = None
13 |     enum: Optional[List[str]] = None
14 |     default: Optional[str] = None
15 | 
16 | 
17 | class Command(BaseModel):
18 |     description: str
19 |     hint: Optional[str] = None
20 |     parameters: Optional[Dict[str, Parameter]] = None
21 |     input: Optional[str] = None
22 |     steps: Optional[List[Dict[str, str]]] = None
23 |     path: Optional[str] = None
24 | 
25 | 
26 | class CommandParser:
27 |     def __init__(self, namespace: Namespace):
28 |         self.namespace = namespace
29 | 
30 |     def parse(self, name: str) -> Optional[Command]:
31 |         """
32 |         Parse a command configuration file into a Command object.
33 | 
34 |         :param name: The command name in the namespace.
35 |         :return: The parsed Command, or None if no configuration file is found.
36 |         """
37 |         file_path = self.namespace.get_file(name, "command.yml")
38 |         if not file_path:
39 |             return None
40 |         return parse_command(file_path)
41 | 
42 | 
43 | def parse_command(file_path: str) -> Command:
44 |     """
45 |     Parse and validate a YAML configuration file.
46 | 
47 |     :param file_path: The path to the configuration file.
48 |     :return: The validated configuration as a Pydantic model.
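    Occurrences of `$command_path` in the file are replaced with the configuration file's directory.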
49 |     """
50 |     # get path from file_path, /xx1/xx2/xx3.py => /xx1/xx2
51 |     config_dir = os.path.dirname(file_path)
52 | 
53 |     with open(file_path, "r", encoding="utf-8") as file:
54 |         # replace $command_path with config_dir
55 |         content = file.read().replace("$command_path", config_dir.replace("\\", "/"))
56 |         config_dict = yaml.safe_load(content)
57 |     config = Command(**config_dict)
58 |     config.path = file_path
59 |     return config
60 | 
-------------------------------------------------------------------------------- /devchat/engine/router.py: --------------------------------------------------------------------------------
1 | import os
2 | from typing import List
3 | 
4 | from .command_runner import CommandRunner
5 | from .namespace import Namespace
6 | from .recursive_prompter import RecursivePrompter
7 | from .util import CommandUtil
8 | 
9 | 
10 | def load_workflow_instruction(user_input: str):
11 |     user_input = user_input.strip()
12 |     if len(user_input) == 0:
13 |         return None
14 |     if user_input[:1] != "/":
15 |         return None
16 | 
17 |     workflows_dir = os.path.join(os.path.expanduser("~/.chat"), "workflows")
18 |     if not os.path.exists(workflows_dir):
19 |         return None
20 |     if not os.path.isdir(workflows_dir):
21 |         return None
22 | 
23 |     namespace = Namespace(workflows_dir)
24 |     prompter = RecursivePrompter(namespace)
25 | 
26 |     command_name = user_input.split()[0][1:]
27 |     command_prompts = prompter.run(command_name)
28 | 
29 |     return [command_prompts]
30 | 
31 | 
32 | def run_command(
33 |     model_name: str, history_messages: List[dict], input_text: str, parent_hash: str, auto_fun: bool
34 | ):
35 |     """
36 |     Load the command config, then run the command.
37 |     """
38 |     # split input_text by ' ','\n','\t'
39 |     if len(input_text.strip()) == 0:
40 |         return None
41 |     if input_text.strip()[:1] != "/":
42 |         if not (auto_fun and model_name.startswith("gpt-")):
43 |             return None
44 | 
45 |         # TODO
46 |         # use auto-selection of a workflow to run the command
47 |         return None
48 | 
49 |     commands = input_text.split()
50 |     command = commands[0][1:]
51 | 
52 |     command_obj = CommandUtil.load_command(command)
53 |     if not command_obj or not command_obj.steps:
54 |         return None
55 | 
56 |     runner = CommandRunner(model_name)
57 |     return runner.run_command(
58 |         command_name=command,
59 |         command=command_obj,
60 |         history_messages=history_messages,
61 |         input_text=input_text,
62 |         parent_hash=parent_hash,
63 |     )
-------------------------------------------------------------------------------- /devchat/msg/chatting.py: --------------------------------------------------------------------------------
1 | import json
2 | from typing import Iterator, List, Optional
3 | 
4 | from devchat._cli.utils import get_model_config
5 | from devchat.assistant import Assistant
6 | from devchat.openai.openai_chat import OpenAIChat, OpenAIChatConfig
7 | from devchat.path import USER_CHAT_DIR
8 | from devchat.store import Store
9 | from devchat.utils import parse_files
10 | from devchat.workspace_util import get_workspace_chat_dir
11 | 
12 | 
13 | def _get_model_and_config(model: Optional[str], config_str: Optional[str]):
14 |     model, config = get_model_config(USER_CHAT_DIR, model)
15 | 
16 |     parameters_data = config.dict(exclude_unset=True)
17 |     if config_str:
18 |         config_data = json.loads(config_str)
19 |         parameters_data.update(config_data)
20 |     return model, parameters_data
21 | 
22 | 
23 | def chatting(
24 |     content: str,
25 |     model_name: str,
26 |     parent: Optional[str],
27 |     workspace: Optional[str],
28 |     context_files: Optional[List[str]],
29 | ) -> Iterator[str]:
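    """
    Stream the LLM's reply to the given message content.

    Yields each chunk of the response as a string.
    """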
30 |     workspace_chat_dir = get_workspace_chat_dir(workspace)
31 | 
32 |     context_contents = parse_files(context_files)
33 | 
34 |     model, parameters_data = _get_model_and_config(model_name, None)
35 |     max_input_tokens = parameters_data.get("max_input_tokens", 4000)
36 | 
37 |     openai_config = OpenAIChatConfig(model=model, **parameters_data)
38 |     chat = OpenAIChat(openai_config)
39 |     chat_store = Store(workspace_chat_dir, chat)
40 | 
41 |     assistant = Assistant(
42 |         chat=chat,
43 |         store=chat_store,
44 |         max_prompt_tokens=max_input_tokens,
45 |         need_store=False,
46 |     )
47 |     assistant.make_prompt(
48 |         request=content,
49 |         instruct_contents=None,
50 |         context_contents=context_contents,
51 |         functions=None,
52 |         parent=parent,
53 |         references=None,
54 |         function_name=None,
55 |     )
56 | 
57 |     for res in assistant.iterate_response():
58 |         yield res
59 | 
-------------------------------------------------------------------------------- /workflow/workflows/release_note/instruct.txt: --------------------------------------------------------------------------------
1 | As a software developer assistant, your task is to provide clear and concise responses and write release notes based on the given context.
2 | Follow these guidelines:
3 | 
4 | 1. A release note should at least contain two sections, "Highlights" and "Change Log", unless required otherwise.
5 | 2. In the "Highlights" section, summarize and list the most important changes in the release, ordered by importance.
6 | 3. In the "Change Log" section, list all the changes in the release.
7 | Classify the changes into different subsections, including but not limited to improvements, bug fixes, and documentation.
8 | Try your best to avoid putting changes in an "Other Changes" subsection.
9 | 4. For each subsection of "Change Log", further group and summarize the changes into a bullet list.
10 | Try your best to group more than one commit into a bullet.
11 | At the end of each bullet, include the issue numbers or commit hashes in parentheses if applicable.
12 | 5. Format a release note by enclosing it within a block of triple backticks (```), and include 'release' alongside the beginning backticks.
13 | 
14 | Here is an example:
15 | ```release
16 | ## Highlights
17 | 
18 | - Added commitmsg as block type for commit messages
19 | - Fixed a bug in the order of storing prompts
20 | - Replaced video with gifs in README.md
21 | 
22 | ## Change Log
23 | 
24 | ### Improvements
25 | 
26 | - Optimized the order of messages (1e6a130)
27 | - Added token counting utils (#32) (e3c2064, 2e6e130)
28 | - Limited token number for Assistant to make prompt (312fbfe, afffe48)
29 | - Added commitmsg as code type for commit messages (f49dd6d)
30 | 
31 | ### Bug Fixes
32 | 
33 | - Fixed a bug in the order of storing prompts (24d8009)
34 | - Fixed a bug of overwriting request token number (600ea31)
35 | 
36 | ### Documentation
37 | 
38 | - Replaced video with gifs in README.md (b50f081, d5aa6d2, 3c8a6bf, a5a81a9)
39 | - Updated instruct.txt (27fe87f)
40 | ```
41 | 
42 | If you need more information, ask for it.
-------------------------------------------------------------------------------- /tests/test_openai_message.py: --------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | from devchat.openai import OpenAIMessage
4 | 
5 | 
6 | def test_valid_message_creation():
7 |     message = OpenAIMessage(role="user", content="Hello, World!")
8 |     assert message.role == "user"
9 |     assert message.content == "Hello, World!"
10 | assert message.name is None 11 | 12 | 13 | def test_valid_message(): 14 | message = OpenAIMessage(content="Hello, World!", role="user", name="John_Doe") 15 | assert message.to_dict() == {"role": "user", "content": "Hello, World!", "name": "John_Doe"} 16 | 17 | 18 | def test_invalid_role(): 19 | with pytest.raises(ValueError): 20 | OpenAIMessage(content="Hello, World!", role="invalid_role") 21 | 22 | 23 | def test_none_content(): 24 | message = OpenAIMessage(role="system", content=None) 25 | assert message.content is None 26 | 27 | 28 | def test_invalid_name(): 29 | with pytest.raises(ValueError): 30 | OpenAIMessage(content="Hello, World!", role="user", name="Invalid@Name") 31 | 32 | 33 | def test_empty_name(): 34 | with pytest.raises(ValueError): 35 | OpenAIMessage(content="Hello, World!", role="user", name="") 36 | 37 | 38 | def test_blank_name(): 39 | with pytest.raises(ValueError): 40 | OpenAIMessage(content="Hello, World!", role="user", name=" ") 41 | 42 | 43 | def test_none_name(): 44 | message = OpenAIMessage(content="Hello, World!", role="user", name=None) 45 | assert message.to_dict() == {"role": "user", "content": "Hello, World!"} 46 | 47 | 48 | def test_from_dict(): 49 | message_data = {"content": "Welcome to the chat.", "role": "system"} 50 | message = OpenAIMessage.from_dict(message_data) 51 | assert message.role == "system" 52 | assert message.content == "Welcome to the chat." 53 | assert message.name is None 54 | 55 | 56 | def test_from_dict_with_name(): 57 | message_data = {"content": "Hello, Assistant!", "role": "user", "name": "JohnDoe"} 58 | message = OpenAIMessage.from_dict(message_data) 59 | assert message.role == "user" 60 | assert message.content == "Hello, Assistant!" 61 | assert message.name == "JohnDoe" 62 | -------------------------------------------------------------------------------- /devchat/msg/user_info.py: -------------------------------------------------------------------------------- 1 | import getpass 2 | import os 3 | import socket 4 | import subprocess 5 | from typing import Optional, Tuple 6 | 7 | 8 | class UserInfo: 9 | def __init__(self): 10 | self._name = None 11 | self._email = None 12 | 13 | self._load_user_info() 14 | 15 | @property 16 | def name(self) -> str: 17 | if not self._name: 18 | self._load_user_info() 19 | 20 | return self._name 21 | 22 | @property 23 | def email(self) -> str: 24 | if not self._email: 25 | self._load_user_info() 26 | 27 | return self._email 28 | 29 | def _load_user_info(self): 30 | """ 31 | Load user info 32 | """ 33 | git_name, git_email = self.__get_git_user_info() 34 | 35 | if git_name and git_email: 36 | self._name = git_name 37 | self._email = git_email 38 | return 39 | 40 | sys_name = self.__get_sys_user_name() 41 | name = git_name or sys_name 42 | 43 | mock_email = name + "@" + socket.gethostname() 44 | email = git_email or mock_email 45 | 46 | self._name = name 47 | self._email = email 48 | return 49 | 50 | def __get_git_user_info(self) -> Tuple[Optional[str], Optional[str]]: 51 | """ 52 | Load user info from git 53 | """ 54 | name, email = None, None 55 | try: 56 | cmd = ["git", "config", "user.name"] 57 | name = subprocess.check_output(cmd, encoding="utf-8").strip() 58 | except Exception: 59 | pass 60 | 61 | try: 62 | cmd = ["git", "config", "user.email"] 63 | email = subprocess.check_output(cmd, encoding="utf-8").strip() 64 | except Exception: 65 | pass 66 | 67 | return name, email 68 | 69 | def __get_sys_user_name(self) -> str: 70 | """ 71 | Get user name from system 72 | """ 73 | name = 
"devchat_anonymous" 74 | try: 75 | name = getpass.getuser() 76 | except Exception: 77 | user_dir = os.path.expanduser("~") 78 | name = user_dir.split(os.sep)[-1] 79 | 80 | return name 81 | 82 | 83 | user_info = UserInfo() 84 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .vscode 3 | 4 | # PyCharm 5 | .idea/ 6 | 7 | # Byte-compiled / optimized / DLL files 8 | __pycache__/ 9 | *.py[cod] 10 | *$py.class 11 | 12 | # C extensions 13 | *.so 14 | 15 | # Distribution / packaging 16 | .Python 17 | build/ 18 | develop-eggs/ 19 | dist/ 20 | downloads/ 21 | eggs/ 22 | .eggs/ 23 | lib/ 24 | lib64/ 25 | parts/ 26 | sdist/ 27 | var/ 28 | wheels/ 29 | pip-wheel-metadata/ 30 | share/python-wheels/ 31 | *.egg-info/ 32 | .installed.cfg 33 | *.egg 34 | MANIFEST 35 | 36 | # PyInstaller 37 | # Usually these files are written by a python script from a template 38 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 39 | *.manifest 40 | *.spec 41 | 42 | # Installer logs 43 | pip-log.txt 44 | pip-delete-this-directory.txt 45 | 46 | # Unit test / coverage reports 47 | htmlcov/ 48 | .tox/ 49 | .nox/ 50 | .coverage 51 | .coverage.* 52 | .cache 53 | nosetests.xml 54 | coverage.xml 55 | *.cover 56 | *.py,cover 57 | .hypothesis/ 58 | .pytest_cache/ 59 | 60 | # Translations 61 | *.mo 62 | *.pot 63 | 64 | # Django stuff: 65 | *.log 66 | local_settings.py 67 | db.sqlite3 68 | db.sqlite3-journal 69 | 70 | # Flask stuff: 71 | instance/ 72 | .webassets-cache 73 | 74 | # Scrapy stuff: 75 | .scrapy 76 | 77 | # Sphinx documentation 78 | docs/_build/ 79 | 80 | # PyBuilder 81 | target/ 82 | 83 | # Jupyter Notebook 84 | .ipynb_checkpoints 85 | 86 | # IPython 87 | profile_default/ 88 | ipython_config.py 89 | 90 | # pyenv 91 | .python-version 92 | 93 | # pipenv 94 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 95 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 96 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 97 | # install all needed dependencies. 98 | #Pipfile.lock 99 | 100 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 101 | __pypackages__/ 102 | 103 | # Celery stuff 104 | celerybeat-schedule 105 | celerybeat.pid 106 | 107 | # SageMath parsed files 108 | *.sage.py 109 | 110 | # Environments 111 | .env 112 | .venv 113 | env/ 114 | venv/ 115 | ENV/ 116 | env.bak/ 117 | venv.bak/ 118 | 119 | # Spyder project settings 120 | .spyderproject 121 | .spyproject 122 | 123 | # Rope project settings 124 | .ropeproject 125 | 126 | # mkdocs documentation 127 | /site 128 | 129 | # mypy 130 | .mypy_cache/ 131 | .dmypy.json 132 | dmypy.json 133 | 134 | # Pyre type checker 135 | .pyre/ 136 | 137 | -------------------------------------------------------------------------------- /devchat/_service/route/topics.py: -------------------------------------------------------------------------------- 1 | from typing import List, Optional 2 | 3 | from fastapi import APIRouter, HTTPException, Query, status 4 | 5 | from devchat._service.schema import request, response 6 | from devchat.msg.topic_util import delete_topic as del_topic 7 | from devchat.msg.topic_util import get_topic_shortlogs, get_topics 8 | 9 | router = APIRouter() 10 | 11 | 12 | @router.get("/{topic_root_hash}/logs", response_model=List[response.ShortLog]) 13 | def get_topic_logs( 14 | topic_root_hash: str, 15 | limit: int = Query(1, gt=0, description="maximum number of records to return"), 16 | offset: int = Query(0, ge=0, description="offset of the first record to return"), 17 | workspace: Optional[str] = Query(None, description="absolute path to the workspace/repository"), 18 | ): 19 | records = get_topic_shortlogs(topic_root_hash, limit, offset, workspace) 20 | 21 | logs = [response.ShortLog.parse_obj(record) for record in records] 22 | return logs 23 | 24 | 25 | @router.get("", response_model=List[response.TopicSummary]) 26 | def list_topics( 27 | limit: int = Query(1, gt=0, description="maximum number of records to return"), 28 | offset: int = Query(0, ge=0, description="offset of the first record to return"), 29 | workspace: Optional[str] = Query(None, description="absolute path to the workspace/repository"), 30 | ): 31 | topics = get_topics( 32 | limit=limit, 33 | offset=offset, 34 | workspace_path=workspace, 35 | with_deleted=False, 36 | ) 37 | 38 | summaries = [ 39 | response.TopicSummary( 40 | latest_time=topic["latest_time"], 41 | title=topic["title"], 42 | hidden=topic["hidden"], 43 | root_prompt_hash=topic["root_prompt"]["hash"], 44 | root_prompt_user=topic["root_prompt"]["user"], 45 | root_prompt_date=topic["root_prompt"]["date"], 46 | root_prompt_request=topic["root_prompt"]["request"], 47 | root_prompt_response=topic["root_prompt"]["responses"][0], 48 | ) 49 | for topic in topics 50 | ] 51 | return summaries 52 | 53 | 54 | @router.post("/delete", response_model=response.DeleteTopic) 55 | def delete_topic( 56 | item: request.DeleteTopic, 57 | ): 58 | try: 59 | del_topic(item.topic_hash, item.workspace) 60 | 61 | except Exception as e: 62 | detail = f"Failed to delete topic <{item.topic_hash}>: {str(e)}" 63 | raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=detail) 64 | 65 | return response.DeleteTopic(topic_hash=item.topic_hash) 66 | -------------------------------------------------------------------------------- /devchat/ide/rpc.py: -------------------------------------------------------------------------------- 1 | import os 2 | from functools import wraps 3 | 4 | import requests 5 | 6 | BASE_SERVER_URL = os.environ.get("DEVCHAT_IDE_SERVICE_URL", "http://localhost:3000") 7 | 8 | 9 | def 
rpc_call(f):
10 |     @wraps(f)
11 |     def wrapper(*args, **kwargs):
12 |         if os.environ.get("DEVCHAT_IDE_SERVICE_URL", "") == "":
13 |             # maybe in a test where the user doesn't want to mock service functions
14 |             return
15 | 
16 |         try:
17 |             function_name = f.__name__
18 |             url = f"{BASE_SERVER_URL}/{function_name}"
19 | 
20 |             data = dict(zip(f.__code__.co_varnames, args))
21 |             data.update(kwargs)
22 |             headers = {"Content-Type": "application/json"}
23 | 
24 |             response = requests.post(url, json=data, headers=headers)
25 | 
26 |             if response.status_code != 200:
27 |                 raise Exception(f"Server error: {response.status_code}")
28 | 
29 |             response_data = response.json()
30 |             if "error" in response_data:
31 |                 raise Exception(f"Server returned an error: {response_data['error']}")
32 |             return response_data.get("result", None)
33 |         except ConnectionError as err:
34 |             # TODO
35 |             raise err
36 | 
37 |     return wrapper
38 | 
39 | 
40 | def rpc_method(f):
41 |     """
42 |     Decorator for Service methods
43 |     """
44 | 
45 |     @wraps(f)
46 |     def wrapper(self, *args, **kwargs):
47 |         if os.environ.get("DEVCHAT_IDE_SERVICE_URL", "") == "":
48 |             # maybe in a test where the user doesn't want to mock service functions
49 |             return
50 | 
51 |         try:
52 |             function_name = f.__name__
53 |             url = f"{BASE_SERVER_URL}/{function_name}"
54 | 
55 |             data = dict(zip(f.__code__.co_varnames[1:], args))  # Exclude "self"
56 |             data.update(kwargs)
57 |             headers = {"Content-Type": "application/json"}
58 | 
59 |             response = requests.post(url, json=data, headers=headers)
60 | 
61 |             if response.status_code != 200:
62 |                 raise Exception(f"Server error: {response.status_code}")
63 | 
64 |             response_data = response.json()
65 |             if "error" in response_data:
66 |                 raise Exception(f"Server returned an error: {response_data['error']}")
67 | 
68 |             # Store the result in the _result attribute of the instance
69 |             self._result = response_data.get("result", None)
70 |             return f(self, *args, **kwargs)
71 | 
72 |         except ConnectionError as err:
73 |             # TODO
74 |             raise err
75 | 
76 |     return wrapper
77 | 
-------------------------------------------------------------------------------- /devchat/workflow/command/env.py: --------------------------------------------------------------------------------
1 | """
2 | Commands for managing the python environment of workflows.
3 | """
4 | 
5 | import sys
6 | from pathlib import Path
7 | from typing import List, Optional
8 | 
9 | import click
10 | 
11 | from devchat.workflow.env_manager import MAMBA_PY_ENVS, PyEnvManager
12 | 
13 | 
14 | def _get_all_env_names() -> List[str]:
15 |     """
16 |     Get all the python env names of workflows.
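    DevChat's own reserved envs (devchat, devchat-ask, devchat-commands) are excluded.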
17 | """ 18 | # devchat reserved envs 19 | excludes = ["devchat", "devchat-ask", "devchat-commands"] 20 | 21 | envs_path = Path(MAMBA_PY_ENVS) 22 | envs = [env.name for env in envs_path.iterdir() if env.is_dir() and env.name not in excludes] 23 | return envs 24 | 25 | 26 | @click.command(help="List all the python envs of workflows.", name="list") 27 | def list_envs(): 28 | envs = _get_all_env_names() 29 | click.echo(f"Found {len(envs)} python envs of workflows:") 30 | click.echo("\n".join(envs)) 31 | 32 | 33 | @click.command(help="Remove a specific workflow python env.") 34 | @click.option( 35 | "--env-name", 36 | "-n", 37 | help="The name of the python env to remove.", 38 | required=False, 39 | type=str, 40 | ) 41 | @click.option("--all", "all_flag", help="Remove all the python envs of workflows.", is_flag=True) 42 | def remove(env_name: Optional[str] = None, all_flag: bool = False): 43 | if not env_name and not all_flag: 44 | click.echo("Please provide the name of the python env to remove.") 45 | sys.exit(1) 46 | 47 | if env_name: 48 | manager = PyEnvManager() 49 | remove_ok = manager.remove(env_name) 50 | 51 | if remove_ok: 52 | click.echo(f"Removed python env: {env_name}") 53 | sys.exit(0) 54 | 55 | else: 56 | click.echo(f"Failed to remove python env: {env_name}") 57 | sys.exit(1) 58 | 59 | if all_flag: 60 | envs = _get_all_env_names() 61 | manager = PyEnvManager() 62 | ok = [] 63 | failed = [] 64 | for name in envs: 65 | remove_ok = manager.remove(name) 66 | if remove_ok: 67 | ok.append(name) 68 | else: 69 | failed.append(name) 70 | 71 | click.echo(f"Removed {len(ok)} python envs of workflows:") 72 | click.echo("\n".join(ok)) 73 | if failed: 74 | click.echo(f"Failed to remove {len(failed)} python envs of workflows:") 75 | click.echo("\n".join(failed)) 76 | 77 | sys.exit(0) 78 | 79 | 80 | @click.group(help="Manage the python environment of workflows.") 81 | def env(): 82 | pass 83 | 84 | 85 | env.add_command(list_envs) 86 | env.add_command(remove) 87 | -------------------------------------------------------------------------------- /devchat/msg/topic_util.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import Dict, List, Optional 3 | 4 | from devchat._cli.utils import get_model_config 5 | from devchat.openai.openai_chat import OpenAIChat, OpenAIChatConfig 6 | from devchat.store import Store 7 | from devchat.workspace_util import USER_CHAT_DIR, get_workspace_chat_dir 8 | 9 | 10 | def get_topic_shortlogs( 11 | topic_root_hash: str, limit: int, offset: int, workspace_path: Optional[str] 12 | ) -> List[Dict]: 13 | short_logs = [] 14 | 15 | user_chat_dir = USER_CHAT_DIR 16 | workspace_chat_dir = get_workspace_chat_dir(workspace_path) 17 | 18 | model, config = get_model_config(user_chat_dir) 19 | openai_config = OpenAIChatConfig(model=model, **config.dict(exclude_unset=True)) 20 | 21 | chat = OpenAIChat(openai_config) 22 | store = Store(workspace_chat_dir, chat) 23 | 24 | logs = store.select_prompts(offset, offset + limit, topic_root_hash) 25 | for log in logs: 26 | try: 27 | short_logs.append(log.shortlog()) 28 | except Exception: 29 | # TODO: log the error 30 | continue 31 | 32 | return short_logs 33 | 34 | 35 | def get_topics( 36 | limit: int, offset: int, workspace_path: Optional[str], with_deleted: bool = False 37 | ) -> List[Dict]: 38 | topics = [] 39 | 40 | user_chat_dir = USER_CHAT_DIR 41 | workspace_chat_dir = get_workspace_chat_dir(workspace_path) 42 | 43 | model, config = get_model_config(user_chat_dir) 44 | 
openai_config = OpenAIChatConfig(model=model, **config.dict(exclude_unset=True))
45 | 
46 |     chat = OpenAIChat(openai_config)
47 |     store = Store(workspace_chat_dir, chat)
48 | 
49 |     topics = store.select_topics(offset, offset + limit)
50 | 
51 |     if not with_deleted:
52 |         # filter out deleted topics
53 |         record_file = os.path.join(workspace_chat_dir, ".deletedTopics")
54 |         if os.path.exists(record_file):
55 |             with open(record_file, "r") as f:
56 |                 deleted_topics = f.read().split("\n")
57 | 
58 |             topics = [t for t in topics if t["root_prompt"]["hash"] not in deleted_topics]
59 | 
60 |     return topics
61 | 
62 | 
63 | def delete_topic(topic_hash: str, workspace_path: Optional[str]):
64 |     """
65 |     Logically delete a topic
66 |     """
67 |     workspace_chat_dir = get_workspace_chat_dir(workspace_path)
68 | 
69 |     record_file = os.path.join(workspace_chat_dir, ".deletedTopics")
70 |     if not os.path.exists(record_file):
71 |         with open(record_file, "w") as f:
72 |             f.write(topic_hash)
73 |     else:
74 |         with open(record_file, "a") as f:
75 |             f.write(f"\n{topic_hash}")
76 | 
77 |     return
78 | 
-------------------------------------------------------------------------------- /devchat/_cli/route.py: --------------------------------------------------------------------------------
1 | import sys
2 | from typing import List, Optional
3 | 
4 | import click
5 | 
6 | 
7 | @click.command(help="Route a prompt to the specified LLM")
8 | @click.argument("content", required=False)
9 | @click.option("-p", "--parent", help="Input the parent prompt hash to continue the conversation.")
10 | @click.option(
11 |     "-r",
12 |     "--reference",
13 |     multiple=True,
14 |     help="Input one or more specific previous prompts to include in the current prompt.",
15 | )
16 | @click.option(
17 |     "-i", "--instruct", multiple=True, help="Add one or more files to the prompt as instructions."
18 | )
19 | @click.option(
20 |     "-c", "--context", multiple=True, help="Add one or more files to the prompt as a context."
21 | )
22 | @click.option("-m", "--model", help="Specify the model to use for the prompt.")
23 | @click.option(
24 |     "--config",
25 |     "config_str",
26 |     help="Specify a JSON string to overwrite the default configuration for this prompt.",
27 | )
28 | @click.option(
29 |     "-a",
30 |     "--auto",
31 |     is_flag=True,
32 |     default=False,
33 |     required=False,
34 |     help="Answer questions by function-calling.",
35 | )
36 | def route(
37 |     content: Optional[str],
38 |     parent: Optional[str],
39 |     reference: Optional[List[str]],
40 |     instruct: Optional[List[str]],
41 |     context: Optional[List[str]],
42 |     model: Optional[str],
43 |     config_str: Optional[str] = None,
44 |     auto: Optional[bool] = False,
45 | ):
46 |     """
47 |     This command performs interactions with the specified large language model (LLM)
48 |     by sending prompts and receiving responses.
49 | 
50 |     Examples
51 |     --------
52 | 
53 |     To send a multi-line message to the LLM, use the here-doc syntax:
54 | 
55 |     ```bash
56 |     devchat prompt << 'EOF'
57 |     What is the capital of France?
58 |     Can you tell me more about its history?
59 |     EOF
60 |     ```
61 | 
62 |     Note the quotes around EOF in the first line, to prevent the shell from expanding variables.
63 | 
64 |     Configuration
65 |     -------------
66 | 
67 |     DevChat CLI reads configuration from `~/.chat/config.yml`
68 |     (if `~/.chat` is not accessible, it will try `.chat` in your current Git or SVN root directory).
69 |     You can edit the file to modify default configuration.
70 | 
71 |     To use OpenAI's APIs, you have to set an API key by the environment variable `OPENAI_API_KEY`.
72 |     Run the following command line with your API key:
73 | 
74 |     ```bash
75 |     export OPENAI_API_KEY="sk-..."
76 |     ```
77 | 
78 |     """
79 |     from devchat._cli.router import llm_route
80 | 
81 |     llm_route(content, parent, reference, instruct, context, model, config_str, auto)
82 |     sys.exit(0)
-------------------------------------------------------------------------------- /devchat/_service/schema/response.py: --------------------------------------------------------------------------------
1 | from typing import Dict, List, Optional
2 | 
3 | from pydantic import BaseModel, Field
4 | 
5 | 
6 | class MessageCompletionChunk(BaseModel):
7 |     # TODO: add response hash
8 |     # response_hash: str = Field(
9 |     #     ...,
10 |     #     description="response hash, all chunks in a response should have the same hash",
11 |     # )
12 |     user: str = Field(..., description="user info")
13 |     date: str = Field(..., description="date time")
14 |     content: str = Field(..., description="chunk content")
15 |     finish_reason: str = Field(default="", description="finish reason")
16 |     # TODO: should handle isError in another way?
17 |     isError: bool = Field(default=False, description="is error")
18 |     extra: Dict = Field(default_factory=dict, description="extra data")
19 | 
20 | 
21 | class InsertLog(BaseModel):
22 |     hash: Optional[str] = Field(None, description="hash of the inserted data")
23 | 
24 | 
25 | class DeleteLog(BaseModel):
26 |     success: bool = Field(..., description="success status")
27 | 
28 | 
29 | class ShortLog(BaseModel):
30 |     user: str = Field(..., description="user id (name and email)")
31 |     date: int = Field(..., description="timestamp")
32 |     context: List[Dict] = Field(..., description="context data")
33 |     request: str = Field(..., description="request content(message)")
34 |     responses: List[str] = Field(..., description="response contents(messages)")
35 |     request_tokens: int = Field(..., description="number of tokens in the request")
36 |     response_tokens: int = Field(..., description="number of tokens in the response")
37 |     hash: str = Field(..., description="hash of the log record")
38 |     parent: Optional[str] = Field(None, description="hash of the parent log record")
39 | 
40 | 
41 | class TopicSummary(BaseModel):
42 |     latest_time: int = Field(..., description="timestamp of the latest log")
43 |     hidden: bool = Field(..., description="hidden status of the topic")
44 |     # root prompt info
45 |     root_prompt_hash: str = Field(..., description="hash of the root prompt")
46 |     root_prompt_user: str = Field(..., description="user of the root prompt")
47 |     root_prompt_date: int = Field(..., description="timestamp")
48 |     root_prompt_request: str = Field(..., description="truncated request content(message)")
49 |     root_prompt_response: str = Field(..., description="truncated response content(message)")
50 |     title: Optional[str] = Field(None, description="title of the topic")
51 | 
52 | 
53 | class DeleteTopic(BaseModel):
54 |     topic_hash: str = Field(..., description="hash of the deleted topic")
55 | 
56 | 
57 | class UpdateWorkflows(BaseModel):
58 |     updated: bool = Field(..., description="Whether the workflows are updated.")
59 |     message: str = Field(..., description="The message of the update.")
60 | 
-------------------------------------------------------------------------------- /devchat/chatmark/form.py: --------------------------------------------------------------------------------
1 | from typing import Dict, List, Optional, Union
2 | 
3 | from .iobase import pipe_interaction
4 | from .widgets import Button, Widget
5 | 
6 | 
7 | class Form:
| """ 9 | A container for different widgets 10 | 11 | Syntax: 12 | """ 13 | 14 | def __init__( 15 | self, 16 | components: List[Union[Widget, str]], 17 | title: Optional[str] = None, 18 | submit_button_name: Optional[str] = None, 19 | cancel_button_name: Optional[str] = None, 20 | ): 21 | """ 22 | components: components in the form, can be widgets (except Button) or strings 23 | title: title of the form 24 | """ 25 | assert ( 26 | any(isinstance(c, Button) for c in components) is False 27 | ), "Button is not allowed in Form" 28 | 29 | self._components = components 30 | self._title = title 31 | 32 | self._rendered = False 33 | self._submit = submit_button_name 34 | self._cancel = cancel_button_name 35 | 36 | @property 37 | def components(self) -> List[Union[Widget, str]]: 38 | """ 39 | Return the components 40 | """ 41 | 42 | return self._components 43 | 44 | def _in_chatmark(self) -> str: 45 | """ 46 | Generate ChatMark syntax for all components 47 | """ 48 | lines = [] 49 | 50 | if self._title: 51 | lines.append(self._title) 52 | 53 | for c in self.components: 54 | if isinstance(c, str): 55 | lines.append(c) 56 | elif isinstance(c, Widget): 57 | lines.append(c._in_chatmark()) 58 | else: 59 | raise ValueError(f"Invalid component {c}") 60 | 61 | return "\n".join(lines) 62 | 63 | def _parse_response(self, response: Dict): 64 | """ 65 | Parse response from user input 66 | """ 67 | for c in self.components: 68 | if isinstance(c, Widget): 69 | c._parse_response(response) 70 | 71 | def render(self): 72 | """ 73 | Render to receive user input 74 | """ 75 | if self._rendered: 76 | # already rendered once 77 | # not sure if the constraint is necessary 78 | # could be removed if re-rendering is needed 79 | raise RuntimeError("Widget can only be rendered once") 80 | 81 | self._rendered = True 82 | 83 | chatmark_header = "```chatmark" 84 | chatmark_header += f" submit={self._submit}" if self._submit else "" 85 | chatmark_header += f" cancel={self._cancel}" if self._cancel else "" 86 | 87 | lines = [ 88 | chatmark_header, 89 | self._in_chatmark(), 90 | "```", 91 | ] 92 | 93 | chatmark = "\n".join(lines) 94 | response = pipe_interaction(chatmark) 95 | self._parse_response(response) 96 | -------------------------------------------------------------------------------- /devchat/msg/log_util.py: -------------------------------------------------------------------------------- 1 | import json 2 | import time 3 | from dataclasses import dataclass, field 4 | from typing import Dict, List, Optional, Tuple 5 | 6 | from devchat._cli.utils import get_model_config 7 | from devchat.openai.openai_chat import OpenAIChat, OpenAIChatConfig, OpenAIPrompt 8 | from devchat.store import Store 9 | from devchat.workspace_util import USER_CHAT_DIR, get_workspace_chat_dir 10 | 11 | from .user_info import user_info 12 | 13 | 14 | @dataclass 15 | class PromptData: 16 | model: str = "none" 17 | messages: Optional[List[Dict]] = field(default_factory=list) 18 | parent: Optional[str] = None 19 | references: Optional[List[str]] = field(default_factory=list) 20 | timestamp: int = time.time() 21 | request_tokens: int = 0 22 | response_tokens: int = 0 23 | 24 | 25 | def gen_log_prompt(jsondata: Optional[str] = None, filepath: Optional[str] = None) -> OpenAIPrompt: 26 | """ 27 | Generate a hash for a chat record 28 | """ 29 | assert jsondata is not None or filepath is not None, "Either jsondata or filepath is required." 
30 | 
31 |     if jsondata is None:
32 |         with open(filepath, "r", encoding="utf-8") as f:
33 |             jsondata = f.read()
34 | 
35 |     prompt_data = PromptData(**json.loads(jsondata))
36 |     name = user_info.name
37 |     email = user_info.email
38 |     prompt = OpenAIPrompt(prompt_data.model, name, email)
39 | 
40 |     prompt.model = prompt_data.model
41 |     prompt.input_messages(prompt_data.messages)
42 |     prompt.parent = prompt_data.parent
43 |     prompt.references = prompt_data.references
44 |     prompt.timestamp = prompt_data.timestamp
45 |     prompt.request_tokens = prompt_data.request_tokens
46 |     prompt.response_tokens = prompt_data.response_tokens
47 | 
48 |     prompt.finalize_hash()
49 | 
50 |     return prompt
51 | 
52 | 
53 | def insert_log_prompt(prompt: OpenAIPrompt, workspace_path: Optional[str]) -> str:
54 |     """
55 |     Insert a chat record
56 | 
57 |     return the hash of the inserted chat record (prompt)
58 |     """
59 |     user_chat_dir = USER_CHAT_DIR
60 |     workspace_chat_dir = get_workspace_chat_dir(workspace_path)
61 | 
62 |     model, config = get_model_config(user_chat_dir)
63 |     openai_config = OpenAIChatConfig(model=model, **config.dict(exclude_unset=True))
64 | 
65 |     chat = OpenAIChat(openai_config)
66 |     store = Store(workspace_chat_dir, chat)
67 |     _ = store.store_prompt(prompt)
68 | 
69 |     return prompt.hash
70 | 
71 | 
72 | def delete_log_prompt(hash: str, workspace_path: Optional[str]) -> bool:
73 |     """
74 |     Delete a chat record
75 | 
76 |     return:
77 |         success: True if the prompt is deleted successfully, False otherwise
78 |     """
79 |     user_chat_dir = USER_CHAT_DIR
80 |     workspace_chat_dir = get_workspace_chat_dir(workspace_path)
81 | 
82 |     model, config = get_model_config(user_chat_dir)
83 |     openai_config = OpenAIChatConfig(model=model, **config.dict(exclude_unset=True))
84 | 
85 |     chat = OpenAIChat(openai_config)
86 |     store = Store(workspace_chat_dir, chat)
87 | 
88 |     success = store.delete_prompt(hash)
89 | 
90 |     return success
91 | 
-------------------------------------------------------------------------------- /devchat/_cli/prompt.py: --------------------------------------------------------------------------------
1 | import sys
2 | from typing import List, Optional
3 | 
4 | import click
5 | 
6 | 
7 | @click.command(help="Interact with the large language model (LLM).")
8 | @click.argument("content", required=False)
9 | @click.option("-p", "--parent", help="Input the parent prompt hash to continue the conversation.")
10 | @click.option(
11 |     "-r",
12 |     "--reference",
13 |     multiple=True,
14 |     help="Input one or more specific previous prompts to include in the current prompt.",
15 | )
16 | @click.option(
17 |     "-i", "--instruct", multiple=True, help="Add one or more files to the prompt as instructions."
18 | )
19 | @click.option(
20 |     "-c", "--context", multiple=True, help="Add one or more files to the prompt as a context."
21 | ) 22 | @click.option("-m", "--model", help="Specify the model to use for the prompt.") 23 | @click.option( 24 | "--config", 25 | "config_str", 26 | help="Specify a JSON string to overwrite the default configuration for this prompt.", 27 | ) 28 | @click.option( 29 | "-f", 30 | "--functions", 31 | type=click.Path(exists=True), 32 | help="Path to a JSON file with functions for the prompt.", 33 | ) 34 | @click.option( 35 | "-n", 36 | "--function-name", 37 | help="Specify the function name when the content is the output of a function.", 38 | ) 39 | @click.option( 40 | "-ns", 41 | "--not-store", 42 | is_flag=True, 43 | default=False, 44 | required=False, 45 | help="Do not save the conversation to the store.", 46 | ) 47 | def prompt( 48 | content: Optional[str], 49 | parent: Optional[str], 50 | reference: Optional[List[str]], 51 | instruct: Optional[List[str]], 52 | context: Optional[List[str]], 53 | model: Optional[str], 54 | config_str: Optional[str] = None, 55 | functions: Optional[str] = None, 56 | function_name: Optional[str] = None, 57 | not_store: Optional[bool] = False, 58 | ): 59 | """ 60 | This command performs interactions with the specified large language model (LLM) 61 | by sending prompts and receiving responses. 62 | 63 | Examples 64 | -------- 65 | 66 | To send a multi-line message to the LLM, use the here-doc syntax: 67 | 68 | ```bash 69 | devchat prompt << 'EOF' 70 | What is the capital of France? 71 | Can you tell me more about its history? 72 | EOF 73 | ``` 74 | 75 | Note the quotes around EOF in the first line, to prevent the shell from expanding variables. 76 | 77 | Configuration 78 | ------------- 79 | 80 | DevChat CLI reads configuration from `~/.chat/config.yml` 81 | (if `~/.chat` is not accessible, it will try `.chat` in your current Git or SVN root directory). 82 | You can edit the file to modify default configuration. 83 | 84 | To use OpenAI's APIs, you have to set an API key by the environment variable `OPENAI_API_KEY`. 85 | Run the following command line with your API key: 86 | 87 | ```bash 88 | export OPENAI_API_KEY="sk-..." 
89 | ``` 90 | 91 | """ 92 | from devchat._cli.router import llm_prompt 93 | 94 | llm_prompt( 95 | content, 96 | parent, 97 | reference, 98 | instruct, 99 | context, 100 | model, 101 | config_str, 102 | functions, 103 | function_name, 104 | not_store, 105 | ) 106 | sys.exit(0) 107 | -------------------------------------------------------------------------------- /devchat/_service/route/message.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import Iterator, Optional 3 | 4 | from fastapi import APIRouter 5 | from fastapi.responses import StreamingResponse 6 | 7 | from devchat._service.schema import request, response 8 | from devchat.msg.chatting import chatting 9 | from devchat.msg.util import MessageType, mk_meta, route_message_by_content 10 | from devchat.workflow.workflow import Workflow 11 | 12 | router = APIRouter() 13 | 14 | 15 | @router.post("/msg") 16 | def msg( 17 | message: request.UserMessage, 18 | ): 19 | if message.api_key: 20 | os.environ["OPENAI_API_KEY"] = message.api_key 21 | if message.api_base: 22 | os.environ["OPENAI_API_BASE"] = message.api_base 23 | 24 | user_str, date_str = mk_meta() 25 | 26 | message_type, extra = route_message_by_content(message.content) 27 | 28 | if message_type == MessageType.CHATTING: 29 | 30 | def gen_chat_response() -> Iterator[response.MessageCompletionChunk]: 31 | try: 32 | for res in chatting( 33 | content=message.content, 34 | model_name=message.model_name, 35 | parent=message.parent, 36 | workspace=message.workspace, 37 | context_files=message.context, 38 | ): 39 | chunk = response.MessageCompletionChunk( 40 | user=user_str, 41 | date=date_str, 42 | content=res, 43 | ) 44 | yield chunk.json() 45 | except Exception as e: 46 | chunk = response.MessageCompletionChunk( 47 | user=user_str, 48 | date=date_str, 49 | content=str(e), 50 | isError=True, 51 | ) 52 | yield chunk.json() 53 | raise e 54 | 55 | return StreamingResponse(gen_chat_response(), media_type="application/json") 56 | 57 | elif message_type == MessageType.WORKFLOW: 58 | workflow: Workflow 59 | wf_name: str 60 | wf_input: Optional[str] 61 | workflow, wf_name, wf_input = extra 62 | 63 | if workflow.should_show_help(wf_input): 64 | doc = workflow.get_help_doc(wf_input) 65 | 66 | def _gen_res_help() -> Iterator[response.MessageCompletionChunk]: 67 | yield response.MessageCompletionChunk( 68 | user=user_str, date=date_str, content=doc 69 | ).json() 70 | 71 | return StreamingResponse(_gen_res_help(), media_type="application/json") 72 | else: 73 | # return "should run workflow" response 74 | # then the client will trigger the workflow by devchat cli 75 | def _gen_res_run_workflow() -> Iterator[response.MessageCompletionChunk]: 76 | yield response.MessageCompletionChunk( 77 | user=user_str, 78 | date=date_str, 79 | content="", 80 | finish_reason="should_run_workflow", 81 | extra={"workflow_name": wf_name, "workflow_input": wf_input}, 82 | ).json() 83 | 84 | return StreamingResponse( 85 | _gen_res_run_workflow(), 86 | media_type="application/json", 87 | ) 88 | 89 | else: 90 | # Should not reach here 91 | chunk = response.MessageCompletionChunk( 92 | user=user_str, 93 | date=date_str, 94 | content="", 95 | ) 96 | return StreamingResponse((chunk.json() for _ in [1]), media_type="application/json") 97 | -------------------------------------------------------------------------------- /devchat/llm/chat.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import sys 
4 | from functools import wraps 5 | 6 | from devchat.memory import ChatMemory 7 | 8 | from .openai import ( 9 | chat_completion_no_stream_return_json, 10 | chat_completion_stream, 11 | chat_completion_stream_commit, 12 | chunks_content, 13 | retry_timeout, 14 | stream_out_chunk, 15 | to_dict_content_and_call, 16 | ) 17 | from .pipeline import exception_handle, pipeline, retry 18 | 19 | chat_completion_stream_out = exception_handle( 20 | retry( 21 | pipeline( 22 | chat_completion_stream_commit, 23 | retry_timeout, 24 | stream_out_chunk, 25 | chunks_content, 26 | to_dict_content_and_call, 27 | ), 28 | times=3, 29 | ), 30 | lambda err: { 31 | "content": None, 32 | "function_name": None, 33 | "parameters": "", 34 | "error": err, 35 | }, 36 | ) 37 | 38 | 39 | def chat( 40 | prompt, 41 | memory: ChatMemory = None, 42 | stream_out: bool = False, 43 | model: str = os.environ.get("LLM_MODEL", "gpt-3.5-turbo-1106"), 44 | **llm_config, 45 | ): 46 | def decorator(func): 47 | @wraps(func) 48 | def wrapper(*args, **kwargs): 49 | nonlocal prompt, memory, model, llm_config 50 | prompt_new = prompt.format(**kwargs) 51 | messages = memory.contexts() if memory else [] 52 | if not any(item["content"] == prompt_new for item in messages) and prompt_new: 53 | messages.append({"role": "user", "content": prompt_new}) 54 | if "__user_request__" in kwargs: 55 | messages.append(kwargs["__user_request__"]) 56 | del kwargs["__user_request__"] 57 | 58 | llm_config["model"] = model 59 | if not stream_out: 60 | response = chat_completion_stream(messages, llm_config=llm_config) 61 | else: 62 | response = chat_completion_stream_out(messages, llm_config=llm_config) 63 | if not response.get("content", None): 64 | print(response["error"], file=sys.stderr) 65 | return None 66 | 67 | if memory: 68 | memory.append( 69 | {"role": "user", "content": prompt_new}, 70 | {"role": "assistant", "content": response["content"]}, 71 | ) 72 | return response["content"] 73 | 74 | return wrapper 75 | 76 | return decorator 77 | 78 | 79 | def chat_json( 80 | prompt, 81 | memory: ChatMemory = None, 82 | model: str = os.environ.get("LLM_MODEL", "gpt-3.5-turbo-1106"), 83 | **llm_config, 84 | ): 85 | def decorator(func): 86 | @wraps(func) 87 | def wrapper(*args, **kwargs): 88 | nonlocal prompt, memory, model, llm_config 89 | prompt_new = prompt.format(**kwargs) 90 | messages = memory.contexts() if memory else [] 91 | if not any(item["content"] == prompt_new for item in messages): 92 | messages.append({"role": "user", "content": prompt_new}) 93 | 94 | llm_config["model"] = model 95 | response = chat_completion_no_stream_return_json(messages, llm_config=llm_config) 96 | if not response: 97 | print(f"call {func.__name__} failed.", file=sys.stderr) 98 | 99 | if memory: 100 | memory.append( 101 | {"role": "user", "content": prompt_new}, 102 | {"role": "assistant", "content": json.dumps(response)}, 103 | ) 104 | return response 105 | 106 | return wrapper 107 | 108 | return decorator 109 | -------------------------------------------------------------------------------- /devchat/openai/openai_message.py: -------------------------------------------------------------------------------- 1 | import ast 2 | import json 3 | from dataclasses import asdict, dataclass, field, fields 4 | from typing import Dict, Optional 5 | 6 | from devchat.message import Message 7 | 8 | 9 | @dataclass 10 | class OpenAIMessage(Message): 11 | role: str = None 12 | name: Optional[str] = None 13 | function_call: Dict[str, str] = field(default_factory=dict) 14 | 15 | def 
__post_init__(self): 16 |         if not self._validate_role(): 17 |             raise ValueError("Invalid role. Must be one of 'system', 'user', 'assistant', or 'function'.") 18 | 19 |         if not self._validate_name(): 20 |             raise ValueError( 21 |                 "Invalid name. Must contain a-z, A-Z, 0-9, and underscores, " 22 |                 "with a maximum length of 64 characters." 23 |             ) 24 | 25 |     def to_dict(self) -> dict: 26 |         state = asdict(self) 27 |         if state["name"] is None: 28 |             del state["name"] 29 |         if not state["function_call"] or len(state["function_call"].keys()) == 0: 30 |             del state["function_call"] 31 |         return state 32 | 33 |     @classmethod 34 |     def from_dict(cls, message_data: dict) -> "OpenAIMessage": 35 |         keys = {f.name for f in fields(cls)} 36 |         kwargs = {k: v for k, v in message_data.items() if k in keys} 37 |         return cls(**kwargs) 38 | 39 |     def function_call_to_json(self): 40 |         ''' 41 |         Convert function_call to a JSON code block. 42 |         function_call looks like this: 43 |         { 44 |             "name": function_name, 45 |             "arguments": '{"key": """value"""}' 46 |         } 47 |         ''' 48 |         if not self.function_call: 49 |             return "" 50 |         function_call_copy = self.function_call.copy() 51 |         if "arguments" in function_call_copy: 52 |             # the arguments field may not be valid JSON, 53 |             # so try to parse it as a Python literal first 54 |             try: 55 |                 function_call_copy["arguments"] = ast.literal_eval(function_call_copy["arguments"]) 56 |             except Exception: 57 |                 # fall back to JSON parsing; if that also fails, leave the string as-is 58 |                 try: 59 |                     function_call_copy["arguments"] = json.loads(function_call_copy["arguments"]) 60 |                 except Exception: 61 |                     pass 62 |         return "```command\n" + json.dumps(function_call_copy) + "\n```" 63 | 64 |     def stream_from_dict(self, message_data: dict) -> str: 65 |         """Append to the message from a dictionary returned from a streaming chat API.""" 66 |         delta = message_data.get("content", "") 67 |         if self.content: 68 |             self.content += delta 69 |         else: 70 |             self.content = delta 71 | 72 |         return delta 73 | 74 |     def _validate_role(self) -> bool: 75 |         """Validate the role attribute. 76 | 77 |         Returns: 78 |             bool: True if the role is valid, False otherwise. 79 |         """ 80 |         return self.role in ["system", "user", "assistant", "function"] 81 | 82 |     def _validate_name(self) -> bool: 83 |         """Validate the name attribute. 84 | 85 |         Returns: 86 |             bool: True if the name is valid or None, False otherwise. 87 |         """ 88 |         return self._validate_string(self.name) 89 | 90 |     def _validate_string(self, string: str) -> bool: 91 |         """Validate a string attribute. 92 | 93 |         Returns: 94 |             bool: True if the string is valid or None, False otherwise.
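        For example, "func_call_1" is valid, while a name containing spaces,
        punctuation other than underscores, or more than 64 characters is not.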
95 | """ 96 | if string is None: 97 | return True 98 | if not string.strip(): 99 | return False 100 | return len(string) <= 64 and string.replace("_", "").isalnum() 101 | -------------------------------------------------------------------------------- /devchat/_service/gunicorn_logging.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | 5 | from gunicorn.app.base import BaseApplication 6 | from gunicorn.glogging import Logger 7 | from loguru import logger 8 | 9 | from devchat._service.config import config 10 | from devchat.workspace_util import get_workspace_chat_dir 11 | 12 | 13 | class InterceptHandler(logging.Handler): 14 | def emit(self, record): 15 | # get corresponding Loguru level if it exists 16 | try: 17 | level = logger.level(record.levelname).name 18 | except ValueError: 19 | level = record.levelno 20 | 21 | # find caller from where originated the logged message 22 | frame, depth = sys._getframe(6), 6 23 | while frame and frame.f_code.co_filename == logging.__file__: 24 | frame = frame.f_back 25 | depth += 1 26 | 27 | logger.opt(depth=depth, exception=record.exc_info).log(level, record.getMessage()) 28 | 29 | 30 | class StubbedGunicornLogger(Logger): 31 | def setup(self, cfg): 32 | handler = logging.NullHandler() 33 | self.error_logger = logging.getLogger("gunicorn.error") 34 | self.error_logger.addHandler(handler) 35 | self.access_logger = logging.getLogger("gunicorn.access") 36 | self.access_logger.addHandler(handler) 37 | self.error_logger.setLevel(config.LOG_LEVEL) 38 | self.access_logger.setLevel(config.LOG_LEVEL) 39 | 40 | 41 | class StandaloneApplication(BaseApplication): 42 | """Our Gunicorn application.""" 43 | 44 | def __init__(self, app, options=None): 45 | self.options = options or {} 46 | self.application = app 47 | super().__init__() 48 | 49 | def load_config(self): 50 | config = { 51 | key: value 52 | for key, value in self.options.items() 53 | if key in self.cfg.settings and value is not None 54 | } 55 | for key, value in config.items(): 56 | self.cfg.set(key.lower(), value) 57 | 58 | def load(self): 59 | return self.application 60 | 61 | 62 | def run_with_gunicorn(app): 63 | intercept_handler = InterceptHandler() 64 | # logging.basicConfig(handlers=[intercept_handler], level=LOG_LEVEL) 65 | # logging.root.handlers = [intercept_handler] 66 | logging.root.setLevel(config.LOG_LEVEL) 67 | 68 | seen = set() 69 | for name in [ 70 | *logging.root.manager.loggerDict.keys(), 71 | "gunicorn", 72 | "gunicorn.access", 73 | "gunicorn.error", 74 | "uvicorn", 75 | "uvicorn.access", 76 | "uvicorn.error", 77 | ]: 78 | if name not in seen: 79 | seen.add(name.split(".")[0]) 80 | logging.getLogger(name).handlers = [intercept_handler] 81 | 82 | workspace_chat_dir = get_workspace_chat_dir(config.WORKSPACE) 83 | log_file = os.path.join(workspace_chat_dir, config.LOG_FILE) 84 | 85 | logger.configure( 86 | handlers=[ 87 | {"sink": sys.stdout, "serialize": config.JSON_LOGS}, 88 | { 89 | "sink": log_file, 90 | "serialize": config.JSON_LOGS, 91 | "rotation": "10 days", 92 | "retention": "30 days", 93 | "enqueue": True, 94 | }, 95 | ] 96 | ) 97 | 98 | options = { 99 | "bind": f"0.0.0.0:{config.PORT}", 100 | "workers": config.WORKERS, 101 | "accesslog": "-", 102 | "errorlog": "-", 103 | "worker_class": "uvicorn.workers.UvicornWorker", 104 | "logger_class": StubbedGunicornLogger, 105 | } 106 | 107 | StandaloneApplication(app, options).run() 108 | 109 | 110 | # 
https://pawamoy.github.io/posts/unify-logging-for-a-gunicorn-uvicorn-app/ 111 | -------------------------------------------------------------------------------- /devchat/_cli/log.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import sys 4 | import time 5 | from dataclasses import dataclass, field 6 | from typing import Dict, List, Optional 7 | 8 | import click 9 | 10 | 11 | @dataclass 12 | class PromptData: 13 |     model: str = "none" 14 |     messages: Optional[List[Dict]] = field(default_factory=list) 15 |     parent: Optional[str] = None 16 |     references: Optional[List[str]] = field(default_factory=list) 17 |     timestamp: int = field(default_factory=lambda: int(time.time()))  # computed per instance, not frozen at import time 18 |     request_tokens: int = 0 19 |     response_tokens: int = 0 20 | 21 | 22 | @click.command(help="Process logs") 23 | @click.option("--skip", default=0, help="Skip the given number of prompts before showing the prompt history.") 24 | @click.option("-n", "--max-count", default=1, help="Limit the number of prompts to output.") 25 | @click.option( 26 |     "-t", 27 |     "--topic", 28 |     "topic_root", 29 |     default=None, 30 |     help="Hash of the root prompt of the topic to select prompts from.", 31 | ) 32 | @click.option("--insert", default=None, help="JSON string of the prompt to insert into the log.") 33 | @click.option("--delete", default=None, help="Hash of the leaf prompt to delete from the log.") 34 | def log(skip, max_count, topic_root, insert, delete): 35 |     """ 36 |     Manage the prompt history. 37 |     """ 38 |     from devchat._cli.utils import get_model_config, handle_errors, init_dir 39 |     from devchat.openai.openai_chat import OpenAIChat, OpenAIChatConfig, OpenAIPrompt 40 |     from devchat.store import Store 41 |     from devchat.utils import get_logger, get_user_info 42 | 43 |     logger = get_logger(__name__) 44 | 45 |     # handle --insert: 46 |     # if insert is a file path, read the file content as the "insert" value, then delete the file 47 |     try: 48 |         if insert and os.path.isfile(insert): 49 |             insert_file = insert 50 |             with open(insert_file, "r", encoding="utf-8") as f: 51 |                 insert = f.read() 52 |             os.remove(insert_file) 53 |     except Exception: 54 |         pass 55 | 56 |     if (insert or delete) and (skip != 0 or max_count != 1 or topic_root is not None): 57 |         print( 58 |             "Error: The --insert or --delete option cannot be used with other options.", 59 |             file=sys.stderr, 60 |         ) 61 |         sys.exit(1) 62 | 63 |     repo_chat_dir, user_chat_dir = init_dir() 64 | 65 |     with handle_errors(): 66 |         model, config = get_model_config(user_chat_dir) 67 |         openai_config = OpenAIChatConfig(model=model, **config.dict(exclude_unset=True)) 68 | 69 |         chat = OpenAIChat(openai_config) 70 |         store = Store(repo_chat_dir, chat) 71 | 72 |         if delete: 73 |             success = store.delete_prompt(delete) 74 |             if success: 75 |                 print(f"Prompt {delete} deleted successfully.") 76 |             else: 77 |                 print(f"Failed to delete prompt {delete}.") 78 |         else: 79 |             if insert: 80 |                 prompt_data = PromptData(**json.loads(insert)) 81 |                 user, email = get_user_info() 82 |                 prompt = OpenAIPrompt(prompt_data.model, user, email) 83 |                 prompt.model = prompt_data.model 84 |                 prompt.input_messages(prompt_data.messages) 85 |                 prompt.parent = prompt_data.parent 86 |                 prompt.references = prompt_data.references 87 |                 prompt.timestamp = prompt_data.timestamp 88 |                 prompt.request_tokens = prompt_data.request_tokens 89 |                 prompt.response_tokens = prompt_data.response_tokens 90 |                 topic_root = store.store_prompt(prompt) 91 | 92 |             recent_prompts = store.select_prompts(skip, skip + max_count, topic_root) 93 |             logs = [] 94 |             for record in recent_prompts:
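                # render each record defensively: one malformed prompt should not abort the listing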
95 |                 try: 96 |                     logs.append(record.shortlog()) 97 |                 except Exception as exc: 98 |                     logger.exception(exc) 99 |                     continue 100 |             print(json.dumps(logs, indent=2)) 101 | -------------------------------------------------------------------------------- /devchat/openai/http_openai.py: -------------------------------------------------------------------------------- 1 | """ 2 | OpenAI API calls via the Python http.client module 3 | """ 4 | 5 | import http.client 6 | import json 7 | import os 8 | import ssl 9 | import sys 10 | from urllib.parse import urlparse 11 | 12 | 13 | class LineReader: 14 |     """Iterate over the data lines of a streaming response""" 15 | 16 |     def __init__(self, response): 17 |         self.response = response 18 | 19 |     def __iter__(self): 20 |         return self 21 | 22 |     def __next__(self): 23 |         line = self.response.readline() 24 |         if not line: 25 |             raise StopIteration 26 |         line = line.strip() 27 |         if not line: 28 |             return self.__next__() 29 |         line = line.decode("utf-8") 30 |         if not line.startswith("data:"): 31 |             print(f"Received invalid line: {line}", end="\n\n", file=sys.stderr) 32 |             raise ValueError(f"Invalid line: {line}") 33 | 34 |         if line[5:].strip() == "[DONE]": 35 |             raise StopIteration 36 |         try: 37 |             return json.loads(line[5:]) 38 |         except json.JSONDecodeError as err: 39 |             print(f"Error decoding JSON: {err}", end="\n\n", file=sys.stderr) 40 |             raise ValueError(f"Invalid line: {line}") from err 41 | 42 | 43 | def stream_response(connection: http.client.HTTPSConnection, data, headers): 44 |     """stream response from openai api""" 45 |     connection.request("POST", "/v1/chat/completions", body=json.dumps(data), headers=headers) 46 |     response = connection.getresponse() 47 | 48 |     if response.status != 200: 49 |         response_body = response.read().decode("utf-8") 50 |         print( 51 |             f"received status code: {response.status} - reason: {response.reason}\n\n" 52 |             f"response: {response_body}", 53 |             end="\n\n", 54 |             file=sys.stderr, 55 |         ) 56 | 57 |         try: 58 |             error_detail = json.loads(response_body).get("detail", "No detail provided") 59 |         except json.JSONDecodeError: 60 |             error_detail = "Failed to decode JSON response" 61 | 62 |         raise ValueError( 63 |             f"Received status code: {response.status} - reason: {response.reason}" 64 |             f" - detail: {error_detail}" 65 |         ) 66 |     return LineReader(response=response) 67 | 68 | 69 | def stream_request(api_key, api_base, data): 70 |     """stream request to openai api""" 71 |     try: 72 |         headers = { 73 |             "Content-Type": "application/json", 74 |             "Authorization": f"Bearer {api_key}", 75 |         } 76 | 77 |         if api_base.startswith("https://"): 78 |             url = api_base[8:] 79 |         elif api_base.startswith("http://"): 80 |             url = api_base[7:] 81 |         else: 82 |             print("Invalid API base URL", end="\n\n", file=sys.stderr) 83 |             raise ValueError("Invalid API base URL") 84 | 85 |         url = url.split("/")[0] 86 |         proxy_url = os.environ.get("DEVCHAT_PROXY", "") 87 |         parsed_url = urlparse(proxy_url) 88 |         proxy_setting = { 89 |             "host": parsed_url.hostname, 90 |             **({"port": parsed_url.port} if parsed_url.port else {}), 91 |         } 92 | 93 |         if api_base.startswith("https://"): 94 |             if proxy_setting["host"]: 95 |                 connection = http.client.HTTPSConnection( 96 |                     **proxy_setting, context=ssl._create_unverified_context() 97 |                 ) 98 |                 connection.set_tunnel(url) 99 |             else: 100 |                 connection = http.client.HTTPSConnection( 101 |                     url, context=ssl._create_unverified_context() 102 |                 ) 103 |         else: 104 |             if proxy_setting["host"]: 105 |                 connection = http.client.HTTPConnection(**proxy_setting) 106 |                 connection.set_tunnel(url) 107 |             else: 108 |                 connection = http.client.HTTPConnection(url)
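        # At this point `connection` is either a direct connection to the API host
        # or a tunnel through the DEVCHAT_PROXY host; stream_response then issues
        # the POST and returns a LineReader over the "data:" chunks of the stream.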
109 | 110 |         return stream_response(connection, data, headers) 111 |     except Exception as err: 112 |         print(err, end="\n\n", file=sys.stderr) 113 |         raise err from err 114 | -------------------------------------------------------------------------------- /devchat/chatmark/chatmark_example/main.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from devchat.chatmark import Button, Checkbox, Form, Radio, Step, TextEditor 4 | 5 | 6 | def main(): 7 |     print("\n\n---\n\n") 8 | 9 |     # Step 10 |     print("\n\n# Step Example\n\n") 11 |     with Step("Something is running..."): 12 |         print("Will sleep for 5 seconds...", flush=True) 13 |         time.sleep(5) 14 |         print("Done", flush=True) 15 | 16 |     print("\n\n# Step Example with exception\n\n") 17 |     try: 18 |         with Step("Something is running (will raise exception)..."): 19 |             print("Will sleep for 5 seconds...", flush=True) 20 |             time.sleep(5) 21 |             raise Exception("oops!") 22 | 23 |     except Exception: 24 |         pass 25 | 26 |     # Button 27 |     print("\n\n# Button Example\n\n") 28 |     button = Button( 29 |         [ 30 |             "Yes", 31 |             "Or", 32 |             "No", 33 |         ], 34 |     ) 35 |     button.render() 36 | 37 |     idx = button.clicked 38 |     print("\n\nButton result\n\n") 39 |     print(f"\n\n{idx}: {button.buttons[idx]}\n\n") 40 | 41 |     print("\n\n---\n\n") 42 | 43 |     # Checkbox 44 |     print("\n\n# Checkbox Example\n\n") 45 |     checkbox = Checkbox( 46 |         [ 47 |             "A", 48 |             "B", 49 |             "C", 50 |             "D", 51 |         ], 52 |         [True, False, False, True], 53 |     ) 54 |     checkbox.render() 55 | 56 |     print(f"\n\ncheckbox.selections: {checkbox.selections}\n\n") 57 |     for idx in checkbox.selections: 58 |         print(f"\n\n{idx}: {checkbox.options[idx]}\n\n") 59 | 60 |     print("\n\n---\n\n") 61 | 62 |     # TextEditor 63 |     print("\n\n# TextEditor Example\n\n") 64 |     text_editor = TextEditor( 65 |         "hello world\nnice to meet you", 66 |     ) 67 | 68 |     text_editor.render() 69 | 70 |     print(f"\n\ntext_editor.new_text:\n\n{text_editor.new_text}\n\n") 71 | 72 |     print("\n\n---\n\n") 73 | 74 |     # Radio 75 |     print("\n\n# Radio Example\n\n") 76 |     radio = Radio( 77 |         [ 78 |             "Sun", 79 |             "Moon", 80 |             "Star", 81 |         ], 82 |     ) 83 |     radio.render() 84 | 85 |     print(f"\n\nradio.selection: {radio.selection}\n\n") 86 |     if radio.selection is not None: 87 |         print(f"\n\nradio.options[radio.selection]: {radio.options[radio.selection]}\n\n") 88 | 89 |     print("\n\n---\n\n") 90 | 91 |     # Form 92 |     print("\n\n# Form Example\n\n") 93 |     checkbox_1 = Checkbox( 94 |         [ 95 |             "Spring", 96 |             "Summer", 97 |             "Autumn", 98 |             "Winter", 99 |         ] 100 |     ) 101 |     checkbox_2 = Checkbox( 102 |         [ 103 |             "金", 104 |             "木", 105 |             "水", 106 |             "火", 107 |             "土", 108 |         ], 109 |     ) 110 |     radio_1 = Radio( 111 |         [ 112 |             "Up", 113 |             "Down", 114 |         ], 115 |     ) 116 |     radio_2 = Radio( 117 |         [ 118 |             "Left", 119 |             "Center", 120 |             "Right", 121 |         ], 122 |     ) 123 |     text_editor_1 = TextEditor( 124 |         "hello world\nnice to meet you", 125 |     ) 126 |     text_editor_2 = TextEditor( 127 |         "hihihihihi", 128 |     ) 129 | 130 |     form = Form( 131 |         [ 132 |             "Some string in a form", 133 |             checkbox_1, 134 |             "Another string in a form", 135 |             radio_1, 136 |             "the third string in a form", 137 |             checkbox_2, 138 |             "the fourth string in a form", 139 |             radio_2, 140 |             "the fifth string in a form", 141 |             text_editor_1, 142 |             "the last string in a form", 143 |             text_editor_2, 144 |         ], 145 |     ) 146 | 147 |     form.render() 148 | 149 |     print(f"\n\ncheckbox_1.selections: {checkbox_1.selections}\n\n") 150 |     print(f"\n\ncheckbox_2.selections: {checkbox_2.selections}\n\n") 151 |     print(f"\n\nradio_1.selection: {radio_1.selection}\n\n") 152 | 
print(f"\n\nradio_2.selection: {radio_2.selection}\n\n") 153 | print(f"\n\ntext_editor_1.new_text:\n\n{text_editor_1.new_text}\n\n") 154 | print(f"\n\ntext_editor_2.new_text:\n\n{text_editor_2.new_text}\n\n") 155 | 156 | 157 | if __name__ == "__main__": 158 | main() 159 | -------------------------------------------------------------------------------- /devchat/workflow/namespace.py: -------------------------------------------------------------------------------- 1 | """ 2 | Namespace management for workflows 3 | """ 4 | 5 | import os 6 | from pathlib import Path 7 | from typing import Dict, List, Set, Tuple 8 | 9 | import oyaml as yaml 10 | import yaml as pyyaml 11 | from pydantic import BaseModel, Extra, Field, ValidationError 12 | 13 | from devchat.utils import get_logger 14 | 15 | from .path import ( 16 | COMMAND_FILENAMES, 17 | COMMUNITY_WORKFLOWS, 18 | CUSTOM_BASE, 19 | CUSTOM_CONFIG_FILE, 20 | MERICO_WORKFLOWS, 21 | ) 22 | 23 | logger = get_logger(__name__) 24 | 25 | 26 | class CustomConfig(BaseModel): 27 | namespaces: List[str] = [] # active namespaces ordered by priority 28 | 29 | class Config: 30 | extra = Extra.ignore 31 | 32 | 33 | class WorkflowMeta(BaseModel): 34 | name: str = Field(..., description="workflow name") 35 | namespace: str = Field(..., description="workflow namespace") 36 | active: bool = Field(..., description="active flag") 37 | command_conf: Dict = Field(description="command configuration", default_factory=dict) 38 | 39 | def __str__(self): 40 | return f"{'*' if self.active else ' '} {self.name} ({self.namespace})" 41 | 42 | 43 | def _load_custom_config() -> CustomConfig: 44 | """ 45 | Load the custom config file. 46 | """ 47 | config = CustomConfig() 48 | 49 | if not os.path.exists(CUSTOM_CONFIG_FILE): 50 | return config 51 | 52 | with open(CUSTOM_CONFIG_FILE, "r", encoding="utf-8") as file: 53 | content = file.read() 54 | yaml_content = yaml.safe_load(content) 55 | try: 56 | if yaml_content: 57 | config = CustomConfig.parse_obj(yaml_content) 58 | except ValidationError as err: 59 | logger.warning("Invalid custom config file: %s", err) 60 | 61 | return config 62 | 63 | 64 | def get_prioritized_namespace_path() -> List[str]: 65 | """ 66 | Get the prioritized namespaces. 67 | 68 | priority: custom > merico > community 69 | """ 70 | config = _load_custom_config() 71 | 72 | namespaces = config.namespaces 73 | 74 | namespace_paths = [os.path.join(CUSTOM_BASE, ns) for ns in namespaces] 75 | 76 | namespace_paths.append(MERICO_WORKFLOWS) 77 | namespace_paths.append(COMMUNITY_WORKFLOWS) 78 | 79 | return namespace_paths 80 | 81 | 82 | def iter_namespace(ns_path: str, existing_names: Set[str]) -> Tuple[List[WorkflowMeta], Set[str]]: 83 | """ 84 | Get all workflows under the namespace path. 
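    A workflow is identified by a command file (one of COMMAND_FILENAMES) in a
    nested directory; the directory parts relative to the namespace root, joined
    by dots, form the workflow name.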
85 | 86 |     Args: 87 |         ns_path: the namespace path 88 |         existing_names: existing workflow names, used to decide whether a workflow has first priority 89 | 90 |     Returns: 91 |         List[WorkflowMeta]: the workflows 92 |         Set[str]: the updated existing workflow names 93 |     """ 94 |     root = Path(ns_path) 95 |     interest_files = set(COMMAND_FILENAMES) 96 |     result = [] 97 |     unique_names = set(existing_names) 98 |     for file in root.rglob("*"): 99 |         try: 100 |             if file.is_file() and file.name in interest_files: 101 |                 rel_path = file.relative_to(root) 102 |                 parts = rel_path.parts 103 |                 workflow_name = ".".join(parts[:-1]) 104 |                 is_first = workflow_name not in unique_names 105 | 106 |                 # load the config content from file 107 |                 with open(file, "r", encoding="utf-8") as file_handle: 108 |                     yaml_content = file_handle.read() 109 |                 command_conf = yaml.safe_load(yaml_content) or {}  # an empty or comment-only file loads as None 110 |                 # pop the "steps" field 111 |                 command_conf.pop("steps", None) 112 | 113 |                 workflow = WorkflowMeta( 114 |                     name=workflow_name, 115 |                     namespace=root.name, 116 |                     active=is_first, 117 |                     command_conf=command_conf, 118 |                 ) 119 |                 unique_names.add(workflow_name) 120 |                 result.append(workflow) 121 |         except pyyaml.scanner.ScannerError as err: 122 |             logger.error("Failed to load %s: %s", file, err) 123 |         except Exception as err: 124 |             logger.error("Unknown error when loading %s: %s", file, err) 125 | 126 |     return result, unique_names 127 | 128 | 129 | def main(): 130 |     paths = get_prioritized_namespace_path() 131 |     for pathv in paths: 132 |         print(pathv) 133 | 134 | 135 | if __name__ == "__main__": 136 |     main() 137 | -------------------------------------------------------------------------------- /devchat/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from typing import Dict, List, Optional, Tuple 4 | 5 | import oyaml as yaml 6 | from pydantic import BaseModel 7 | 8 | 9 | class GeneralProviderConfig(BaseModel): 10 |     api_key: Optional[str] 11 |     api_base: Optional[str] 12 | 13 | 14 | class ModelConfig(BaseModel): 15 |     max_input_tokens: Optional[int] = sys.maxsize 16 |     provider: Optional[str] 17 | 18 | 19 | class GeneralModelConfig(ModelConfig): 20 |     max_tokens: Optional[int] 21 |     stop_sequences: Optional[List[str]] 22 |     temperature: Optional[float] 23 |     top_p: Optional[float] 24 |     top_k: Optional[int] 25 |     stream: Optional[bool] 26 | 27 | 28 | class ChatConfig(BaseModel): 29 |     providers: Optional[Dict[str, GeneralProviderConfig]] 30 |     models: Dict[str, GeneralModelConfig] 31 |     default_model: Optional[str] 32 | 33 | 34 | class ConfigManager: 35 |     def __init__(self, dir_path: str): 36 |         self.config_path = os.path.join(dir_path, "config.yml") 37 |         if not os.path.exists(self.config_path): 38 |             self._create_sample_file() 39 |             self._file_is_new = True 40 |         else: 41 |             self._file_is_new = False 42 |         self.config = self._load_and_validate_config() 43 | 44 |     @property 45 |     def file_is_new(self) -> bool: 46 |         return self._file_is_new 47 | 48 |     @property 49 |     def file_last_modified(self) -> float: 50 |         return os.path.getmtime(self.config_path) 51 | 52 |     def _load_and_validate_config(self) -> ChatConfig: 53 |         with open(self.config_path, "r", encoding="utf-8") as file: 54 |             data = yaml.safe_load(file) 55 | 56 |         if "providers" in data: 57 |             for provider, config in data["providers"].items(): 58 |                 data["providers"][provider] = GeneralProviderConfig(**config) 59 |         for model, config in data["models"].items(): 60 |             data["models"][model] = GeneralModelConfig(**config) 61 | 62 |         return
ChatConfig(**data) 63 | 64 | def model_config(self, model_id: Optional[str] = None) -> Tuple[str, ModelConfig]: 65 | if not model_id: 66 | if self.config.default_model: 67 | return self.model_config(self.config.default_model) 68 | if self.config.models: 69 | return next(iter(self.config.models.items())) 70 | raise ValueError(f"No models found in {self.config_path}") 71 | if model_id not in self.config.models: 72 | raise ValueError(f"Model '{model_id}' not found in {self.config_path}") 73 | return model_id, self.config.models[model_id] 74 | 75 | def update_model_config( 76 | self, model_id: str, new_config: GeneralModelConfig 77 | ) -> GeneralModelConfig: 78 | _, old_config = self.model_config(model_id) 79 | if new_config.max_input_tokens is not None: 80 | old_config.max_input_tokens = new_config.max_input_tokens 81 | updated_parameters = old_config.dict(exclude_unset=True) 82 | updated_parameters.update(new_config.dict(exclude_unset=True)) 83 | self.config.models[model_id] = type(new_config)(**updated_parameters) 84 | return self.config.models[model_id] 85 | 86 | def sync(self): 87 | with open(self.config_path, "w", encoding="utf-8") as file: 88 | yaml.dump(self.config.dict(exclude_unset=True), file) 89 | 90 | def _create_sample_file(self): 91 | sample_config = ChatConfig( 92 | providers={ 93 | "devchat.ai": GeneralProviderConfig(api_key=""), 94 | "openai.com": GeneralProviderConfig(api_key=""), 95 | "general": GeneralProviderConfig(), 96 | }, 97 | models={ 98 | "gpt-4": GeneralModelConfig( 99 | max_input_tokens=6000, provider="devchat.ai", temperature=0, stream=True 100 | ), 101 | "gpt-3.5-turbo-16k": GeneralModelConfig( 102 | max_input_tokens=12000, provider="devchat.ai", temperature=0, stream=True 103 | ), 104 | "gpt-3.5-turbo": GeneralModelConfig( 105 | max_input_tokens=3000, provider="devchat.ai", temperature=0, stream=True 106 | ), 107 | "claude-2": GeneralModelConfig(provider="general", max_tokens=20000), 108 | }, 109 | default_model="gpt-3.5-turbo", 110 | ) 111 | with open(self.config_path, "w", encoding="utf-8") as file: 112 | yaml.dump(sample_config.dict(exclude_unset=True), file) 113 | -------------------------------------------------------------------------------- /tests/test_command_parser.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tempfile 3 | 4 | import pytest 5 | 6 | from devchat.engine import Command, CommandParser, Namespace, parse_command 7 | 8 | 9 | def test_parse_command(): 10 | # Test with a valid configuration file with most fields filled 11 | with tempfile.NamedTemporaryFile("w", delete=False) as config_file: 12 | config_file.write(""" 13 | description: Get the current weather in a given location 14 | parameters: 15 | location: 16 | type: string 17 | description: The city and state, e.g. 
San Francisco, CA 18 | unit: 19 | type: string 20 | enum: [celsius, fahrenheit] 21 | default: celsius 22 | steps: 23 | - run: 24 | ./get_weather --location=$location --unit=$unit 25 | """) 26 | config_file.seek(0) 27 | command = parse_command(config_file.name) 28 | assert isinstance(command, Command) 29 | command = command.dict() 30 | assert command["description"] == "Get the current weather in a given location" 31 | assert "location" in command["parameters"] 32 | assert command["parameters"]["unit"]["default"] == "celsius" 33 | assert command["steps"][0]["run"] == "./get_weather --location=$location --unit=$unit" 34 | 35 | # Test with a valid configuration file with missing optional fields 36 | with tempfile.NamedTemporaryFile("w", delete=False) as config_file: 37 | config_file.write(""" 38 | description: Prompt for /code 39 | parameters: 40 | """) 41 | config_file.seek(0) 42 | command = parse_command(config_file.name) 43 | assert command.parameters is None 44 | assert command.steps is None 45 | 46 | # Test with an invalid configuration file 47 | with tempfile.NamedTemporaryFile("w", delete=False) as config_file: 48 | config_file.write(""" 49 | description: 50 | parameters: 51 | location: 52 | type: string 53 | """) 54 | config_file.seek(0) 55 | with pytest.raises(Exception): 56 | parse_command(config_file.name) 57 | 58 | # Test with a non-existent file 59 | with pytest.raises(FileNotFoundError): 60 | parse_command("path/to/non_existent_file.yml") 61 | 62 | 63 | def test_command_parser(tmp_path): 64 | # Create a Namespace instance with the temporary directory as the root path 65 | namespace = Namespace(tmp_path) 66 | command_parser = CommandParser(namespace) 67 | 68 | # Test with a valid configuration file with most fields filled 69 | os.makedirs(os.path.join(tmp_path, "usr", "a", "b", "c"), exist_ok=True) 70 | command_file_path = os.path.join(tmp_path, "usr", "a", "b", "c", "command.yml") 71 | with open(command_file_path, "w", encoding="utf-8") as file: 72 | file.write(""" 73 | description: Get the current weather in a given location 74 | parameters: 75 | location: 76 | type: string 77 | description: The city and state, e.g. 
San Francisco, CA 78 | unit: 79 | type: string 80 | enum: [celsius, fahrenheit] 81 | default: celsius 82 | steps: 83 | - run: 84 | ./get_weather --location=$location --unit=$unit 85 | """) 86 | command = command_parser.parse("a.b.c") 87 | command = command.dict() 88 | assert command["description"] == "Get the current weather in a given location" 89 | assert "location" in command["parameters"] 90 | assert command["parameters"]["unit"]["default"] == "celsius" 91 | assert command["steps"][0]["run"] == "./get_weather --location=$location --unit=$unit" 92 | 93 | # Test with a valid configuration file with missing optional fields 94 | os.makedirs(os.path.join(tmp_path, "usr", "d", "e", "f"), exist_ok=True) 95 | command_file_path = os.path.join(tmp_path, "usr", "d", "e", "f", "command.yml") 96 | with open(command_file_path, "w", encoding="utf-8") as file: 97 | file.write(""" 98 | description: Prompt for /code 99 | parameters: 100 | """) 101 | command = command_parser.parse("d.e.f") 102 | command = command.dict() 103 | assert command["description"] == "Prompt for /code" 104 | assert command["parameters"] is None 105 | assert command["steps"] is None 106 | 107 | # Test with an invalid configuration file 108 | os.makedirs(os.path.join(tmp_path, "usr", "g", "h", "i"), exist_ok=True) 109 | command_file_path = os.path.join(tmp_path, "usr", "g", "h", "i", "command.yml") 110 | with open(command_file_path, "w", encoding="utf-8") as file: 111 | file.write(""" 112 | description: 113 | parameters: 114 | location: 115 | type: string 116 | """) 117 | with pytest.raises(Exception): 118 | command_parser.parse("g.h.i") 119 | 120 | # Test with a non-existent command 121 | command = command_parser.parse("j.k.l") 122 | assert command is None 123 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
2 | 3 | ![devchat](https://github.com/devchat-ai/devchat/assets/592493/f39979fe-fe32-410b-bf9d-2118ac8ea3d5) 4 | 5 | # DevChat: AI Workflows Generated with Natural Language 6 | 7 | [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg?style=flat-square)](http://makeapullrequest.com) 8 | [![CircleCI](https://circleci.com/gh/devchat-ai/devchat/tree/main.svg?style=shield)](https://circleci.com/gh/devchat-ai/devchat/tree/main) 9 | [![GitHub license](https://img.shields.io/github/license/devchat-ai/devchat.svg)](https://github.com/devchat-ai/devchat/blob/main/LICENSE) 10 | [![Downloads](https://pepy.tech/badge/devchat)](https://pepy.tech/project/devchat) 11 | [![PyPI version](https://badge.fury.io/py/devchat.svg)](https://badge.fury.io/py/devchat) 12 | [![Discord Chat](https://img.shields.io/discord/1106908489114206309?logo=discord)](https://discord.gg/JNyVGz8y) 13 | 14 | #### 🪄 Without leaving the IDE, direct AI to perform tasks using natural language. 15 | 16 | #### 🧩 Bridge the last mile of AI productivity through knowledge engineering. 17 | 18 | #### 🛠️ Tailor the AI coding assistant to align perfectly with your specific needs. 19 | 20 |
21 | 22 | *** 23 | 24 | While tools like GitHub Copilot, Cursor, and Cline are making coding increasingly intelligent, and platforms like Dify, Flowise, and Coze enable drag-and-drop workflows, we developers still find ourselves wading through vast AI-less seas daily, wearied by the myriad tedious processes in development. 25 | 26 | While the complex customization demands of enterprises have turned tool adaptation into a tar pit, hands-on enthusiasts are busily engaged in all sorts of personalized tasks. 27 | Every dev team has its own character and deserves tailored AI services, and implementing these shouldn’t be arduous. 28 | 29 | We’ve built the DevChat open-source community to help every developer effortlessly cross the last mile of LLM productivity! 30 | 31 | ## Core Features 32 | 33 | ### ❤️ Simplified Personalization: Create Custom Workflows with a Few Sentences 34 | - Say goodbye to the rigid and learning-intensive “drag-and-drop” workflow frameworks. With just a few sentences, you can easily generate intelligent workflows that assist or handle various tasks for you — whether it’s submitting a standardized GitLab MR, generating automated API test cases, or having the AI provide progress updates via voice notifications. 35 | - Through the open-source community, we aim to build a rich collection of intelligent workflows, ranging from plugins that access rich IDE context to various autonomous agents, ensuring there’s always a “magical tool” that suits you. 36 | 37 | ### ❤️ Deep Understanding of Private Knowledge Through Knowledge Engineering 38 | - Integrated knowledge graph capabilities support diverse semantic queries, combining static pre-construction before queries with dynamic construction during queries to balance effectiveness and performance. 39 | - Classify knowledge for specific scenarios to enhance AI generation. 40 | - For example, by analyzing all interfaces, parameters, and relationships in API documents, our AI autonomous testing tool can use multiple APIs to generate test cases, reducing exploratory steps and improving the quality of test scripts. 41 | 42 | ## Quick Start 43 | 44 | - [Visual Studio Code extension](https://github.com/devchat-ai/devchat-vscode): Install from [Visual Studio Marketplace](https://marketplace.visualstudio.com/items?itemName=merico.devchat). 45 | 46 | - [IntelliJ Platform plugin](https://github.com/devchat-ai/devchat-intellij): Install from [JetBrains Marketplace](https://plugins.jetbrains.com/plugin/23258-devchat). 47 | 48 | ## Contributing 49 | 50 | - Repositories: 51 |   - The core library and CLI: https://github.com/devchat-ai/devchat 52 |   - System default workflows: https://github.com/devchat-ai/workflows 53 |   - Visual Studio Code extension: https://github.com/devchat-ai/devchat-vscode 54 |   - IntelliJ Platform plugin: https://github.com/devchat-ai/devchat-intellij 55 | 56 | - Issues and pull requests are welcome: https://github.com/devchat-ai/devchat/issues 57 | 58 | - Join our [Discord](https://discord.gg/JNyVGz8y)! 59 | 60 | ## What is Prompt-Centric Software Development (PCSD)? 61 | 62 | - The traditional code-centric paradigm is evolving. Stay ahead of the curve with DevChat. 63 | 64 | - Write prompts to create code. Transform prompts into all the artifacts in software engineering. 65 | 66 |   image 67 | 68 |   (This image is licensed by devchat.ai under a Creative Commons Attribution-ShareAlike 4.0 International License.)
69 | 70 | - We like to call it DevPromptOps 71 | 72 | image 73 | 74 | (This image is licensed by devchat.ai under a Creative Commons Attribution-ShareAlike 4.0 International License.) 75 | 76 | ## Contact 77 | 78 | Email: hello@devchat.ai 79 | 80 | We are creators of [Apache DevLake](https://devlake.apache.org/). 81 | -------------------------------------------------------------------------------- /devchat/engine/namespace.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | from typing import List, Optional 4 | 5 | 6 | class Namespace: 7 | def __init__(self, root_path: str, branches: List[str] = None): 8 | """ 9 | :param root_path: The root path of the namespace. 10 | :param branches: The hidden branches with ascending order of priority. 11 | """ 12 | self.root_path = root_path 13 | self.branches = branches if branches else ["sys", "org", "usr"] 14 | 15 | @staticmethod 16 | def is_valid_name(name: str) -> bool: 17 | """ 18 | Check if a name is valid. 19 | 20 | A valid name is either an empty string or 21 | a sequence of one or more alphanumeric characters, hyphens, or underscores, 22 | separated by single dots. Each component cannot contain a dot. 23 | 24 | :param name: The name to check. 25 | :return: True if the name is valid, False otherwise. 26 | """ 27 | # The regular expression pattern for a valid name 28 | if name is None: 29 | return False 30 | pattern = r"^$|^(?!.*\.\.)[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)*$" 31 | return bool(re.match(pattern, name)) 32 | 33 | def get_file(self, name: str, file: str) -> Optional[str]: 34 | """ 35 | :param name: The command name in the namespace. 36 | :param file: The target file name. 37 | :return: The full path of the target file in the command directory. 38 | """ 39 | if not self.is_valid_name(name): 40 | return None 41 | # Convert the dot-separated name to a path 42 | path = os.path.join(*name.split(".")) 43 | for branch in reversed(self.branches): 44 | full_path = os.path.join(self.root_path, branch, path) 45 | if os.path.isdir(full_path): 46 | # If it exists and is a directory, check for the file 47 | file_path = os.path.join(full_path, file) 48 | if os.path.isfile(file_path): 49 | # If the file exists, return its path 50 | return file_path 51 | # If no file is found, return None 52 | return None 53 | 54 | def list_files(self, name: str) -> List[str]: 55 | """ 56 | :param name: The command name in the namespace. 57 | :return: The full paths of the files in the command directory. 58 | """ 59 | if not self.is_valid_name(name): 60 | raise ValueError(f"Invalid name to list files: {name}") 61 | # Convert the dot-separated name to a path 62 | path = os.path.join(*name.split(".")) 63 | files = {} 64 | path_found = False 65 | for branch in self.branches: 66 | full_path = os.path.join(self.root_path, branch, path) 67 | if os.path.isdir(full_path): 68 | # If it exists and is a directory, get the files 69 | path_found = True 70 | for file in os.listdir(full_path): 71 | files[file] = os.path.join(full_path, file) 72 | # If no existing path is found, raise an error 73 | if not path_found: 74 | raise ValueError(f"Path not found to list files: {name}") 75 | # If path is found but no files exist, return an empty list 76 | # Sort the files in alphabetical order before returning 77 | return sorted(files.values()) if files else [] 78 | 79 | def list_names(self, name: str = "", recursive: bool = False) -> List[str]: 80 | """ 81 | :param name: The command name in the namespace. Defaults to the root. 
82 | :param recursive: Whether to list all descendant names or only child names. 83 | :return: A list of all names under the given name. 84 | """ 85 | if not self.is_valid_name(name): 86 | raise ValueError(f"Invalid name to list names: {name}") 87 | commands = set() 88 | path = os.path.join(*name.split(".")) 89 | found = False 90 | for branch in self.branches: 91 | full_path = os.path.join(self.root_path, branch, path) 92 | if os.path.isdir(full_path): 93 | found = True 94 | self._add_dirnames_to_commands(full_path, name, commands) 95 | if recursive: 96 | self._add_recursive_dirnames_to_commands(full_path, name, commands) 97 | if not found: 98 | raise ValueError(f"Path not found to list names: '{name}'") 99 | return sorted(commands) 100 | 101 | def _add_dirnames_to_commands(self, full_path: str, name: str, commands: set): 102 | for dirname in os.listdir(full_path): 103 | if dirname.startswith("."): 104 | continue 105 | if os.path.isdir(os.path.join(full_path, dirname)): 106 | command_name = ".".join([name, dirname]) if name else dirname 107 | commands.add(command_name) 108 | 109 | def _add_recursive_dirnames_to_commands(self, full_path: str, name: str, commands: set): 110 | self._recursive_dir_walk(full_path, name, commands) 111 | 112 | def _recursive_dir_walk(self, full_path: str, name: str, commands: set): 113 | for dirname in os.listdir(full_path): 114 | if dirname.startswith("."): 115 | continue 116 | dir_path = os.path.join(full_path, dirname) 117 | if os.path.isdir(dir_path): 118 | command_name = ".".join([name, dirname]) if name else dirname 119 | commands.add(command_name) 120 | self._recursive_dir_walk(dir_path, command_name, commands) 121 | -------------------------------------------------------------------------------- /devchat/openai/openai_chat.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | from typing import Dict, Iterator, List, Optional, Union 4 | 5 | from pydantic import BaseModel, Field 6 | 7 | from devchat.chat import Chat 8 | from devchat.utils import get_user_info, user_id 9 | 10 | from .http_openai import stream_request 11 | from .openai_message import OpenAIMessage 12 | from .openai_prompt import OpenAIPrompt 13 | 14 | 15 | class OpenAIChatParameters(BaseModel, extra="ignore"): 16 | temperature: Optional[float] = Field(0, ge=0, le=2) 17 | top_p: Optional[float] = Field(None, ge=0, le=1) 18 | n: Optional[int] = Field(None, ge=1) 19 | stream: Optional[bool] = Field(None) 20 | stop: Optional[Union[str, List[str]]] = Field(None) 21 | max_tokens: Optional[int] = Field(None, ge=1) 22 | presence_penalty: Optional[float] = Field(None, ge=-2.0, le=2.0) 23 | frequency_penalty: Optional[float] = Field(None, ge=-2.0, le=2.0) 24 | logit_bias: Optional[Dict[int, float]] = Field(None) 25 | user: Optional[str] = Field(None) 26 | request_timeout: Optional[int] = Field(32, ge=3) 27 | 28 | 29 | class OpenAIChatConfig(OpenAIChatParameters): 30 | """ 31 | Configuration object for the OpenAIChat APIs. 32 | """ 33 | 34 | model: str 35 | 36 | 37 | class OpenAIChat(Chat): 38 | """ 39 | OpenAIChat class that handles communication with the OpenAI Chat API. 40 | """ 41 | 42 | def __init__(self, config: OpenAIChatConfig): 43 | """ 44 | Initialize the OpenAIChat class with a configuration object. 45 | 46 | Args: 47 | config (OpenAIChatConfig): Configuration object with parameters for the OpenAI Chat API. 
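        Example (illustrative model name and parameters):
            config = OpenAIChatConfig(model="gpt-3.5-turbo", temperature=0, stream=True)
            chat = OpenAIChat(config)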
48 | """ 49 | self.config = config 50 | 51 | def init_prompt(self, request: str, function_name: Optional[str] = None) -> OpenAIPrompt: 52 | user, email = get_user_info() 53 | self.config.user = user_id(user, email)[1] 54 | prompt = OpenAIPrompt(self.config.model, user, email) 55 | prompt.set_request(request, function_name=function_name) 56 | return prompt 57 | 58 | def load_prompt(self, data: dict) -> OpenAIPrompt: 59 | data["_new_messages"] = { 60 | k: [OpenAIMessage.from_dict(m) for m in v] 61 | if isinstance(v, list) 62 | else OpenAIMessage.from_dict(v) 63 | for k, v in data["_new_messages"].items() 64 | if k != "function" 65 | } 66 | data["_history_messages"] = { 67 | k: [OpenAIMessage.from_dict(m) for m in v] for k, v in data["_history_messages"].items() 68 | } 69 | return OpenAIPrompt(**data) 70 | 71 | def complete_response(self, prompt: OpenAIPrompt) -> str: 72 | import httpx 73 | import openai 74 | 75 | # Filter the config parameters with set values 76 | config_params = self.config.dict(exclude_unset=True) 77 | if prompt.get_functions(): 78 | config_params["functions"] = prompt.get_functions() 79 | config_params["function_call"] = "auto" 80 | config_params["stream"] = False 81 | 82 | proxy_url = os.environ.get("DEVCHAT_PROXY", "") 83 | proxy_setting = ( 84 | {"proxy": {"https://": proxy_url, "http://": proxy_url}} if proxy_url else {} 85 | ) 86 | 87 | client = openai.OpenAI( 88 | api_key=os.environ.get("OPENAI_API_KEY", None), 89 | base_url=os.environ.get("OPENAI_API_BASE", None), 90 | http_client=httpx.Client(**proxy_setting, trust_env=False), 91 | ) 92 | 93 | response = client.chat.completions.create(messages=prompt.messages, **config_params) 94 | if isinstance(response, openai.types.chat.chat_completion.ChatCompletion): 95 | return json.dumps(response.dict()) 96 | return str(response) 97 | 98 | def stream_response(self, prompt: OpenAIPrompt) -> Iterator: 99 | api_key = os.environ.get("OPENAI_API_KEY", None) 100 | base_url = os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1/") 101 | 102 | if ( 103 | not os.environ.get("USE_TIKTOKEN", False) 104 | and base_url.find("https://api.openai.com/v1") == -1 105 | ): 106 | config_params = self.config.dict(exclude_unset=True) 107 | if prompt.get_functions(): 108 | config_params["functions"] = prompt.get_functions() 109 | config_params["function_call"] = "auto" 110 | config_params["stream"] = True 111 | 112 | data = {"messages": prompt.messages, **config_params, "timeout": 180} 113 | response = stream_request(api_key, base_url, data) 114 | return response 115 | import httpx 116 | import openai 117 | 118 | # Filter the config parameters with set values 119 | config_params = self.config.dict(exclude_unset=True) 120 | if prompt.get_functions(): 121 | config_params["functions"] = prompt.get_functions() 122 | config_params["function_call"] = "auto" 123 | config_params["stream"] = True 124 | 125 | proxy_url = os.environ.get("DEVCHAT_PROXY", "") 126 | proxy_setting = ( 127 | {"proxy": {"https://": proxy_url, "http://": proxy_url}} if proxy_url else {} 128 | ) 129 | 130 | client = openai.OpenAI( 131 | api_key=os.environ.get("OPENAI_API_KEY", None), 132 | base_url=os.environ.get("OPENAI_API_BASE", None), 133 | http_client=httpx.Client(**proxy_setting, trust_env=False), 134 | ) 135 | 136 | response = client.chat.completions.create( 137 | messages=prompt.messages, **config_params, timeout=180 138 | ) 139 | return response 140 | -------------------------------------------------------------------------------- /devchat/ide/vscode_services.py: 
-------------------------------------------------------------------------------- 1 | import os 2 | 3 | from .rpc import rpc_call 4 | from .types import LocationWithText 5 | 6 | 7 | @rpc_call 8 | def run_code(code: str): 9 | pass 10 | 11 | 12 | @rpc_call 13 | def diff_apply(filepath, content): 14 | pass 15 | 16 | 17 | @rpc_call 18 | def get_symbol_defines_in_selected_code(): 19 | pass 20 | 21 | 22 | def find_symbol(command, abspath, line, col): 23 | code = ( 24 | f"const position = new vscode.Position({line}, {col});" 25 | f"const absPath = vscode.Uri.file('{abspath}');" 26 | f"return await vscode.commands.executeCommand('{command}', absPath, position);" 27 | ) 28 | result = run_code(code=code) 29 | return result 30 | 31 | 32 | def find_definition(abspath: str, line: int, col: int): 33 | return find_symbol("vscode.executeDefinitionProvider", abspath, line, col) 34 | 35 | 36 | def find_type_definition(abspath: str, line: int, col: int): 37 | return find_symbol("vscode.executeTypeDefinitionProvider", abspath, line, col) 38 | 39 | 40 | def find_declaration(abspath: str, line: int, col: int): 41 | return find_symbol("vscode.executeDeclarationProvider", abspath, line, col) 42 | 43 | 44 | def find_implementation(abspath: str, line: int, col: int): 45 | return find_symbol("vscode.executeImplementationProvider", abspath, line, col) 46 | 47 | 48 | def find_reference(abspath: str, line: int, col: int): 49 | return find_symbol("vscode.executeReferenceProvider", abspath, line, col) 50 | 51 | 52 | def document_symbols(abspath: str): 53 | code = ( 54 | f"const fileUri = vscode.Uri.file('{abspath}');" 55 | "return await vscode.commands.executeCommand(" 56 | "'vscode.executeDocumentSymbolProvider', fileUri);" 57 | ) 58 | symbols = run_code(code=code) 59 | return symbols 60 | 61 | 62 | def workspace_symbols(query: str): 63 | code = ( 64 | "return await vscode.commands.executeCommand('vscode.executeWorkspaceSymbolProvider'," 65 | f" '{query}');" 66 | ) 67 | return run_code(code=code) 68 | 69 | 70 | def active_text_editor(): 71 | code = "return vscode.window.activeTextEditor;" 72 | return run_code(code=code) 73 | 74 | 75 | def open_folder(folder: str): 76 | folder = folder.replace("\\", "/") 77 | code = ( 78 | f"const folderUri = vscode.Uri.file('{folder}');" 79 | "vscode.commands.executeCommand(`vscode.openFolder`, folderUri);" 80 | ) 81 | run_code(code=code) 82 | 83 | 84 | def visible_lines(): 85 | active_document = active_text_editor() 86 | fail_result = { 87 | "filePath": "", 88 | "visibleText": "", 89 | "visibleRange": [-1, -1], 90 | } 91 | 92 | if not active_document: 93 | return fail_result 94 | if not os.path.exists(active_document["document"]["uri"]["fsPath"]): 95 | return fail_result 96 | 97 | file_path = active_document["document"]["uri"]["fsPath"] 98 | start_line = active_document["visibleRanges"][0][0]["line"] 99 | end_line = active_document["visibleRanges"][0][1]["line"] 100 | 101 | # read file lines from start_line to end_line 102 | with open(file_path, "r", encoding="utf-8") as file: 103 | _lines = file.readlines() 104 | _visible_lines = _lines[start_line : end_line + 1] 105 | 106 | # continue with the rest of the function 107 | return { 108 | "filePath": file_path, 109 | "visibleText": "".join(_visible_lines), 110 | "visibleRange": [start_line, end_line], 111 | } 112 | 113 | 114 | def visible_range() -> LocationWithText: 115 | visible_range_text = visible_lines() 116 | return LocationWithText( 117 | text=visible_range_text["visibleText"], 118 | abspath=visible_range_text["filePath"], 119 | 
range={ 120 |             "start": { 121 |                 "line": visible_range_text["visibleRange"][0], 122 |                 "character": 0, 123 |             }, 124 |             "end": { 125 |                 "line": visible_range_text["visibleRange"][1], 126 |                 "character": 0, 127 |             }, 128 |         }, 129 |     ) 130 | 131 | 132 | def selected_lines(): 133 |     active_document = active_text_editor() 134 |     fail_result = { 135 |         "filePath": "", 136 |         "selectedText": "", 137 |         "selectedRange": [-1, -1, -1, -1], 138 |     } 139 | 140 |     if not active_document: 141 |         return fail_result 142 |     if not os.path.exists(active_document["document"]["uri"]["fsPath"]): 143 |         return fail_result 144 | 145 |     file_path = active_document["document"]["uri"]["fsPath"] 146 |     start_line = active_document["selection"]["start"]["line"] 147 |     start_col = active_document["selection"]["start"]["character"] 148 |     end_line = active_document["selection"]["end"]["line"] 149 |     end_col = active_document["selection"]["end"]["character"] 150 | 151 |     # read file lines from start_line to end_line 152 |     with open(file_path, "r", encoding="utf-8") as file: 153 |         _lines = file.readlines() 154 |         _selected_lines = _lines[start_line : end_line + 1] 155 | 156 |     # continue with the rest of the function 157 |     return { 158 |         "filePath": file_path, 159 |         "selectedText": "".join(_selected_lines), 160 |         "selectedRange": [start_line, start_col, end_line, end_col], 161 |     } 162 | 163 | 164 | def selected_range() -> LocationWithText: 165 |     selected_range_text = selected_lines() 166 |     return LocationWithText( 167 |         text=selected_range_text["selectedText"], 168 |         abspath=selected_range_text["filePath"], 169 |         range={ 170 |             "start": { 171 |                 "line": selected_range_text["selectedRange"][0], 172 |                 "character": selected_range_text["selectedRange"][1], 173 |             }, 174 |             "end": { 175 |                 "line": selected_range_text["selectedRange"][2], 176 |                 "character": selected_range_text["selectedRange"][3], 177 |             }, 178 |         }, 179 |     ) 180 | -------------------------------------------------------------------------------- /devchat/llm/pipeline.py: -------------------------------------------------------------------------------- 1 | """ 2 | pipeline utils 3 | """ 4 | 5 | import sys 6 | import time 7 | from typing import Dict 8 | 9 | import openai 10 | 11 | from devchat.ide import IDEService 12 | 13 | 14 | class RetryException(Exception): 15 |     """Custom exception class for retry mechanism""" 16 | 17 |     def __init__(self, err): 18 |         """ 19 |         Initialize RetryException with an error. 20 | 21 |         Args: 22 |             err: An error that needs to be handled. 23 |         """ 24 |         self.error = err 25 | 26 | 27 | # Retry decorator for wrapping a function to enable retries on failure 28 | def retry(func, times): 29 |     """ 30 |     Execute the function, retrying on RetryException for up to `times` attempts. 31 | 32 |     Args: 33 |         func: The function to execute. 34 |         times: The maximum number of attempts.
35 | """ 36 | 37 | def wrapper(*args, **kwargs): 38 | for index in range(times): 39 | try: 40 | return func(*args, **kwargs) 41 | except RetryException as err: 42 | if index + 1 == times: 43 | raise err 44 | IDEService().ide_logging("debug", f"has retries: {index + 1}") 45 | continue 46 | except openai.APIStatusError as err: 47 | IDEService().ide_logging( 48 | "info", 49 | f"OpenAI API Status Error: {err.status_code} {err.body.get('detail', '')}", 50 | ) 51 | raise err from err 52 | except openai.APIError as err: 53 | IDEService().ide_logging( 54 | "info", 55 | ( 56 | f"OpenAI API Error: {err.code if err.code else ''} " 57 | f"{err.type if err.type else err}" 58 | ), 59 | ) 60 | raise err from err 61 | except Exception as err: 62 | IDEService().ide_logging("info", f"exception: {err.__class__} {str(err)}") 63 | raise err 64 | 65 | return wrapper 66 | 67 | 68 | # Exception handling decorator for wrapping a function to return error message on failure 69 | def exception_err(func): 70 | """ 71 | Execute the function and return error on failure. 72 | 73 | Args: 74 | *args: Variable length argument list. 75 | **kwargs: Arbitrary keyword arguments. 76 | """ 77 | 78 | def wrapper(*args, **kwargs): 79 | try: 80 | result = func(*args, **kwargs) 81 | return True, result 82 | # pylint: disable=W0718 83 | except Exception as err: 84 | return False, err 85 | 86 | return wrapper 87 | 88 | 89 | # Exception output handling decorator for wrapping a function to print error message on failure 90 | def exception_output_handle(func): 91 | """ 92 | Print the error and execute the function. 93 | 94 | Args: 95 | err: An error that needs to be handled. 96 | """ 97 | 98 | def wrapper(err): 99 | print(f"{err}", file=sys.stderr, flush=True) 100 | return func(err) 101 | 102 | return wrapper 103 | 104 | 105 | # Exception handling decorator for wrapping a function to handle specific error on failure 106 | def exception_handle(func, handler): 107 | """ 108 | Execute the function and handle specific error on failure. 109 | 110 | Args: 111 | *args: Variable length argument list. 112 | **kwargs: Arbitrary keyword arguments. 113 | """ 114 | 115 | def wrapper(*args, **kwargs): 116 | try: 117 | return func(*args, **kwargs) 118 | # pylint: disable=broad-except 119 | except (openai.APIStatusError, openai.APIError, Exception) as err: 120 | if isinstance(err, openai.APIStatusError): 121 | error_msg = ( 122 | f"OpenAI API Status Error: {err.status_code} {err.body.get('detail', '')}" 123 | ) 124 | elif isinstance(err, openai.APIError): 125 | error_msg = ( 126 | f"OpenAI API Error: {err.code if err.code else ''} " 127 | f"{err.type if err.type else err}" 128 | ) 129 | else: 130 | error_msg = f"Caught an exception of type {type(err)}: {err}" 131 | 132 | IDEService().ide_logging("error", error_msg) 133 | 134 | if handler: 135 | return handler(error_msg) 136 | else: 137 | raise err from err 138 | 139 | return wrapper 140 | 141 | 142 | # Pipeline decorator for wrapping a function to execute multiple functions in sequence 143 | def pipeline(*funcs): 144 | """ 145 | Execute multiple functions in sequence. 146 | 147 | Args: 148 | *args: Variable length argument list. 149 | **kwargs: Arbitrary keyword arguments. 
150 | """ 151 | 152 | def wrapper(*args, **kwargs): 153 | start_time = time.time() 154 | 155 | for index, func in enumerate(funcs): 156 | if index > 0: 157 | if isinstance(args[0], Dict) and args[0].get("__type__", None) == "parallel": 158 | args = (func(*args[0]["value"]),) 159 | else: 160 | args = (func(*args),) 161 | else: 162 | args = (func(*args, **kwargs),) 163 | end_time = time.time() 164 | IDEService().ide_logging("debug", f"time on pipeline: {end_time-start_time}") 165 | return args[0] 166 | 167 | return wrapper 168 | 169 | 170 | # Parallel decorator for wrapping a function to execute multiple functions concurrently 171 | def parallel(*funcs): 172 | """ 173 | Execute multiple functions concurrently. 174 | 175 | Args: 176 | args: A list of arguments for the functions. 177 | """ 178 | 179 | def wrapper(args): 180 | results = {"__type__": "parallel", "value": []} 181 | for func in funcs: 182 | results["value"].append(func(args)) 183 | return results 184 | 185 | return wrapper 186 | -------------------------------------------------------------------------------- /devchat/_cli/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import zipfile 4 | from contextlib import contextmanager 5 | from typing import Any, List, Optional, Tuple 6 | 7 | from devchat._cli.errors import MissContentInPromptException 8 | from devchat.utils import add_gitignore, find_root_dir, get_logger, rmtree, setup_logger 9 | 10 | logger = get_logger(__name__) 11 | 12 | 13 | def download_and_extract_workflow(workflow_url, target_dir): 14 | import requests 15 | 16 | # Download the workflow zip file 17 | response = requests.get(workflow_url, stream=True, timeout=10) 18 | # Downaload file to temp dir 19 | os.makedirs(target_dir, exist_ok=True) 20 | zip_path = os.path.join(target_dir, "workflow.zip") 21 | with open(zip_path, "wb") as file_handle: 22 | for chunk in response.iter_content(chunk_size=8192): 23 | if chunk: 24 | file_handle.write(chunk) 25 | 26 | # Extract the zip file 27 | parent_dir = os.path.dirname(target_dir) 28 | with zipfile.ZipFile(zip_path, "r") as zip_ref: 29 | zip_ref.extractall(parent_dir) 30 | 31 | # Delete target directory if exists 32 | if os.path.exists(target_dir): 33 | rmtree(target_dir) 34 | 35 | # Rename extracted directory to target directory 36 | extracted_dir = os.path.join(parent_dir, "workflows-main") 37 | os.rename(extracted_dir, target_dir) 38 | 39 | 40 | @contextmanager 41 | def handle_errors(): 42 | # import openai 43 | """Handle errors in the CLI.""" 44 | try: 45 | yield 46 | # except openai.APIError as error: 47 | # logger.exception(error) 48 | # print(f"{type(error).__name__}: {error.type}", file=sys.stderr) 49 | # sys.exit(1) 50 | except MissContentInPromptException: 51 | print("Miss content in prompt command.", file=sys.stderr) 52 | sys.exit(1) 53 | except Exception as error: 54 | # import traceback 55 | # traceback.print_exc() 56 | logger.exception(error) 57 | print(f"{type(error).__name__}: {error}", file=sys.stderr) 58 | sys.exit(1) 59 | 60 | 61 | REPO_CHAT_DIR = None 62 | USER_CHAT_DIR = None 63 | 64 | 65 | def init_dir() -> Tuple[str, str]: 66 | """ 67 | Initialize the chat directories. 68 | 69 | Returns: 70 | REPO_CHAT_DIR: The chat directory in the repository. 71 | USER_CHAT_DIR: The chat directory in the user's home. 
72 | """ 73 | global REPO_CHAT_DIR 74 | global USER_CHAT_DIR 75 | if REPO_CHAT_DIR and USER_CHAT_DIR: 76 | return REPO_CHAT_DIR, USER_CHAT_DIR 77 | 78 | repo_dir, user_dir = find_root_dir() 79 | if not repo_dir and not user_dir: 80 | print(f"Error: Failed to find home for .chat: {repo_dir}, {user_dir}", file=sys.stderr) 81 | sys.exit(1) 82 | 83 | if not repo_dir: 84 | repo_dir = user_dir 85 | elif not user_dir: 86 | user_dir = repo_dir 87 | 88 | try: 89 | REPO_CHAT_DIR = os.path.join(repo_dir, ".chat") 90 | if not os.path.exists(REPO_CHAT_DIR): 91 | os.makedirs(REPO_CHAT_DIR) 92 | except Exception: 93 | pass 94 | 95 | try: 96 | USER_CHAT_DIR = os.path.join(user_dir, ".chat") 97 | if not os.path.exists(USER_CHAT_DIR): 98 | os.makedirs(USER_CHAT_DIR) 99 | except Exception: 100 | pass 101 | 102 | if not os.path.isdir(REPO_CHAT_DIR): 103 | REPO_CHAT_DIR = USER_CHAT_DIR 104 | if not os.path.isdir(USER_CHAT_DIR): 105 | USER_CHAT_DIR = REPO_CHAT_DIR 106 | if not os.path.isdir(REPO_CHAT_DIR) or not os.path.isdir(USER_CHAT_DIR): 107 | print(f"Error: Failed to create {REPO_CHAT_DIR} and {USER_CHAT_DIR}", file=sys.stderr) 108 | sys.exit(1) 109 | 110 | try: 111 | setup_logger(os.path.join(REPO_CHAT_DIR, "error.log")) 112 | add_gitignore(REPO_CHAT_DIR, "*") 113 | except Exception as exc: 114 | logger.error("Failed to setup logger or add .gitignore: %s", exc) 115 | 116 | return REPO_CHAT_DIR, USER_CHAT_DIR 117 | 118 | 119 | def valid_git_repo(target_dir: str, valid_urls: List[str]) -> bool: 120 | """ 121 | Check if a directory is a valid Git repository and if its URL is in a list of valid URLs. 122 | 123 | :param target_dir: The path of the directory to check. 124 | :param valid_urls: A list of valid Git repository URLs. 125 | :return: True if the directory is a valid Git repository with a valid URL, False otherwise. 126 | """ 127 | try: 128 | from git import InvalidGitRepositoryError, Repo 129 | except Exception: 130 | pass 131 | 132 | try: 133 | repo = Repo(target_dir) 134 | repo_url = next(repo.remote().urls) 135 | return repo_url in valid_urls 136 | except InvalidGitRepositoryError: 137 | logger.exception("Not a valid Git repository: %s", target_dir) 138 | return False 139 | except Exception: 140 | return False 141 | 142 | 143 | def clone_git_repo(target_dir: str, repo_urls: List[Tuple[str, str]]): 144 | """ 145 | Clone a Git repository from a list of possible URLs. 146 | 147 | :param target_dir: The path where the repository should be cloned. 148 | :param repo_urls: A list of possible Git repository URLs. 
149 | """ 150 | try: 151 | from git import GitCommandError, Repo 152 | except Exception: 153 | pass 154 | 155 | for url, branch in repo_urls: 156 | try: 157 | print(f"Cloning repository {url} to {target_dir}") 158 | Repo.clone_from(url, target_dir, branch=branch) 159 | print("Cloned successfully") 160 | return 161 | except GitCommandError: 162 | logger.exception("Failed to clone repository %s to %s", url, target_dir) 163 | continue 164 | raise GitCommandError(f"Failed to clone repository to {target_dir}") 165 | 166 | 167 | def get_model_config(user_chat_dir: str, model: Optional[str] = None) -> Tuple[str, Any]: 168 | from devchat.config import ConfigManager 169 | 170 | manager = ConfigManager(user_chat_dir) 171 | return manager.model_config(model) 172 | -------------------------------------------------------------------------------- /tests/test_namespace.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import pytest 4 | 5 | from devchat.engine import Namespace 6 | 7 | 8 | def test_is_valid_name(): 9 | # Test valid names 10 | assert Namespace.is_valid_name("") is True 11 | assert Namespace.is_valid_name("a") is True 12 | assert Namespace.is_valid_name("A.b") is True 13 | assert Namespace.is_valid_name("a.2.c") is True 14 | assert Namespace.is_valid_name("a_b") is True 15 | assert Namespace.is_valid_name("a-b") is True 16 | assert Namespace.is_valid_name("a_3.4-d") is True 17 | 18 | # Test invalid names 19 | assert Namespace.is_valid_name(".") is False 20 | assert Namespace.is_valid_name("..") is False 21 | assert Namespace.is_valid_name("a..b") is False 22 | assert Namespace.is_valid_name(".a") is False 23 | assert Namespace.is_valid_name("3.") is False 24 | assert Namespace.is_valid_name("a/.b") is False 25 | assert Namespace.is_valid_name("a\\b") is False 26 | assert Namespace.is_valid_name("a*b") is False 27 | assert Namespace.is_valid_name("a?1") is False 28 | assert Namespace.is_valid_name("a:b") is False 29 | assert Namespace.is_valid_name("a|b") is False 30 | assert Namespace.is_valid_name('a"b') is False 31 | assert Namespace.is_valid_name("2b") is False 33 | 34 | 35 | def test_get_file(tmp_path): 36 | # Create a Namespace instance with the temporary directory as the root path 37 | namespace = Namespace(tmp_path) 38 | 39 | # Test case 1: a file that exists 40 | # Create a file in the 'usr' branch 41 | os.makedirs(os.path.join(tmp_path, "usr", "a", "b", "c"), exist_ok=True) 42 | file_path = os.path.join(tmp_path, "usr", "a", "b", "c", "file1.txt") 43 | with open(file_path, "w", encoding="utf-8") as file: 44 | file.write("test") 45 | assert namespace.get_file("a.b.c", "file1.txt") == file_path 46 | 47 | # Test case 2: a file that doesn't exist 48 | assert namespace.get_file("d.e.f", "file2.txt") is None 49 | 50 | # Test case 3: a file that exists in a later branch 51 | # Create a file in the 'sys' branch 52 | os.makedirs(os.path.join(tmp_path, "usr", "g", "h", "i"), exist_ok=True) 53 | os.makedirs(os.path.join(tmp_path, "sys", "g", "h", "i"), exist_ok=True) 54 | file_path = os.path.join(tmp_path, "sys", "g", "h", "i", "file3.txt") 55 | with open(file_path, "w", encoding="utf-8") as file: 56 | file.write("test") 57 | assert namespace.get_file("g.h.i", "file3.txt") == file_path 58 | 59 | # Test case 4: a file in 'usr' overwrites the same in 'sys' 60 | # Create the same file in the 'usr' and 'sys' branches 61 | os.makedirs(os.path.join(tmp_path, "usr", "j", "k", "l"), exist_ok=True) 62 | usr_file_path = os.path.join(tmp_path, 
"usr", "j", "k", "l", "file4.txt") 63 | os.makedirs(os.path.join(tmp_path, "sys", "j", "k", "l"), exist_ok=True) 64 | sys_file_path = os.path.join(tmp_path, "sys", "j", "k", "l", "file4.txt") 65 | with open(usr_file_path, "w", encoding="utf-8") as file: 66 | file.write("test") 67 | with open(sys_file_path, "w", encoding="utf-8") as file: 68 | file.write("test") 69 | assert namespace.get_file("j.k.l", "file4.txt") == usr_file_path 70 | 71 | 72 | def test_list_files(tmp_path): 73 | # Create a Namespace instance with the temporary directory as the root path 74 | namespace = Namespace(tmp_path) 75 | 76 | # Test case 1: a path that exists 77 | # Create a file in the 'usr' branch 78 | os.makedirs(os.path.join(tmp_path, "usr", "a", "b", "c"), exist_ok=True) 79 | file_path = os.path.join(tmp_path, "usr", "a", "b", "c", "file1.txt") 80 | with open(file_path, "w", encoding="utf-8") as file: 81 | file.write("test") 82 | assert namespace.list_files("a.b.c") == [file_path] 83 | 84 | # Test case 2: a path that doesn't exist 85 | with pytest.raises(ValueError): 86 | namespace.list_files("d.e.f") 87 | 88 | # Test case 3: a path exists but has no files 89 | os.makedirs(os.path.join(tmp_path, "org", "d", "e", "f"), exist_ok=True) 90 | assert not namespace.list_files("d.e.f") 91 | 92 | # Test case 4: a path that exists in a later branch 93 | # Create a file in the 'sys' branch 94 | os.makedirs(os.path.join(tmp_path, "usr", "g", "h", "i"), exist_ok=True) 95 | os.makedirs(os.path.join(tmp_path, "sys", "g", "h", "i"), exist_ok=True) 96 | file_path = os.path.join(tmp_path, "sys", "g", "h", "i", "file2.txt") 97 | with open(file_path, "w", encoding="utf-8") as file: 98 | file.write("test") 99 | assert namespace.list_files("g.h.i") == [file_path] 100 | 101 | # Test case 5: a path in 'usr' overwrites the same in 'sys' 102 | # Create the same file in the 'usr' and 'sys' branches 103 | os.makedirs(os.path.join(tmp_path, "usr", "j", "k", "l"), exist_ok=True) 104 | usr_file_path = os.path.join(tmp_path, "usr", "j", "k", "l", "file3.txt") 105 | os.makedirs(os.path.join(tmp_path, "sys", "j", "k", "l"), exist_ok=True) 106 | sys_file_path = os.path.join(tmp_path, "sys", "j", "k", "l", "file3.txt") 107 | with open(usr_file_path, "w", encoding="utf-8") as file: 108 | file.write("test") 109 | with open(sys_file_path, "w", encoding="utf-8") as file: 110 | file.write("test") 111 | assert namespace.list_files("j.k.l") == [usr_file_path] 112 | 113 | 114 | def test_list_names(tmp_path): 115 | os.makedirs(os.path.join(tmp_path, "usr", "a", "b", "c")) 116 | os.makedirs(os.path.join(tmp_path, "org", "a", "b", "d")) 117 | os.makedirs(os.path.join(tmp_path, "sys", "a", "e")) 118 | 119 | namespace = Namespace(tmp_path) 120 | 121 | # Test listing child commands 122 | commands = namespace.list_names("a") 123 | assert commands == ["a.b", "a.e"] 124 | 125 | # Test listing all descendant commands 126 | commands = namespace.list_names("a", recursive=True) 127 | assert commands == ["a.b", "a.b.c", "a.b.d", "a.e"] 128 | 129 | # Test listing commands of an invalid name 130 | with pytest.raises(ValueError): 131 | namespace.list_names("b") 132 | 133 | # Test listing commands when there are no commands 134 | commands = namespace.list_names("a.e") 135 | assert len(commands) == 0 136 | 137 | # Test listing commands of the root 138 | commands = namespace.list_names() 139 | assert commands == ["a"] 140 | -------------------------------------------------------------------------------- /devchat/ide/service.py: 
--------------------------------------------------------------------------------
1 | from typing import List
2 | 
3 | from .idea_services import IdeaIDEService
4 | from .rpc import rpc_method
5 | from .types import Location, LocationWithText, SymbolNode
6 | from .vscode_services import selected_range, visible_range
7 | 
8 | 
9 | class IDEService:
10 | """
11 | Client for IDE service
12 | 
13 | Usage:
14 | client = IDEService()
15 | res = client.ide_language()
16 | res = client.ide_logging("info", "some message")
17 | """
18 | 
19 | def __init__(self):
20 | self._result = None
21 | 
22 | @rpc_method
23 | def get_lsp_brige_port(self) -> str:
24 | """
25 | Get the LSP bridge port.
26 | 
27 | :return: str
28 | """
29 | return self._result
30 | 
31 | @rpc_method
32 | def install_python_env(self, command_name: str, requirements_file: str) -> str:
33 | """
34 | Install a Python environment for the given command from the provided
35 | requirements file, and return the path of the installed Python interpreter.
36 | `command_name` is used as the name of the environment.
37 | """
38 | return self._result
39 | 
40 | @rpc_method
41 | def update_slash_commands(self) -> bool:
42 | """
43 | Update the slash commands and return a boolean indicating the success of the operation.
44 | """
45 | return self._result
46 | 
47 | @rpc_method
48 | def ide_language(self) -> str:
49 | """
50 | Returns the current IDE language setting for the user.
51 | - zh: Chinese
52 | - en: English
53 | """
54 | return self._result
55 | 
56 | @rpc_method
57 | def ide_logging(self, level: str, message: str) -> bool:
58 | """
59 | Logs a message to the IDE.
60 | level: "info" | "warn" | "error" | "debug"
61 | """
62 | return self._result
63 | 
64 | @rpc_method
65 | def get_document_symbols(self, abspath: str) -> List[SymbolNode]:
66 | """
67 | Retrieves the document symbols for a given file.
68 | 
69 | Args:
70 | abspath: The absolute path to the file whose symbols are to be retrieved.
71 | 
72 | Returns:
73 | A list of SymbolNode objects representing the symbols found in the document.
74 | """
75 | return [SymbolNode.parse_obj(node) for node in self._result]
76 | 
77 | @rpc_method
78 | def find_type_def_locations(self, abspath: str, line: int, character: int) -> List[Location]:
79 | """
80 | Finds the location of type definitions within a file.
81 | 
82 | Args:
83 | abspath: The absolute path to the file to be searched.
84 | line: The line number within the file to begin the search.
85 | character: The character position within the line to begin the search.
86 | 
87 | Returns:
88 | A list of Location objects representing the locations of type definitions found.
89 | """
90 | return [Location.parse_obj(loc) for loc in self._result]
91 | 
92 | @rpc_method
93 | def find_def_locations(self, abspath: str, line: int, character: int) -> List[Location]:
94 | return [Location.parse_obj(loc) for loc in self._result]
95 | 
96 | @rpc_method
97 | def ide_name(self) -> str:
98 | """Returns the name of the IDE.
99 | 
100 | This method is a remote procedure call (RPC) that fetches the name of the IDE being used.
101 | 
102 | Returns:
103 | The name of the IDE as a string. For example, "vscode" or "pycharm".
104 | """
105 | return self._result
106 | 
107 | @rpc_method
108 | def diff_apply(self, filepath, content) -> bool:
109 | """
110 | Applies a given diff to a file.
111 | 
112 | This method uses the content provided to apply changes to the file
113 | specified by the filepath.
It's an RPC method that achieves file synchronization
114 | by updating the local version of the file with the changes described in the
115 | content parameter.
116 | 
117 | Args:
118 | filepath: The path to the file that needs to be updated.
119 | content: A string containing the new code that should be applied to the file.
120 | 
121 | Returns:
122 | A boolean indicating if the diff was successfully applied.
123 | """
124 | return self._result
125 | 
126 | def get_visible_range(self) -> LocationWithText:
127 | """
128 | Determines and returns the visible range of code in the current IDE.
129 | 
130 | Returns:
131 | The visible range as a LocationWithText if the IDE is VSCode; otherwise
132 | defers to IdeaIDEService's get_visible_range method for other IDEs.
133 | """
134 | if self.ide_name() == "vscode":
135 | return visible_range()
136 | return IdeaIDEService().get_visible_range()
137 | 
138 | def get_selected_range(self) -> LocationWithText:
139 | """
140 | Retrieves the selected range of code in the current IDE.
141 | 
142 | Returns:
143 | Calls and returns the result of `selected_range()` if the IDE is VSCode,
144 | otherwise, it defers to IdeaIDEService's `get_selected_range()` method.
145 | """
146 | if self.ide_name() == "vscode":
147 | return selected_range()
148 | return IdeaIDEService().get_selected_range()
149 | 
150 | @rpc_method
151 | def get_diagnostics_in_range(self, fileName: str, startLine: int, endLine: int) -> List[str]:
152 | """
153 | Retrieves diagnostics for a specific range of code in the current IDE.
154 | 
155 | Returns:
156 | A list of diagnostic messages for the specified range.
157 | """
158 | return self._result
159 | 
160 | @rpc_method
161 | def get_collapsed_code(self, fileName: str, startLine: int, endLine: int) -> str:
162 | """
163 | Retrieves the collapsed code, excluding the specified range, in the current IDE.
164 | 
165 | Returns:
166 | The collapsed code.
167 | """
168 | return self._result
169 | 
170 | @rpc_method
171 | def get_extension_tools_path(self) -> str:
172 | """
173 | Retrieves the extension tools path.
174 | 
175 | Returns:
176 | The extension tools path.
177 | """
178 | return self._result
179 | 
-------------------------------------------------------------------------------- /devchat/workflow/step.py: --------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import shlex
4 | import subprocess
5 | import sys
6 | import threading
7 | from enum import Enum
8 | from typing import Dict, List, Tuple
9 | 
10 | from .path import WORKFLOWS_BASE
11 | from .schema import RuntimeParameter, WorkflowConfig
12 | 
13 | 
14 | class BuiltInVars(str, Enum):
15 | """
16 | Built-in variables within the workflow step command.
17 | """
18 | 
19 | devchat_python = "$devchat_python"
20 | command_path = "$command_path"
21 | user_input = "$input"
22 | workflow_python = "$workflow_python"
23 | 
24 | 
25 | class BuiltInEnvs(str, Enum):
26 | """
27 | Built-in environment variables for the step subprocess.
28 | """
29 | 
30 | llm_model = "LLM_MODEL"
31 | parent_hash = "PARENT_HASH"
32 | context_contents = "CONTEXT_CONTENTS"
33 | 
34 | 
35 | class WorkflowStep:
36 | def __init__(self, **kwargs):
37 | """
38 | Initialize a workflow step with the given configuration.
39 | """
40 | self._kwargs = kwargs
41 | 
42 | @property
43 | def command_raw(self) -> str:
44 | """
45 | The raw command string from the config.
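Example (an illustrative value built from the variables defined in BuiltInVars above):

    $devchat_python $command_path/main.py "$input"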
46 | """ 47 | return self._kwargs.get("run", "") 48 | 49 | def _setup_env(self, wf_config: WorkflowConfig, rt_param: RuntimeParameter) -> Dict[str, str]: 50 | """ 51 | Setup the environment variables for the subprocess. 52 | """ 53 | _1 = wf_config 54 | command_raw = self.command_raw 55 | 56 | env = os.environ.copy() 57 | 58 | # set PYTHONPATH for the subprocess 59 | python_path = env.get("PYTHONPATH", "") 60 | devchat_python_path = env.get("DEVCHAT_PYTHONPATH", python_path) 61 | new_paths = [WORKFLOWS_BASE] 62 | if (BuiltInVars.devchat_python in command_raw) and devchat_python_path: 63 | # only add devchat pythonpath when it's used in the command 64 | new_paths.append(devchat_python_path) 65 | 66 | paths = [os.path.normpath(p) for p in new_paths] 67 | paths = [p.replace("\\", "\\\\") for p in paths] 68 | joined = os.pathsep.join(paths) 69 | 70 | env["PYTHONPATH"] = joined 71 | env[BuiltInEnvs.llm_model] = rt_param.model_name or "" 72 | env[BuiltInEnvs.parent_hash] = rt_param.parent_hash or "" 73 | env[BuiltInEnvs.context_contents] = "" 74 | if rt_param.history_messages: 75 | # convert dict to json string 76 | env[BuiltInEnvs.context_contents] = json.dumps(rt_param.history_messages) 77 | 78 | return env 79 | 80 | def _validate_and_interpolate( 81 | self, wf_config: WorkflowConfig, rt_param: RuntimeParameter 82 | ) -> List[str]: 83 | """ 84 | Validate the step configuration and interpolate variables in the command. 85 | 86 | Return the command parts as a list of strings. 87 | """ 88 | command_raw = self.command_raw 89 | parts = shlex.split(command_raw) 90 | 91 | # if the command_raw use $workflow_python, 92 | # it must be set in workflow config 93 | if BuiltInVars.workflow_python in command_raw: 94 | if not rt_param.workflow_python: 95 | raise ValueError( 96 | "The command uses $workflow_python, " "but the workflow_python is not set yet." 97 | ) 98 | 99 | args = [] 100 | for p in parts: 101 | arg = p 102 | 103 | if p.startswith(BuiltInVars.workflow_python): 104 | if not rt_param.workflow_python: 105 | raise ValueError( 106 | "The command uses $workflow_python, " 107 | "but the workflow_python is not set yet." 108 | ) 109 | arg = arg.replace(BuiltInVars.workflow_python, rt_param.workflow_python) 110 | 111 | if p.startswith(BuiltInVars.devchat_python): 112 | arg = arg.replace(BuiltInVars.devchat_python, rt_param.devchat_python) 113 | 114 | if p.startswith(BuiltInVars.command_path): 115 | # NOTE: 在文档中说明 command.yml 中表示路径采用 POSIX 标准 116 | # 即,使用 / 分隔路径,而非 \ (Windows) 117 | path_parts = p.split("/") 118 | # replace "$command_path" with the root path in path_parts 119 | arg = os.path.join(wf_config.root_path, *path_parts[1:]) 120 | 121 | if BuiltInVars.user_input in p: 122 | arg = arg.replace(BuiltInVars.user_input, rt_param.user_input) 123 | 124 | args.append(arg) 125 | 126 | return args 127 | 128 | def run(self, wf_config: WorkflowConfig, rt_param: RuntimeParameter) -> Tuple[int, str, str]: 129 | """ 130 | Run the step in a subprocess. 131 | 132 | Returns the return code, stdout, and stderr. 133 | """ 134 | # setup the environment variables 135 | env = self._setup_env(wf_config, rt_param) 136 | 137 | command_args = self._validate_and_interpolate(wf_config, rt_param) 138 | 139 | def _pipe_reader(pipe, data, out_file): 140 | """ 141 | Read from the pipe, then write and save the data. 
142 | """ 143 | while pipe: 144 | pipe_data = pipe.read(1) 145 | if pipe_data == "": 146 | break 147 | data["data"] += pipe_data 148 | print(pipe_data, end="", file=out_file, flush=True) 149 | 150 | with subprocess.Popen( 151 | command_args, 152 | stdout=subprocess.PIPE, 153 | stderr=subprocess.PIPE, 154 | env=env, 155 | text=True, 156 | ) as proc: 157 | stdout_data, stderr_data = {"data": ""}, {"data": ""} 158 | stdout_thread = threading.Thread( 159 | target=_pipe_reader, args=(proc.stdout, stdout_data, sys.stdout) 160 | ) 161 | stderr_thread = threading.Thread( 162 | target=_pipe_reader, args=(proc.stderr, stderr_data, sys.stderr) 163 | ) 164 | stdout_thread.start() 165 | stderr_thread.start() 166 | stdout_thread.join() 167 | stderr_thread.join() 168 | 169 | proc.wait() 170 | return_code = proc.returncode 171 | return return_code, stdout_data["data"], stderr_data["data"] 172 | -------------------------------------------------------------------------------- /devchat/engine/util.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import sys 4 | from typing import Dict, List 5 | 6 | from devchat._cli.utils import init_dir 7 | from devchat.utils import get_logger 8 | 9 | from .command_parser import Command, CommandParser 10 | from .namespace import Namespace 11 | 12 | logger = get_logger(__name__) 13 | 14 | 15 | DEFAULT_MODEL = "gpt-3.5-turbo" 16 | 17 | 18 | class CommandUtil: 19 | @staticmethod 20 | def __command_parser(): 21 | _, user_chat_dir = init_dir() 22 | workflows_dir = os.path.join(user_chat_dir, "workflows") 23 | if not os.path.exists(workflows_dir) or not os.path.isdir(workflows_dir): 24 | return None 25 | 26 | namespace = Namespace(workflows_dir) 27 | commander = CommandParser(namespace) 28 | return commander 29 | 30 | @staticmethod 31 | def load_command(command: str): 32 | commander = CommandUtil.__command_parser() 33 | if not commander: 34 | return None 35 | return commander.parse(command) 36 | 37 | @staticmethod 38 | def load_commands() -> List[Command]: 39 | commander = CommandUtil.__command_parser() 40 | if not commander: 41 | return [] 42 | 43 | command_names = commander.namespace.list_names("", True) 44 | commands = [(name, commander.parse(name)) for name in command_names] 45 | return [cmd for cmd in commands if cmd[1]] 46 | 47 | 48 | class ToolUtil: 49 | @staticmethod 50 | def __make_function_parameters(command: Command): 51 | properties = {} 52 | required = [] 53 | 54 | if command.parameters: 55 | for key, value in command.parameters.items(): 56 | properties[key] = {} 57 | for key1, value1 in value.dict().items(): 58 | if key1 not in ["type", "description", "enum"] or value1 is None: 59 | continue 60 | properties[key][key1] = value1 61 | required.append(key) 62 | elif command.steps[0]["run"].find("$input") > 0: 63 | properties["input"] = {"type": "string", "description": "input text"} 64 | required.append("input") 65 | 66 | return properties, required 67 | 68 | @staticmethod 69 | def make_function(command: Command, command_name: str): 70 | properties, required = ToolUtil.__make_function_parameters(command) 71 | command_name = command_name.replace(".", "---") 72 | 73 | return { 74 | "type": "function", 75 | "function": { 76 | "name": command_name, 77 | "description": command.description, 78 | "parameters": { 79 | "type": "object", 80 | "properties": properties, 81 | "required": required, 82 | }, 83 | }, 84 | } 85 | 86 | @staticmethod 87 | def select_function_by_llm( 88 | history_messages: List[Dict], 
tools: List[Dict], model: str = DEFAULT_MODEL
89 | ):
90 | import httpx
91 | import openai
92 | 
93 | proxy_url = os.environ.get("DEVCHAT_PROXY", "")
94 | proxy_setting = (
95 | {"proxies": {"https://": proxy_url, "http://": proxy_url}} if proxy_url else {}
96 | )
97 | 
98 | client = openai.OpenAI(
99 | api_key=os.environ.get("OPENAI_API_KEY", None),
100 | base_url=os.environ.get("OPENAI_API_BASE", None),
101 | http_client=httpx.Client(**proxy_setting, trust_env=False),
102 | )
103 | 
104 | try:
105 | response = client.chat.completions.create(
106 | messages=history_messages, model=model, stream=False, tools=tools
107 | )
108 | 
109 | response_message = response.dict()["choices"][0]["message"]
110 | if not response_message["tool_calls"]:
111 | return None
112 | tool_call = response_message["tool_calls"][0]["function"]
113 | if tool_call["name"] != tools[0]["function"]["name"]:
114 | error_msg = (
115 | "The LLM returned an invalid function name. "
116 | f"Expected: {tools[0]['function']['name']}, "
117 | f"Actual: {tool_call['name']}"
118 | )
119 | print(error_msg, file=sys.stderr, flush=True)
120 | return None
121 | return {
122 | "name": tool_call["name"].replace("---", "."),
123 | "arguments": json.loads(tool_call["arguments"]),
124 | }
125 | except (ConnectionError, openai.APIConnectionError) as err:
126 | print("ConnectionError:", err, file=sys.stderr, flush=True)
127 | return None
128 | except openai.APIError as err:
129 | print("openai APIError:", err.type, file=sys.stderr, flush=True)
130 | logger.exception("Call command by LLM error: %s", err)
131 | return None
132 | except Exception as err:
133 | print("Exception:", err, file=sys.stderr, flush=True)
134 | logger.exception("Call command by LLM error: %s", err)
135 | return None
136 | 
137 | @staticmethod
138 | def _create_tool(command_name: str, command: Command) -> dict:
139 | properties = {}
140 | required = []
141 | if command.parameters:
142 | for key, value in command.parameters.items():
143 | properties[key] = {}
144 | for key1, value1 in value.dict().items():
145 | if key1 not in ["type", "description", "enum"] or value1 is None:
146 | continue
147 | properties[key][key1] = value1
148 | required.append(key)
149 | elif "$input" in command.steps[0]["run"]:
150 | properties["input"] = {"type": "string", "description": "input text"}
151 | required.append("input")
152 | 
153 | command_name = command_name.replace(".", "---")
154 | return {
155 | "type": "function",
156 | "function": {
157 | "name": command_name,
158 | "description": command.description,
159 | "parameters": {
160 | "type": "object",
161 | "properties": properties,
162 | "required": required,
163 | },
164 | },
165 | }
166 | 
-------------------------------------------------------------------------------- /devchat/_cli/run.py: --------------------------------------------------------------------------------
1 | from typing import List, Optional, Tuple
2 | 
3 | import click
4 | 
5 | from devchat.utils import rmtree
6 | 
7 | 
8 | @click.command(
9 | help="The 'command' argument is the name of the command to run or get information about."
10 | ) 11 | @click.argument("command", required=False, default="") 12 | @click.option( 13 | "--list", 14 | "list_flag", 15 | is_flag=True, 16 | default=False, 17 | help="List all specified commands in JSON format.", 18 | ) 19 | @click.option( 20 | "--recursive", 21 | "-r", 22 | "recursive_flag", 23 | is_flag=True, 24 | default=True, 25 | help="List commands recursively.", 26 | ) 27 | @click.option( 28 | "--update-sys", 29 | "update_sys_flag", 30 | is_flag=True, 31 | default=False, 32 | help="Pull the `sys` command directory from the DevChat repository.", 33 | ) 34 | @click.option("-p", "--parent", help="Input the parent prompt hash to continue the conversation.") 35 | @click.option( 36 | "-r", 37 | "--reference", 38 | multiple=True, 39 | help="Input one or more specific previous prompts to include in the current prompt.", 40 | ) 41 | @click.option( 42 | "-i", "--instruct", multiple=True, help="Add one or more files to the prompt as instructions." 43 | ) 44 | @click.option( 45 | "-c", "--context", multiple=True, help="Add one or more files to the prompt as a context." 46 | ) 47 | @click.option("-m", "--model", help="Specify the model to use for the prompt.") 48 | @click.option( 49 | "--config", 50 | "config_str", 51 | help="Specify a JSON string to overwrite the default configuration for this prompt.", 52 | ) 53 | def run( 54 | command: str, 55 | list_flag: bool, 56 | recursive_flag: bool, 57 | update_sys_flag: bool, 58 | parent: Optional[str], 59 | reference: Optional[List[str]], 60 | instruct: Optional[List[str]], 61 | context: Optional[List[str]], 62 | model: Optional[str], 63 | config_str: Optional[str] = None, 64 | ): 65 | """ 66 | Operate the workflow engine of DevChat. 67 | """ 68 | import json 69 | import os 70 | import sys 71 | 72 | from devchat._cli.router import llm_commmand 73 | from devchat._cli.utils import handle_errors, init_dir 74 | from devchat.engine import CommandParser, Namespace 75 | from devchat.utils import get_logger 76 | 77 | logger = get_logger(__name__) 78 | 79 | _, user_chat_dir = init_dir() 80 | with handle_errors(): 81 | workflows_dir = os.path.join(user_chat_dir, "workflows") 82 | if not os.path.exists(workflows_dir): 83 | os.makedirs(workflows_dir) 84 | if not os.path.isdir(workflows_dir): 85 | print(f"Error: Failed to find workflows directory: {workflows_dir}", file=sys.stderr) 86 | sys.exit(1) 87 | 88 | namespace = Namespace(workflows_dir) 89 | commander = CommandParser(namespace) 90 | 91 | if update_sys_flag: 92 | sys_dir = os.path.join(workflows_dir, "sys") 93 | git_urls = [ 94 | ("https://gitlab.com/devchat-ai/workflows.git", "main"), 95 | ("https://github.com/devchat-ai/workflows.git", "main"), 96 | ] 97 | zip_urls = [ 98 | "https://gitlab.com/devchat-ai/workflows/-/archive/main/workflows-main.zip", 99 | "https://codeload.github.com/devchat-ai/workflows/zip/refs/heads/main", 100 | ] 101 | _clone_or_pull_git_repo(sys_dir, git_urls, zip_urls) 102 | return 103 | 104 | if list_flag: 105 | commands = [] 106 | for name in namespace.list_names(command, recursive_flag): 107 | cmd = commander.parse(name) 108 | if not cmd: 109 | logger.warning("Existing command directory failed to parse: %s", name) 110 | continue 111 | commands.append({"name": name, "description": cmd.description, "path": cmd.path}) 112 | print(json.dumps(commands, indent=2)) 113 | return 114 | 115 | if command: 116 | llm_commmand(command, parent, reference, instruct, context, model, config_str) 117 | return 118 | 119 | 120 | def __make_files_writable(directory): 121 | """ 122 | Recursively make 
all files in the directory writable.
123 | """
124 | import os
125 | import stat
126 | 
127 | for root, _1, files in os.walk(directory):
128 | for name in files:
129 | filepath = os.path.join(root, name)
130 | if not os.access(filepath, os.W_OK):
131 | os.chmod(filepath, stat.S_IWUSR)
132 | 
133 | 
134 | def _clone_or_pull_git_repo(target_dir: str, repo_urls: List[Tuple[str, str]], zip_urls: List[str]):
135 | """
136 | Clone a Git repository to a specified location, or re-clone and replace it if it already exists.
137 | 
138 | :param target_dir: The path where the repository should be cloned.
139 | :param repo_urls: A list of possible (URL, branch) pairs; zip_urls are zip fallbacks used when Git is missing.
140 | """
141 | import os
142 | import shutil
143 | 
144 | from devchat._cli.utils import clone_git_repo, download_and_extract_workflow
145 | from devchat.utils import get_logger
146 | 
147 | logger = get_logger(__name__)
148 | 
149 | if shutil.which("git") is None:
150 | # If Git is not installed, download and extract the workflow
151 | for url in zip_urls:
152 | try:
153 | download_and_extract_workflow(url, target_dir)
154 | break
155 | except Exception as err:
156 | logger.exception("Failed to download and extract workflow: %s", err)
157 | return
158 | 
159 | if os.path.exists(target_dir):
160 | bak_dir = target_dir + "_bak"
161 | new_dir = target_dir + "_old"
162 | if os.path.exists(new_dir):
163 | rmtree(new_dir)
164 | if os.path.exists(bak_dir):
165 | rmtree(bak_dir)
166 | print(f"{target_dir} already exists. Moving it to {new_dir}")
167 | clone_git_repo(bak_dir, repo_urls)
168 | try:
169 | shutil.move(target_dir, new_dir)
170 | except Exception:
171 | __make_files_writable(target_dir)
172 | shutil.move(target_dir, new_dir)
173 | try:
174 | shutil.move(bak_dir, target_dir)
175 | except Exception:
176 | __make_files_writable(bak_dir)
177 | shutil.move(bak_dir, target_dir)
178 | else:
179 | clone_git_repo(target_dir, repo_urls)
180 | 
181 | print(f"Updated {target_dir}")
182 | 
--------------------------------------------------------------------------------