├── archon
│   ├── __init__.py
│   ├── langgraph.json
│   ├── refiner_agents
│   │   ├── prompt_refiner_agent.py
│   │   ├── tools_refiner_agent.py
│   │   └── agent_refiner_agent.py
│   ├── pydantic_ai_coder.py
│   └── agent_tools.py
├── iterations
│   ├── v2-agentic-workflow
│   │   ├── .gitignore
│   │   ├── requirements.txt
│   │   ├── langgraph.json
│   │   ├── .env.example
│   │   ├── site_pages.sql
│   │   ├── ollama_site_pages.sql
│   │   ├── streamlit_ui.py
│   │   └── README.md
│   ├── v3-mcp-support
│   │   ├── archon
│   │   │   ├── __init__.py
│   │   │   └── langgraph.json
│   │   ├── mcp-config.json
│   │   ├── utils
│   │   │   ├── utils.py
│   │   │   ├── site_pages.sql
│   │   │   └── ollama_site_pages.sql
│   │   ├── .env.example
│   │   ├── setup_mcp.py
│   │   ├── graph_service.py
│   │   ├── mcp_server.py
│   │   ├── requirements.txt
│   │   └── streamlit_ui.py
│   ├── v4-streamlit-ui-overhaul
│   │   ├── archon
│   │   │   ├── __init__.py
│   │   │   └── langgraph.json
│   │   ├── mcp
│   │   │   ├── requirements.txt
│   │   │   ├── Dockerfile
│   │   │   ├── .dockerignore
│   │   │   └── mcp_server.py
│   │   ├── .streamlit
│   │   │   └── config.toml
│   │   ├── public
│   │   │   ├── Archon.png
│   │   │   └── ArchonLightGrey.png
│   │   ├── .gitignore
│   │   ├── .dockerignore
│   │   ├── Dockerfile
│   │   ├── .env.example
│   │   ├── utils
│   │   │   ├── site_pages.sql
│   │   │   └── utils.py
│   │   ├── graph_service.py
│   │   ├── mcp_server.py
│   │   ├── requirements.txt
│   │   └── run_docker.py
│   ├── v5-parallel-specialized-agents
│   │   ├── archon
│   │   │   ├── __init__.py
│   │   │   ├── langgraph.json
│   │   │   ├── refiner_agents
│   │   │   │   ├── prompt_refiner_agent.py
│   │   │   │   ├── tools_refiner_agent.py
│   │   │   │   └── agent_refiner_agent.py
│   │   │   ├── pydantic_ai_coder.py
│   │   │   └── agent_tools.py
│   │   ├── mcp
│   │   │   ├── requirements.txt
│   │   │   ├── Dockerfile
│   │   │   ├── .dockerignore
│   │   │   └── mcp_server.py
│   │   ├── streamlit_pages
│   │   │   ├── __init__.py
│   │   │   ├── styles.py
│   │   │   ├── chat.py
│   │   │   └── intro.py
│   │   ├── public
│   │   │   ├── Archon.png
│   │   │   ├── ArchonGraph.png
│   │   │   └── ArchonLightGrey.png
│   │   ├── .dockerignore
│   │   ├── Dockerfile
│   │   ├── .env.example
│   │   ├── utils
│   │   │   └── site_pages.sql
│   │   ├── graph_service.py
│   │   ├── requirements.txt
│   │   ├── streamlit_ui.py
│   │   ├── run_docker.py
│   │   └── README.md
│   └── v1-single-agent
│       ├── requirements.txt
│       ├── .env.example
│       ├── site_pages.sql
│       ├── README.md
│       └── streamlit_ui.py
├── mcp
│   ├── requirements.txt
│   ├── Dockerfile
│   ├── .dockerignore
│   └── mcp_server.py
├── .gitattributes
├── public
│   ├── Archon.png
│   ├── ArchonGraph.png
│   └── ArchonLightGrey.png
├── streamlit_pages
│   ├── __init__.py
│   ├── styles.py
│   ├── chat.py
│   └── intro.py
├── .streamlit
│   └── config.toml
├── .gitignore
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── config.yml
│   │   ├── feature_request.md
│   │   └── bug_report.md
│   ├── dependabot.yml
│   └── workflows
│       └── build.yml
├── .dockerignore
├── Dockerfile
├── LICENSE
├── .env.example
├── utils
│   └── site_pages.sql
├── graph_service.py
├── requirements.txt
├── streamlit_ui.py
└── run_docker.py

--------------------------------------------------------------------------------
/archon/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/iterations/v2-agentic-workflow/.gitignore:
--------------------------------------------------------------------------------
.env
--------------------------------------------------------------------------------
/iterations/v3-mcp-support/archon/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/iterations/v4-streamlit-ui-overhaul/archon/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/iterations/v5-parallel-specialized-agents/archon/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/mcp/requirements.txt:
--------------------------------------------------------------------------------
mcp==1.2.1
python-dotenv==1.0.1
requests==2.32.3
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
# Auto detect text files and perform LF normalization
* text=auto
--------------------------------------------------------------------------------
/public/Archon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kuilenren/Archon/main/public/Archon.png
--------------------------------------------------------------------------------
/streamlit_pages/__init__.py:
--------------------------------------------------------------------------------
# This file makes the streamlit_pages directory a Python package
--------------------------------------------------------------------------------
/public/ArchonGraph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kuilenren/Archon/main/public/ArchonGraph.png
--------------------------------------------------------------------------------
/public/ArchonLightGrey.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kuilenren/Archon/main/public/ArchonLightGrey.png
--------------------------------------------------------------------------------
/iterations/v4-streamlit-ui-overhaul/mcp/requirements.txt:
--------------------------------------------------------------------------------
mcp==1.2.1
python-dotenv==1.0.1
requests==2.32.3
--------------------------------------------------------------------------------
/iterations/v5-parallel-specialized-agents/mcp/requirements.txt:
--------------------------------------------------------------------------------
mcp==1.2.1
python-dotenv==1.0.1
requests==2.32.3
--------------------------------------------------------------------------------
/.streamlit/config.toml:
--------------------------------------------------------------------------------
[client]
showErrorDetails = "none"

[theme]
primaryColor = "#EB2D8C"
base="dark"
--------------------------------------------------------------------------------
/iterations/v1-single-agent/requirements.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kuilenren/Archon/main/iterations/v1-single-agent/requirements.txt
--------------------------------------------------------------------------------
/iterations/v5-parallel-specialized-agents/streamlit_pages/__init__.py:
--------------------------------------------------------------------------------
# This file makes the streamlit_pages directory a Python package
--------------------------------------------------------------------------------
/iterations/v2-agentic-workflow/requirements.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kuilenren/Archon/main/iterations/v2-agentic-workflow/requirements.txt
--------------------------------------------------------------------------------
/iterations/v4-streamlit-ui-overhaul/.streamlit/config.toml:
--------------------------------------------------------------------------------
[client]
showErrorDetails = "none"

[theme]
primaryColor = "#FF69B4"
base="dark"
--------------------------------------------------------------------------------
/iterations/v4-streamlit-ui-overhaul/public/Archon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kuilenren/Archon/main/iterations/v4-streamlit-ui-overhaul/public/Archon.png
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Folders
workbench
__pycache__
venv
.langgraph_api

# Files
.env
.env.temp
.env.test
env_vars.json
--------------------------------------------------------------------------------
/archon/langgraph.json:
--------------------------------------------------------------------------------
{
  "dependencies": ["."],
  "graphs": {
    "agent": "./archon_graph.py:agentic_flow"
  },
  "env": "../.env"
}
--------------------------------------------------------------------------------
/iterations/v5-parallel-specialized-agents/public/Archon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kuilenren/Archon/main/iterations/v5-parallel-specialized-agents/public/Archon.png
--------------------------------------------------------------------------------
/iterations/v4-streamlit-ui-overhaul/public/ArchonLightGrey.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kuilenren/Archon/main/iterations/v4-streamlit-ui-overhaul/public/ArchonLightGrey.png
--------------------------------------------------------------------------------
/iterations/v2-agentic-workflow/langgraph.json:
--------------------------------------------------------------------------------
{
  "dependencies": ["."],
  "graphs": {
    "agent": "./archon_graph.py:agentic_flow"
  },
  "env": ".env"
}
--------------------------------------------------------------------------------
/iterations/v5-parallel-specialized-agents/public/ArchonGraph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kuilenren/Archon/main/iterations/v5-parallel-specialized-agents/public/ArchonGraph.png
--------------------------------------------------------------------------------
/iterations/v3-mcp-support/archon/langgraph.json:
--------------------------------------------------------------------------------
{
  "dependencies": ["."],
  "graphs": {
    "agent": "./archon_graph.py:agentic_flow"
  },
  "env": "../.env"
}
--------------------------------------------------------------------------------
/iterations/v5-parallel-specialized-agents/public/ArchonLightGrey.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kuilenren/Archon/main/iterations/v5-parallel-specialized-agents/public/ArchonLightGrey.png
--------------------------------------------------------------------------------
/iterations/v4-streamlit-ui-overhaul/.gitignore:
--------------------------------------------------------------------------------
# Folders
workbench
__pycache__
venv
.langgraph_api

# Files
.env
.env.temp
.env.test
env_vars.json
--------------------------------------------------------------------------------
/iterations/v4-streamlit-ui-overhaul/archon/langgraph.json:
--------------------------------------------------------------------------------
{
  "dependencies": ["."],
  "graphs": {
    "agent": "./archon_graph.py:agentic_flow"
  },
  "env": "../.env"
}
--------------------------------------------------------------------------------
/iterations/v5-parallel-specialized-agents/archon/langgraph.json:
--------------------------------------------------------------------------------
{
  "dependencies": ["."],
  "graphs": {
    "agent": "./archon_graph.py:agentic_flow"
  },
  "env": "../.env"
}
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
blank_issues_enabled: false
contact_links:
  - name: Archon Community
    url: https://thinktank.ottomator.ai/c/archon/30
    about: Please ask questions and start conversations about Archon here in the oTTomator Think Tank!
--------------------------------------------------------------------------------
/iterations/v3-mcp-support/mcp-config.json:
--------------------------------------------------------------------------------
{
  "mcpServers": {
    "archon": {
      "command": "[path to Archon]\\archon\\venv\\Scripts\\python.exe",
      "args": [
        "[path to Archon]\\archon\\mcp_server.py"
      ]
    }
  }
}
--------------------------------------------------------------------------------
/mcp/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.12-slim

WORKDIR /app

# Copy requirements file and install dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy the MCP server files
COPY . .

# Expose port for MCP server
EXPOSE 8100

# Command to run the MCP server
CMD ["python", "mcp_server.py"]
--------------------------------------------------------------------------------
/iterations/v5-parallel-specialized-agents/mcp/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.12-slim

WORKDIR /app

# Copy requirements file and install dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy the MCP server files
COPY . .

# Expose port for MCP server
EXPOSE 8100

# Command to run the MCP server
CMD ["python", "mcp_server.py"]
--------------------------------------------------------------------------------
/iterations/v4-streamlit-ui-overhaul/mcp/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.12-slim

WORKDIR /app

# Copy requirements file and install dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy the MCP server files
COPY . .
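
# A minimal sketch of building and running this image on its own; the
# "archon-mcp" tag and the --env-file path are illustrative, not part of the repo:
#   docker build -t archon-mcp .
#   docker run --rm -p 8100:8100 --env-file ../.env archon-mcp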

# Expose port for MCP server
EXPOSE 8100

# Set environment variables
ENV PYTHONUNBUFFERED=1

# Command to run the MCP server
CMD ["python", "mcp_server.py"]
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
---
name: Feature Request
about: Suggest an idea for Archon
title: '[FEATURE] '
labels: enhancement
assignees: ''
---

## Describe the feature you'd like and why
A clear and concise description of what you want to happen.

## User Impact
Who would benefit from this feature and how?

## Implementation Details (optional)
Any thoughts on how this might be implemented?

## Additional context
Add any other screenshots, mockups, or context about the feature request here.
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
# Ignore specified folders
iterations/
venv/
.langgraph_api/
.github/
__pycache__/
.env

# Git related
.git/
.gitignore
.gitattributes

# Python cache
*.pyc
*.pyo
*.pyd
.Python
*.so
.pytest_cache/

# Environment files
.env.local
.env.development.local
.env.test.local
.env.production.local

# Logs
*.log

# IDE specific files
.idea/
.vscode/
*.swp
*.swo

# Keep the example env file for reference
!.env.example
--------------------------------------------------------------------------------
/mcp/.dockerignore:
--------------------------------------------------------------------------------
# Ignore specified folders
iterations/
venv/
.langgraph_api/
.github/
__pycache__/
.env

# Git related
.git/
.gitignore
.gitattributes

# Python cache
*.pyc
*.pyo
*.pyd
.Python
*.so
.pytest_cache/

# Environment files
.env.local
.env.development.local
.env.test.local
.env.production.local

# Logs
*.log

# IDE specific files
.idea/
.vscode/
*.swp
*.swo

# Keep the example env file for reference
!.env.example
--------------------------------------------------------------------------------
/iterations/v4-streamlit-ui-overhaul/.dockerignore:
--------------------------------------------------------------------------------
# Ignore specified folders
iterations/
venv/
.langgraph_api/
.github/
__pycache__/
.env

# Git related
.git/
.gitignore
.gitattributes

# Python cache
*.pyc
*.pyo
*.pyd
.Python
*.so
.pytest_cache/

# Environment files
.env.local
.env.development.local
.env.test.local
.env.production.local

# Logs
*.log

# IDE specific files
.idea/
.vscode/
*.swp
*.swo

# Keep the example env file for reference
!.env.example
--------------------------------------------------------------------------------
/iterations/v4-streamlit-ui-overhaul/mcp/.dockerignore:
--------------------------------------------------------------------------------
# Ignore specified folders
iterations/
venv/
.langgraph_api/
.github/
__pycache__/
.env

# Git related
.git/
.gitignore
.gitattributes

# Python cache
*.pyc
*.pyo
*.pyd
.Python
*.so
.pytest_cache/

# Environment files
.env.local
.env.development.local
.env.test.local
.env.production.local

# Logs
*.log

# IDE specific files
.idea/
.vscode/
*.swp
*.swo

# Keep the example env file for reference
!.env.example
--------------------------------------------------------------------------------
/iterations/v5-parallel-specialized-agents/.dockerignore:
--------------------------------------------------------------------------------
# Ignore specified folders
iterations/
venv/
.langgraph_api/
.github/
__pycache__/
.env

# Git related
.git/
.gitignore
.gitattributes

# Python cache
*.pyc
*.pyo
*.pyd
.Python
*.so
.pytest_cache/

# Environment files
.env.local
.env.development.local
.env.test.local
.env.production.local

# Logs
*.log

# IDE specific files
.idea/
.vscode/
*.swp
*.swo

# Keep the example env file for reference
!.env.example
--------------------------------------------------------------------------------
/iterations/v5-parallel-specialized-agents/mcp/.dockerignore:
--------------------------------------------------------------------------------
# Ignore specified folders
iterations/
venv/
.langgraph_api/
.github/
__pycache__/
.env

# Git related
.git/
.gitignore
.gitattributes

# Python cache
*.pyc
*.pyo
*.pyd
.Python
*.so
.pytest_cache/

# Environment files
.env.local
.env.development.local
.env.test.local
.env.production.local

# Logs
*.log

# IDE specific files
.idea/
.vscode/
*.swp
*.swo

# Keep the example env file for reference
!.env.example
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.12-slim

WORKDIR /app

# Install system dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements first for better caching
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application
COPY . .
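
# A minimal sketch of building and running this image; the "archon" tag is
# illustrative, and the -p flags mirror the EXPOSE directives below:
#   docker build -t archon .
#   docker run --rm -p 8501:8501 -p 8100:8100 --env-file .env archon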

# Set environment variables
ENV PYTHONUNBUFFERED=1
ENV PYTHONPATH=/app

# Expose port for Streamlit
EXPOSE 8501

# Expose port for the Archon Service (started within Streamlit)
EXPOSE 8100

# Set the entrypoint to run Streamlit directly
CMD ["streamlit", "run", "streamlit_ui.py", "--server.port=8501", "--server.address=0.0.0.0"]
--------------------------------------------------------------------------------
/iterations/v4-streamlit-ui-overhaul/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.12-slim

WORKDIR /app

# Install system dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements first for better caching
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application
COPY . .

# Set environment variables
ENV PYTHONUNBUFFERED=1
ENV PYTHONPATH=/app

# Expose port for Streamlit
EXPOSE 8501

# Expose port for the Archon Service (started within Streamlit)
EXPOSE 8100

# Set the entrypoint to run Streamlit directly
CMD ["streamlit", "run", "streamlit_ui.py", "--server.port=8501", "--server.address=0.0.0.0"]
--------------------------------------------------------------------------------
/iterations/v5-parallel-specialized-agents/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.12-slim

WORKDIR /app

# Install system dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements first for better caching
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application
COPY . .

# Set environment variables
ENV PYTHONUNBUFFERED=1
ENV PYTHONPATH=/app

# Expose port for Streamlit
EXPOSE 8501

# Expose port for the Archon Service (started within Streamlit)
EXPOSE 8100

# Set the entrypoint to run Streamlit directly
CMD ["streamlit", "run", "streamlit_ui.py", "--server.port=8501", "--server.address=0.0.0.0"]
--------------------------------------------------------------------------------
/iterations/v1-single-agent/.env.example:
--------------------------------------------------------------------------------
# Get your OpenAI API Key by following these instructions -
# https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key
# You only need this environment variable set if you are using GPT (and not Ollama)
OPENAI_API_KEY=

# For the Supabase version (sample_supabase_agent.py), set your Supabase URL and Service Key.
# Get your SUPABASE_URL from the API section of your Supabase project settings -
# https://supabase.com/dashboard/project/<your project ID>/settings/api
SUPABASE_URL=

# Get your SUPABASE_SERVICE_KEY from the API section of your Supabase project settings -
# https://supabase.com/dashboard/project/<your project ID>/settings/api
# On this page it is called the service_role secret.
SUPABASE_SERVICE_KEY=

# The LLM you want to use from OpenAI. See the list of models here:
# https://platform.openai.com/docs/models
# Example: gpt-4o-mini
LLM_MODEL=
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2025 oTTomator and Archon contributors

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
---
name: Bug Report
about: Create a report to help improve Archon
title: '[BUG] '
labels: bug
assignees: ''
---

## Description
A clear and concise description of the issue.

## Steps to Reproduce
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error

## Expected Behavior
A clear and concise description of what you expected to happen.

## Actual Behavior
A clear and concise description of what actually happened.

## Screenshots
If applicable, add screenshots to help explain your problem.

## Environment
- OS: [e.g. Windows 10, macOS Monterey, Ubuntu 22.04]
- Python Version: [e.g. Python 3.13, Python 3.12]
- Using MCP or Streamlit (or something else)

## Additional Context
Add any other context about the problem here, such as:
- Does this happen consistently or intermittently?
- Were there any recent changes that might be related?
- Any workarounds you've discovered?

## Possible Solution
If you have suggestions on how to fix the issue or what might be causing it.
--------------------------------------------------------------------------------
/archon/refiner_agents/prompt_refiner_agent.py:
--------------------------------------------------------------------------------
from __future__ import annotations as _annotations

import logfire
import os
import sys
from pydantic_ai import Agent
from dotenv import load_dotenv
from pydantic_ai.models.anthropic import AnthropicModel
from pydantic_ai.models.openai import OpenAIModel
from supabase import Client

# Add the project root to sys.path so utils and archon can be imported
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from utils.utils import get_env_var
from archon.agent_prompts import prompt_refiner_prompt

load_dotenv()

provider = get_env_var('LLM_PROVIDER') or 'OpenAI'
llm = get_env_var('PRIMARY_MODEL') or 'gpt-4o-mini'
base_url = get_env_var('BASE_URL') or 'https://api.openai.com/v1'
api_key = get_env_var('LLM_API_KEY') or 'no-llm-api-key-provided'

model = AnthropicModel(llm, api_key=api_key) if provider == "Anthropic" else OpenAIModel(llm, base_url=base_url, api_key=api_key)

logfire.configure(send_to_logfire='if-token-present')

prompt_refiner_agent = Agent(
    model,
    system_prompt=prompt_refiner_prompt
)
--------------------------------------------------------------------------------
/iterations/v5-parallel-specialized-agents/archon/refiner_agents/prompt_refiner_agent.py:
--------------------------------------------------------------------------------
from __future__ import annotations as _annotations

import logfire
import os
import sys
from pydantic_ai import Agent
from dotenv import load_dotenv
from pydantic_ai.models.anthropic import AnthropicModel
from pydantic_ai.models.openai import OpenAIModel
from supabase import Client

# Add the project root to sys.path so utils and archon can be imported
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from utils.utils import get_env_var
from archon.agent_prompts import prompt_refiner_prompt

load_dotenv()

provider = get_env_var('LLM_PROVIDER') or 'OpenAI'
llm = get_env_var('PRIMARY_MODEL') or 'gpt-4o-mini'
base_url = get_env_var('BASE_URL') or 'https://api.openai.com/v1'
api_key = get_env_var('LLM_API_KEY') or 'no-llm-api-key-provided'

model = AnthropicModel(llm, api_key=api_key) if provider == "Anthropic" else OpenAIModel(llm, base_url=base_url, api_key=api_key)

logfire.configure(send_to_logfire='if-token-present')

prompt_refiner_agent = Agent(
    model,
    system_prompt=prompt_refiner_prompt
)
--------------------------------------------------------------------------------
/iterations/v3-mcp-support/utils/utils.py:
--------------------------------------------------------------------------------
import os
from datetime import datetime
from functools import wraps
import inspect

def write_to_log(message: str):
    """Write a message to the logs.txt file in the workbench directory.

    Args:
        message: The message to log
    """
    # Get the directory one level up from the current file
    current_dir = os.path.dirname(os.path.abspath(__file__))
    parent_dir = os.path.dirname(current_dir)
    workbench_dir = os.path.join(parent_dir, "workbench")
    log_path = os.path.join(workbench_dir, "logs.txt")
    os.makedirs(workbench_dir, exist_ok=True)

    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    log_entry = f"[{timestamp}] {message}\n"

    with open(log_path, "a", encoding="utf-8") as f:
        f.write(log_entry)

def log_node_execution(func):
    """Decorator to log the start and end of graph node execution.

    Args:
        func: The async function to wrap
    """
    @wraps(func)
    async def wrapper(*args, **kwargs):
        func_name = func.__name__
        write_to_log(f"Starting node: {func_name}")
        try:
            result = await func(*args, **kwargs)
            write_to_log(f"Completed node: {func_name}")
            return result
        except Exception as e:
            write_to_log(f"Error in node {func_name}: {str(e)}")
            raise
    return wrapper
--------------------------------------------------------------------------------
/iterations/v2-agentic-workflow/.env.example:
--------------------------------------------------------------------------------
# Base URL for the OpenAI instance (default is https://api.openai.com/v1)
# OpenAI: https://api.openai.com/v1
# Ollama (example): http://localhost:11434/v1
# OpenRouter: https://openrouter.ai/api/v1
BASE_URL=

# Get your OpenAI API Key by following these instructions -
# https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key
# Even if using OpenRouter/Ollama, you still need to set this for the embedding model.
# Future versions of Archon will be more flexible with this.
OPENAI_API_KEY=

# For OpenAI: https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key
# For OpenRouter: https://openrouter.ai/keys
LLM_API_KEY=

# For the Supabase version (sample_supabase_agent.py), set your Supabase URL and Service Key.
# Get your SUPABASE_URL from the API section of your Supabase project settings -
# https://supabase.com/dashboard/project/<your project ID>/settings/api
SUPABASE_URL=

# Get your SUPABASE_SERVICE_KEY from the API section of your Supabase project settings -
# https://supabase.com/dashboard/project/<your project ID>/settings/api
# On this page it is called the service_role secret.
SUPABASE_SERVICE_KEY=

# The LLM you want to use for the reasoner (o3-mini, R1, QwQ, etc.).
# Example: o3-mini
# Example: deepseek-r1:7b-8k
REASONER_MODEL=

# The LLM you want to use for the primary agent/coder.
# Example: qwen2.5:14b-instruct-8k
PRIMARY_MODEL=

# Embedding model you want to use (nomic-embed-text:latest, text-embedding-3-small)
# Example: nomic-embed-text:latest
EMBEDDING_MODEL=
--------------------------------------------------------------------------------
/iterations/v3-mcp-support/.env.example:
--------------------------------------------------------------------------------
# Base URL for the OpenAI instance (default is https://api.openai.com/v1)
# OpenAI: https://api.openai.com/v1
# Ollama (example): http://localhost:11434/v1
# OpenRouter: https://openrouter.ai/api/v1
BASE_URL=

# For OpenAI: https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key
# For OpenRouter: https://openrouter.ai/keys
# For Ollama, no need to set this unless you specifically configured an API key
LLM_API_KEY=

# Get your OpenAI API Key by following these instructions -
# https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key
# Even if using OpenRouter, you still need to set this for the embedding model.
# No need to set this if using Ollama.
OPENAI_API_KEY=

# For the Supabase version (sample_supabase_agent.py), set your Supabase URL and Service Key.
# Get your SUPABASE_URL from the API section of your Supabase project settings -
# https://supabase.com/dashboard/project/<your project ID>/settings/api
SUPABASE_URL=

# Get your SUPABASE_SERVICE_KEY from the API section of your Supabase project settings -
# https://supabase.com/dashboard/project/<your project ID>/settings/api
# On this page it is called the service_role secret.
SUPABASE_SERVICE_KEY=

# The LLM you want to use for the reasoner (o3-mini, R1, QwQ, etc.).
# Example: o3-mini
# Example: deepseek-r1:7b-8k
REASONER_MODEL=

# The LLM you want to use for the primary agent/coder.
# Example: gpt-4o-mini
# Example: qwen2.5:14b-instruct-8k
PRIMARY_MODEL=

# Embedding model you want to use
# Example for Ollama: nomic-embed-text
# Example for OpenAI: text-embedding-3-small
EMBEDDING_MODEL=
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file

version: 2
updates:

  # Check for updates to GitHub Actions
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"

  # Check for updates to Python packages in root (current iteration)
  - package-ecosystem: "pip"
    directory: "/"
    schedule:
      interval: "weekly"

  # Check for updates to Python packages in mcp
  - package-ecosystem: "pip"
    directory: "/mcp"
    schedule:
      interval: "weekly"

  # Check for updates in Dockerfile
  - package-ecosystem: "docker"
    directory: "/"
    schedule:
      interval: "weekly"

  # Check for updates in MCP Dockerfile
  - package-ecosystem: "docker"
    directory: "/mcp"
    schedule:
      interval: "weekly"


  # Additional: Structure to maintain previous iterations

  # Update Version 1: Single Agent
  # - package-ecosystem: "pip"
  #   directory: "/iterations/v1-single-agent"
  #   schedule:
  #     interval: "monthly"

  # Update Version 2: Agentic Workflow
  # - package-ecosystem: "pip"
  #   directory: "/iterations/v2-agentic-workflow"
  #   schedule:
  #     interval: "monthly"

  # Update Version 3: MCP Support
  # - package-ecosystem: "pip"
  #   directory: "/iterations/v3-mcp-support"
  #   schedule:
  #     interval: "monthly"
--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
# Base URL for the OpenAI instance (default is https://api.openai.com/v1)
# OpenAI: https://api.openai.com/v1
# Ollama (example): http://localhost:11434/v1
# OpenRouter: https://openrouter.ai/api/v1
# Anthropic: https://api.anthropic.com/v1
BASE_URL=

# For OpenAI: https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key
# For Anthropic: https://console.anthropic.com/account/keys
# For OpenRouter: https://openrouter.ai/keys
# For Ollama, no need to set this unless you specifically configured an API key
LLM_API_KEY=

# Get your OpenAI API Key by following these instructions -
# https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key
# Even if using Anthropic or OpenRouter, you still need to set this for the embedding model.
# No need to set this if using Ollama.
OPENAI_API_KEY=

# For the Supabase version (sample_supabase_agent.py), set your Supabase URL and Service Key.
# Get your SUPABASE_URL from the API section of your Supabase project settings -
# https://supabase.com/dashboard/project/<your project ID>/settings/api
SUPABASE_URL=

# Get your SUPABASE_SERVICE_KEY from the API section of your Supabase project settings -
# https://supabase.com/dashboard/project/<your project ID>/settings/api
# On this page it is called the service_role secret.
SUPABASE_SERVICE_KEY=

# The LLM you want to use for the reasoner (o3-mini, R1, QwQ, etc.).
# Example: o3-mini
# Example: deepseek-r1:7b-8k
REASONER_MODEL=

# The LLM you want to use for the primary agent/coder.
# Example: gpt-4o-mini
# Example: qwen2.5:14b-instruct-8k
PRIMARY_MODEL=

# Embedding model you want to use
# Example for Ollama: nomic-embed-text
# Example for OpenAI: text-embedding-3-small
EMBEDDING_MODEL=
--------------------------------------------------------------------------------
/iterations/v4-streamlit-ui-overhaul/.env.example:
--------------------------------------------------------------------------------
# Base URL for the OpenAI instance (default is https://api.openai.com/v1)
# OpenAI: https://api.openai.com/v1
# Ollama (example): http://localhost:11434/v1
# OpenRouter: https://openrouter.ai/api/v1
# Anthropic: https://api.anthropic.com/v1
BASE_URL=

# For OpenAI: https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key
# For Anthropic: https://console.anthropic.com/account/keys
# For OpenRouter: https://openrouter.ai/keys
# For Ollama, no need to set this unless you specifically configured an API key
LLM_API_KEY=

# Get your OpenAI API Key by following these instructions -
# https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key
# Even if using Anthropic or OpenRouter, you still need to set this for the embedding model.
# No need to set this if using Ollama.
OPENAI_API_KEY=

# For the Supabase version (sample_supabase_agent.py), set your Supabase URL and Service Key.
# Get your SUPABASE_URL from the API section of your Supabase project settings -
# https://supabase.com/dashboard/project/<your project ID>/settings/api
SUPABASE_URL=

# Get your SUPABASE_SERVICE_KEY from the API section of your Supabase project settings -
# https://supabase.com/dashboard/project/<your project ID>/settings/api
# On this page it is called the service_role secret.
SUPABASE_SERVICE_KEY=

# The LLM you want to use for the reasoner (o3-mini, R1, QwQ, etc.).
# Example: o3-mini
# Example: deepseek-r1:7b-8k
REASONER_MODEL=

# The LLM you want to use for the primary agent/coder.
# Example: gpt-4o-mini
# Example: qwen2.5:14b-instruct-8k
PRIMARY_MODEL=

# Embedding model you want to use
# Example for Ollama: nomic-embed-text
# Example for OpenAI: text-embedding-3-small
EMBEDDING_MODEL=
--------------------------------------------------------------------------------
/iterations/v5-parallel-specialized-agents/.env.example:
--------------------------------------------------------------------------------
# Base URL for the OpenAI instance (default is https://api.openai.com/v1)
# OpenAI: https://api.openai.com/v1
# Ollama (example): http://localhost:11434/v1
# OpenRouter: https://openrouter.ai/api/v1
# Anthropic: https://api.anthropic.com/v1
BASE_URL=

# For OpenAI: https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key
# For Anthropic: https://console.anthropic.com/account/keys
# For OpenRouter: https://openrouter.ai/keys
# For Ollama, no need to set this unless you specifically configured an API key
LLM_API_KEY=

# Get your OpenAI API Key by following these instructions -
# https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key
# Even if using Anthropic or OpenRouter, you still need to set this for the embedding model.
# No need to set this if using Ollama.
OPENAI_API_KEY=

# For the Supabase version (sample_supabase_agent.py), set your Supabase URL and Service Key.
# Get your SUPABASE_URL from the API section of your Supabase project settings -
# https://supabase.com/dashboard/project/<your project ID>/settings/api
SUPABASE_URL=

# Get your SUPABASE_SERVICE_KEY from the API section of your Supabase project settings -
# https://supabase.com/dashboard/project/<your project ID>/settings/api
# On this page it is called the service_role secret.
SUPABASE_SERVICE_KEY=

# The LLM you want to use for the reasoner (o3-mini, R1, QwQ, etc.).
# Example: o3-mini
# Example: deepseek-r1:7b-8k
REASONER_MODEL=

# The LLM you want to use for the primary agent/coder.
# Example: gpt-4o-mini
# Example: qwen2.5:14b-instruct-8k
PRIMARY_MODEL=

# Embedding model you want to use
# Example for Ollama: nomic-embed-text
# Example for OpenAI: text-embedding-3-small
EMBEDDING_MODEL=
--------------------------------------------------------------------------------
/utils/site_pages.sql:
--------------------------------------------------------------------------------
-- Enable the pgvector extension
create extension if not exists vector;

-- Create the documentation chunks table
create table site_pages (
    id bigserial primary key,
    url varchar not null,
    chunk_number integer not null,
    title varchar not null,
    summary varchar not null,
    content text not null, -- Added content column
    metadata jsonb not null default '{}'::jsonb, -- Added metadata column
    embedding vector(1536), -- OpenAI embeddings are 1536 dimensions
    created_at timestamp with time zone default timezone('utc'::text, now()) not null,

    -- Add a unique constraint to prevent duplicate chunks for the same URL
    unique(url, chunk_number)
);

-- Create an index for better vector similarity search performance
create index on site_pages using ivfflat (embedding vector_cosine_ops);

-- Create an index on metadata for faster filtering
create index idx_site_pages_metadata on site_pages using gin (metadata);

-- Create a function to search for documentation chunks
create function match_site_pages (
  query_embedding vector(1536),
  match_count int default 10,
  filter jsonb DEFAULT '{}'::jsonb
) returns table (
  id bigint,
  url varchar,
  chunk_number integer,
  title varchar,
  summary varchar,
  content text,
  metadata jsonb,
  similarity float
)
language plpgsql
as $$
#variable_conflict use_column
begin
  return query
  select
    id,
    url,
    chunk_number,
    title,
    summary,
    content,
    metadata,
    1 - (site_pages.embedding <=> query_embedding) as similarity
  from site_pages
  where metadata @> filter
  order by site_pages.embedding <=> query_embedding
  limit match_count;
end;
$$;

-- Everything above will work for any PostgreSQL database. The below commands are for Supabase security
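
-- A hypothetical call to the function above, for illustration only: the embedding
-- literal is truncated (a real query passes all 1536 dimensions) and the "source"
-- metadata key is an assumed example, not a schema guarantee.
--   select url, title, similarity
--   from match_site_pages(
--     '[0.01, -0.02, ...]'::vector(1536),
--     match_count => 5,
--     filter => '{"source": "pydantic_ai_docs"}'::jsonb
--   );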

-- Enable RLS on the table
alter table site_pages enable row level security;

-- Create a policy that allows anyone to read
create policy "Allow public read access"
  on site_pages
  for select
  to public
  using (true);
--------------------------------------------------------------------------------
/iterations/v1-single-agent/site_pages.sql:
--------------------------------------------------------------------------------
-- Enable the pgvector extension
create extension if not exists vector;

-- Create the documentation chunks table
create table site_pages (
    id bigserial primary key,
    url varchar not null,
    chunk_number integer not null,
    title varchar not null,
    summary varchar not null,
    content text not null, -- Added content column
    metadata jsonb not null default '{}'::jsonb, -- Added metadata column
    embedding vector(1536), -- OpenAI embeddings are 1536 dimensions
    created_at timestamp with time zone default timezone('utc'::text, now()) not null,

    -- Add a unique constraint to prevent duplicate chunks for the same URL
    unique(url, chunk_number)
);

-- Create an index for better vector similarity search performance
create index on site_pages using ivfflat (embedding vector_cosine_ops);

-- Create an index on metadata for faster filtering
create index idx_site_pages_metadata on site_pages using gin (metadata);

-- Create a function to search for documentation chunks
create function match_site_pages (
  query_embedding vector(1536),
  match_count int default 10,
  filter jsonb DEFAULT '{}'::jsonb
) returns table (
  id bigint,
  url varchar,
  chunk_number integer,
  title varchar,
  summary varchar,
  content text,
  metadata jsonb,
  similarity float
)
language plpgsql
as $$
#variable_conflict use_column
begin
  return query
  select
    id,
    url,
    chunk_number,
    title,
    summary,
    content,
    metadata,
    1 - (site_pages.embedding <=> query_embedding) as similarity
  from site_pages
  where metadata @> filter
  order by site_pages.embedding <=> query_embedding
  limit match_count;
end;
$$;

-- Everything above will work for any PostgreSQL database. The below commands are for Supabase security

-- Enable RLS on the table
alter table site_pages enable row level security;

-- Create a policy that allows anyone to read
create policy "Allow public read access"
  on site_pages
  for select
  to public
  using (true);
--------------------------------------------------------------------------------
/iterations/v2-agentic-workflow/site_pages.sql:
--------------------------------------------------------------------------------
-- Enable the pgvector extension
create extension if not exists vector;

-- Create the documentation chunks table
create table site_pages (
    id bigserial primary key,
    url varchar not null,
    chunk_number integer not null,
    title varchar not null,
    summary varchar not null,
    content text not null, -- Added content column
    metadata jsonb not null default '{}'::jsonb, -- Added metadata column
    embedding vector(1536), -- OpenAI embeddings are 1536 dimensions
    created_at timestamp with time zone default timezone('utc'::text, now()) not null,

    -- Add a unique constraint to prevent duplicate chunks for the same URL
    unique(url, chunk_number)
);

-- Create an index for better vector similarity search performance
create index on site_pages using ivfflat (embedding vector_cosine_ops);

-- Create an index on metadata for faster filtering
create index idx_site_pages_metadata on site_pages using gin (metadata);

-- Create a function to search for documentation chunks
create function match_site_pages (
  query_embedding vector(1536),
  match_count int default 10,
  filter jsonb DEFAULT '{}'::jsonb
) returns table (
  id bigint,
  url varchar,
  chunk_number integer,
  title varchar,
  summary varchar,
  content text,
  metadata jsonb,
  similarity float
)
language plpgsql
as $$
#variable_conflict use_column
begin
  return query
  select
    id,
    url,
    chunk_number,
    title,
    summary,
    content,
    metadata,
    1 - (site_pages.embedding <=> query_embedding) as similarity
  from site_pages
  where metadata @> filter
  order by site_pages.embedding <=> query_embedding
  limit match_count;
end;
$$;

-- Everything above will work for any PostgreSQL database. The below commands are for Supabase security

-- Enable RLS on the table
alter table site_pages enable row level security;

-- Create a policy that allows anyone to read
create policy "Allow public read access"
  on site_pages
  for select
  to public
  using (true);
--------------------------------------------------------------------------------
/iterations/v3-mcp-support/utils/site_pages.sql:
--------------------------------------------------------------------------------
-- Enable the pgvector extension
create extension if not exists vector;

-- Create the documentation chunks table
create table site_pages (
    id bigserial primary key,
    url varchar not null,
    chunk_number integer not null,
    title varchar not null,
    summary varchar not null,
    content text not null, -- Added content column
    metadata jsonb not null default '{}'::jsonb, -- Added metadata column
    embedding vector(1536), -- OpenAI embeddings are 1536 dimensions
    created_at timestamp with time zone default timezone('utc'::text, now()) not null,

    -- Add a unique constraint to prevent duplicate chunks for the same URL
    unique(url, chunk_number)
);

-- Create an index for better vector similarity search performance
create index on site_pages using ivfflat (embedding vector_cosine_ops);

-- Create an index on metadata for faster filtering
create index idx_site_pages_metadata on site_pages using gin (metadata);

-- Create a function to search for documentation chunks
create function match_site_pages (
  query_embedding vector(1536),
  match_count int default 10,
  filter jsonb DEFAULT '{}'::jsonb
) returns table (
  id bigint,
  url varchar,
  chunk_number integer,
  title varchar,
  summary varchar,
  content text,
  metadata jsonb,
  similarity float
)
language plpgsql
as $$
#variable_conflict use_column
begin
  return query
  select
    id,
    url,
    chunk_number,
    title,
    summary,
    content,
    metadata,
    1 - (site_pages.embedding <=> query_embedding) as similarity
  from site_pages
  where metadata @> filter
  order by site_pages.embedding <=> query_embedding
  limit match_count;
end;
$$;

-- Everything above will work for any PostgreSQL database. The below commands are for Supabase security

-- Enable RLS on the table
alter table site_pages enable row level security;

-- Create a policy that allows anyone to read
create policy "Allow public read access"
  on site_pages
  for select
  to public
  using (true);
--------------------------------------------------------------------------------
/iterations/v4-streamlit-ui-overhaul/utils/site_pages.sql:
--------------------------------------------------------------------------------
-- Enable the pgvector extension
create extension if not exists vector;

-- Create the documentation chunks table
create table site_pages (
    id bigserial primary key,
    url varchar not null,
    chunk_number integer not null,
    title varchar not null,
    summary varchar not null,
    content text not null, -- Added content column
    metadata jsonb not null default '{}'::jsonb, -- Added metadata column
    embedding vector(1536), -- OpenAI embeddings are 1536 dimensions
    created_at timestamp with time zone default timezone('utc'::text, now()) not null,

    -- Add a unique constraint to prevent duplicate chunks for the same URL
    unique(url, chunk_number)
);

-- Create an index for better vector similarity search performance
create index on site_pages using ivfflat (embedding vector_cosine_ops);

-- Create an index on metadata for faster filtering
create index idx_site_pages_metadata on site_pages using gin (metadata);

-- Create a function to search for documentation chunks
create function match_site_pages (
  query_embedding vector(1536),
  match_count int default 10,
  filter jsonb DEFAULT '{}'::jsonb
) returns table (
  id bigint,
  url varchar,
  chunk_number integer,
  title varchar,
  summary varchar,
  content text,
  metadata jsonb,
  similarity float
)
language plpgsql
as $$
#variable_conflict use_column
begin
  return query
  select
    id,
    url,
    chunk_number,
    title,
    summary,
    content,
    metadata,
    1 - (site_pages.embedding <=> query_embedding) as similarity
  from site_pages
  where metadata @> filter
  order by site_pages.embedding <=> query_embedding
  limit match_count;
end;
$$;

-- Everything above will work for any PostgreSQL database. The below commands are for Supabase security

-- Enable RLS on the table
alter table site_pages enable row level security;

-- Create a policy that allows anyone to read
create policy "Allow public read access"
  on site_pages
  for select
  to public
  using (true);
--------------------------------------------------------------------------------
/iterations/v2-agentic-workflow/ollama_site_pages.sql:
--------------------------------------------------------------------------------
-- Enable the pgvector extension
create extension if not exists vector;

-- Create the documentation chunks table
create table site_pages (
    id bigserial primary key,
    url varchar not null,
    chunk_number integer not null,
    title varchar not null,
    summary varchar not null,
    content text not null, -- Added content column
    metadata jsonb not null default '{}'::jsonb, -- Added metadata column
    embedding vector(768), -- Ollama nomic-embed-text embeddings are 768 dimensions
    created_at timestamp with time zone default timezone('utc'::text, now()) not null,

    -- Add a unique constraint to prevent duplicate chunks for the same URL
    unique(url, chunk_number)
);

-- Create an index for better vector similarity search performance
create index on site_pages using ivfflat (embedding vector_cosine_ops);

-- Create an index on metadata for faster filtering
create index idx_site_pages_metadata on site_pages using gin (metadata);

-- Create a function to search for documentation chunks
create function match_site_pages (
  query_embedding vector(768),
  match_count int default 10,
  filter jsonb DEFAULT '{}'::jsonb
) returns table (
  id bigint,
  url varchar,
  chunk_number integer,
  title varchar,
  summary varchar,
  content text,
  metadata jsonb,
  similarity float
)
language plpgsql
as $$
#variable_conflict use_column
begin
  return query
  select
    id,
    url,
    chunk_number,
    title,
    summary,
    content,
    metadata,
    1 - (site_pages.embedding <=> query_embedding) as similarity
  from site_pages
  where metadata @> filter
  order by site_pages.embedding <=> query_embedding
  limit match_count;
end;
$$;

-- Everything above will work for any PostgreSQL database. The below commands are for Supabase security
The below commands are for Supabase security 63 | 64 | -- Enable RLS on the table 65 | alter table site_pages enable row level security; 66 | 67 | -- Create a policy that allows anyone to read 68 | create policy "Allow public read access" 69 | on site_pages 70 | for select 71 | to public 72 | using (true); -------------------------------------------------------------------------------- /iterations/v5-parallel-specialized-agents/utils/site_pages.sql: -------------------------------------------------------------------------------- 1 | -- Enable the pgvector extension 2 | create extension if not exists vector; 3 | 4 | -- Create the documentation chunks table 5 | create table site_pages ( 6 | id bigserial primary key, 7 | url varchar not null, 8 | chunk_number integer not null, 9 | title varchar not null, 10 | summary varchar not null, 11 | content text not null, -- Added content column 12 | metadata jsonb not null default '{}'::jsonb, -- Added metadata column 13 | embedding vector(1536), -- OpenAI embeddings are 1536 dimensions 14 | created_at timestamp with time zone default timezone('utc'::text, now()) not null, 15 | 16 | -- Add a unique constraint to prevent duplicate chunks for the same URL 17 | unique(url, chunk_number) 18 | ); 19 | 20 | -- Create an index for better vector similarity search performance 21 | create index on site_pages using ivfflat (embedding vector_cosine_ops); 22 | 23 | -- Create an index on metadata for faster filtering 24 | create index idx_site_pages_metadata on site_pages using gin (metadata); 25 | 26 | -- Create a function to search for documentation chunks 27 | create function match_site_pages ( 28 | query_embedding vector(1536), 29 | match_count int default 10, 30 | filter jsonb DEFAULT '{}'::jsonb 31 | ) returns table ( 32 | id bigint, 33 | url varchar, 34 | chunk_number integer, 35 | title varchar, 36 | summary varchar, 37 | content text, 38 | metadata jsonb, 39 | similarity float 40 | ) 41 | language plpgsql 42 | as $$ 43 | #variable_conflict use_column 44 | begin 45 | return query 46 | select 47 | id, 48 | url, 49 | chunk_number, 50 | title, 51 | summary, 52 | content, 53 | metadata, 54 | 1 - (site_pages.embedding <=> query_embedding) as similarity 55 | from site_pages 56 | where metadata @> filter 57 | order by site_pages.embedding <=> query_embedding 58 | limit match_count; 59 | end; 60 | $$; 61 | 62 | -- Everything above will work for any PostgreSQL database. 
The below commands are for Supabase security 63 | 64 | -- Enable RLS on the table 65 | alter table site_pages enable row level security; 66 | 67 | -- Create a policy that allows anyone to read 68 | create policy "Allow public read access" 69 | on site_pages 70 | for select 71 | to public 72 | using (true); -------------------------------------------------------------------------------- /iterations/v3-mcp-support/utils/ollama_site_pages.sql: -------------------------------------------------------------------------------- 1 | -- Enable the pgvector extension 2 | create extension if not exists vector; 3 | 4 | -- Create the documentation chunks table 5 | create table site_pages ( 6 | id bigserial primary key, 7 | url varchar not null, 8 | chunk_number integer not null, 9 | title varchar not null, 10 | summary varchar not null, 11 | content text not null, -- Added content column 12 | metadata jsonb not null default '{}'::jsonb, -- Added metadata column 13 | embedding vector(768), -- Ollama nomic-embed-text embeddings are 768 dimensions 14 | created_at timestamp with time zone default timezone('utc'::text, now()) not null, 15 | 16 | -- Add a unique constraint to prevent duplicate chunks for the same URL 17 | unique(url, chunk_number) 18 | ); 19 | 20 | -- Create an index for better vector similarity search performance 21 | create index on site_pages using ivfflat (embedding vector_cosine_ops); 22 | 23 | -- Create an index on metadata for faster filtering 24 | create index idx_site_pages_metadata on site_pages using gin (metadata); 25 | 26 | -- Create a function to search for documentation chunks 27 | create function match_site_pages ( 28 | query_embedding vector(768), 29 | match_count int default 10, 30 | filter jsonb DEFAULT '{}'::jsonb 31 | ) returns table ( 32 | id bigint, 33 | url varchar, 34 | chunk_number integer, 35 | title varchar, 36 | summary varchar, 37 | content text, 38 | metadata jsonb, 39 | similarity float 40 | ) 41 | language plpgsql 42 | as $$ 43 | #variable_conflict use_column 44 | begin 45 | return query 46 | select 47 | id, 48 | url, 49 | chunk_number, 50 | title, 51 | summary, 52 | content, 53 | metadata, 54 | 1 - (site_pages.embedding <=> query_embedding) as similarity 55 | from site_pages 56 | where metadata @> filter 57 | order by site_pages.embedding <=> query_embedding 58 | limit match_count; 59 | end; 60 | $$; 61 | 62 | -- Everything above will work for any PostgreSQL database. 
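-- Tuning note for the ivfflat index created above: ivfflat derives its clusters
-- from the rows present at build time, so recall is better when the index is
-- created (or rebuilt) after the table is populated. The cluster count can also
-- be set explicitly; a hedged sketch using pgvector's default of 100 lists:
--
--   create index on site_pages
--     using ivfflat (embedding vector_cosine_ops)
--     with (lists = 100);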
The below commands are for Supabase security 63 | 64 | -- Enable RLS on the table 65 | alter table site_pages enable row level security; 66 | 67 | -- Create a policy that allows anyone to read 68 | create policy "Allow public read access" 69 | on site_pages 70 | for select 71 | to public 72 | using (true); -------------------------------------------------------------------------------- /iterations/v3-mcp-support/setup_mcp.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import subprocess 4 | import sys 5 | 6 | def setup_venv(): 7 | # Get the absolute path to the current directory 8 | base_path = os.path.abspath(os.path.dirname(__file__)) 9 | venv_path = os.path.join(base_path, 'venv') 10 | venv_created = False 11 | 12 | # Create virtual environment if it doesn't exist 13 | if not os.path.exists(venv_path): 14 | print("Creating virtual environment...") 15 | subprocess.run([sys.executable, '-m', 'venv', venv_path], check=True) 16 | print("Virtual environment created successfully!") 17 | venv_created = True 18 | else: 19 | print("Virtual environment already exists.") 20 | 21 | # Install requirements if we just created the venv 22 | if venv_created: 23 | print("\nInstalling requirements...") 24 | # Use the venv's pip to install requirements (Scripts on Windows, bin elsewhere) 25 | pip_path = os.path.join(venv_path, 'Scripts' if os.name == 'nt' else 'bin', 'pip.exe' if os.name == 'nt' else 'pip') 26 | requirements_path = os.path.join(base_path, 'requirements.txt') 27 | subprocess.run([pip_path, 'install', '-r', requirements_path], check=True) 28 | print("Requirements installed successfully!") 29 | 30 | def generate_mcp_config(): 31 | # Get the absolute path to the current directory 32 | base_path = os.path.abspath(os.path.dirname(__file__)) 33 | 34 | # Construct the paths 35 | python_path = os.path.join(base_path, 'venv', 'Scripts' if os.name == 'nt' else 'bin', 'python.exe' if os.name == 'nt' else 'python') 36 | server_script_path = os.path.join(base_path, 'mcp_server.py') 37 | 38 | # Create the config dictionary 39 | config = { 40 | "mcpServers": { 41 | "archon": { 42 | "command": python_path, 43 | "args": [server_script_path] 44 | } 45 | } 46 | } 47 | 48 | # Write the config to a file 49 | config_path = os.path.join(base_path, 'mcp-config.json') 50 | with open(config_path, 'w') as f: 51 | json.dump(config, f, indent=2) 52 | 53 | print(f"\nMCP configuration has been written to: {config_path}") 54 | print(f"\nMCP configuration for Cursor:\n\n{python_path} {server_script_path}") 55 | print("\nMCP configuration for Windsurf/Claude Desktop:") 56 | print(json.dumps(config, indent=2)) 57 | 58 | if __name__ == '__main__': 59 | setup_venv() 60 | generate_mcp_config() 61 | -------------------------------------------------------------------------------- /iterations/v3-mcp-support/graph_service.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI, HTTPException 2 | from pydantic import BaseModel 3 | from typing import Optional, Dict, Any 4 | from archon.archon_graph import agentic_flow 5 | from langgraph.types import Command 6 | from utils.utils import write_to_log 7 | 8 | app = FastAPI() 9 | 10 | class InvokeRequest(BaseModel): 11 | message: str 12 | thread_id: str 13 | is_first_message: bool = False 14 | config: Optional[Dict[str, Any]] = None 15 | 16 | @app.get("/health") 17 | async def health_check(): 18 | """Health check endpoint""" 19 | return {"status": "ok"} 20 | 21 | @app.post("/invoke") 22 | async def invoke_agent(request: InvokeRequest): 23 | """Process a message through the agentic flow and return the complete
response. 24 | 25 | The agent streams the response but this API endpoint waits for the full output 26 | before returning so it's a synchronous operation for MCP. 27 | Another endpoint will be made later to fully stream the response from the API. 28 | 29 | Args: 30 | request: The InvokeRequest containing message and thread info 31 | 32 | Returns: 33 | dict: Contains the complete response from the agent 34 | """ 35 | try: 36 | config = request.config or { 37 | "configurable": { 38 | "thread_id": request.thread_id 39 | } 40 | } 41 | 42 | response = "" 43 | if request.is_first_message: 44 | write_to_log(f"Processing first message for thread {request.thread_id}") 45 | async for msg in agentic_flow.astream( 46 | {"latest_user_message": request.message}, 47 | config, 48 | stream_mode="custom" 49 | ): 50 | response += str(msg) 51 | else: 52 | write_to_log(f"Processing continuation for thread {request.thread_id}") 53 | async for msg in agentic_flow.astream( 54 | Command(resume=request.message), 55 | config, 56 | stream_mode="custom" 57 | ): 58 | response += str(msg) 59 | 60 | write_to_log(f"Final response for thread {request.thread_id}: {response}") 61 | return {"response": response} 62 | 63 | except Exception as e: 64 | write_to_log(f"Error processing message for thread {request.thread_id}: {str(e)}") 65 | raise HTTPException(status_code=500, detail=str(e)) 66 | 67 | if __name__ == "__main__": 68 | import uvicorn 69 | uvicorn.run(app, host="127.0.0.1", port=8100) 70 | -------------------------------------------------------------------------------- /iterations/v4-streamlit-ui-overhaul/graph_service.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI, HTTPException 2 | from pydantic import BaseModel 3 | from typing import Optional, Dict, Any 4 | from archon.archon_graph import agentic_flow 5 | from langgraph.types import Command 6 | from utils.utils import write_to_log 7 | 8 | app = FastAPI() 9 | 10 | class InvokeRequest(BaseModel): 11 | message: str 12 | thread_id: str 13 | is_first_message: bool = False 14 | config: Optional[Dict[str, Any]] = None 15 | 16 | @app.get("/health") 17 | async def health_check(): 18 | """Health check endpoint""" 19 | return {"status": "ok"} 20 | 21 | @app.post("/invoke") 22 | async def invoke_agent(request: InvokeRequest): 23 | """Process a message through the agentic flow and return the complete response. 24 | 25 | The agent streams the response but this API endpoint waits for the full output 26 | before returning so it's a synchronous operation for MCP. 27 | Another endpoint will be made later to fully stream the response from the API. 
28 | 29 | Args: 30 | request: The InvokeRequest containing message and thread info 31 | 32 | Returns: 33 | dict: Contains the complete response from the agent 34 | """ 35 | try: 36 | config = request.config or { 37 | "configurable": { 38 | "thread_id": request.thread_id 39 | } 40 | } 41 | 42 | response = "" 43 | if request.is_first_message: 44 | write_to_log(f"Processing first message for thread {request.thread_id}") 45 | async for msg in agentic_flow.astream( 46 | {"latest_user_message": request.message}, 47 | config, 48 | stream_mode="custom" 49 | ): 50 | response += str(msg) 51 | else: 52 | write_to_log(f"Processing continuation for thread {request.thread_id}") 53 | async for msg in agentic_flow.astream( 54 | Command(resume=request.message), 55 | config, 56 | stream_mode="custom" 57 | ): 58 | response += str(msg) 59 | 60 | write_to_log(f"Final response for thread {request.thread_id}: {response}") 61 | return {"response": response} 62 | 63 | except Exception as e: 64 | write_to_log(f"Error processing message for thread {request.thread_id}: {str(e)}") 65 | raise HTTPException(status_code=500, detail=str(e)) 66 | 67 | if __name__ == "__main__": 68 | import uvicorn 69 | uvicorn.run(app, host="0.0.0.0", port=8100) 70 | -------------------------------------------------------------------------------- /graph_service.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI, HTTPException 2 | from pydantic import BaseModel 3 | from typing import Optional, Dict, Any 4 | from archon.archon_graph import agentic_flow 5 | from langgraph.types import Command 6 | from utils.utils import write_to_log 7 | 8 | app = FastAPI() 9 | 10 | class InvokeRequest(BaseModel): 11 | message: str 12 | thread_id: str 13 | is_first_message: bool = False 14 | config: Optional[Dict[str, Any]] = None 15 | 16 | @app.get("/health") 17 | async def health_check(): 18 | """Health check endpoint""" 19 | return {"status": "ok"} 20 | 21 | @app.post("/invoke") 22 | async def invoke_agent(request: InvokeRequest): 23 | """Process a message through the agentic flow and return the complete response. 24 | 25 | The agent streams the response but this API endpoint waits for the full output 26 | before returning so it's a synchronous operation for MCP. 27 | Another endpoint will be made later to fully stream the response from the API. 
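# A minimal client sketch for the /invoke endpoint defined above, assuming the
# service is running locally on port 8100 as in the __main__ block (the message
# text and thread handling are illustrative):
#
#   import uuid
#   import requests
#
#   thread_id = str(uuid.uuid4())
#   resp = requests.post(
#       "http://127.0.0.1:8100/invoke",
#       json={
#           "message": "Build me an AI agent that searches the web.",
#           "thread_id": thread_id,
#           "is_first_message": True,
#       },
#   )
#   resp.raise_for_status()
#   print(resp.json()["response"])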
28 | 29 | Args: 30 | request: The InvokeRequest containing message and thread info 31 | 32 | Returns: 33 | dict: Contains the complete response from the agent 34 | """ 35 | try: 36 | config = request.config or { 37 | "configurable": { 38 | "thread_id": request.thread_id 39 | } 40 | } 41 | 42 | response = "" 43 | if request.is_first_message: 44 | write_to_log(f"Processing first message for thread {request.thread_id}") 45 | async for msg in agentic_flow.astream( 46 | {"latest_user_message": request.message}, 47 | config, 48 | stream_mode="custom" 49 | ): 50 | response += str(msg) 51 | else: 52 | write_to_log(f"Processing continuation for thread {request.thread_id}") 53 | async for msg in agentic_flow.astream( 54 | Command(resume=request.message), 55 | config, 56 | stream_mode="custom" 57 | ): 58 | response += str(msg) 59 | 60 | write_to_log(f"Final response for thread {request.thread_id}: {response}") 61 | return {"response": response} 62 | 63 | except Exception as e: 64 | print(f"Exception invoking Archon for thread {request.thread_id}: {str(e)}") 65 | write_to_log(f"Error processing message for thread {request.thread_id}: {str(e)}") 66 | raise HTTPException(status_code=500, detail=str(e)) 67 | 68 | if __name__ == "__main__": 69 | import uvicorn 70 | uvicorn.run(app, host="0.0.0.0", port=8100) 71 | -------------------------------------------------------------------------------- /iterations/v5-parallel-specialized-agents/graph_service.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI, HTTPException 2 | from pydantic import BaseModel 3 | from typing import Optional, Dict, Any 4 | from archon.archon_graph import agentic_flow 5 | from langgraph.types import Command 6 | from utils.utils import write_to_log 7 | 8 | app = FastAPI() 9 | 10 | class InvokeRequest(BaseModel): 11 | message: str 12 | thread_id: str 13 | is_first_message: bool = False 14 | config: Optional[Dict[str, Any]] = None 15 | 16 | @app.get("/health") 17 | async def health_check(): 18 | """Health check endpoint""" 19 | return {"status": "ok"} 20 | 21 | @app.post("/invoke") 22 | async def invoke_agent(request: InvokeRequest): 23 | """Process a message through the agentic flow and return the complete response. 24 | 25 | The agent streams the response but this API endpoint waits for the full output 26 | before returning so it's a synchronous operation for MCP. 27 | Another endpoint will be made later to fully stream the response from the API. 
28 |  29 | Args: 30 | request: The InvokeRequest containing message and thread info 31 | 32 | Returns: 33 | dict: Contains the complete response from the agent 34 | """ 35 | try: 36 | config = request.config or { 37 | "configurable": { 38 | "thread_id": request.thread_id 39 | } 40 | } 41 | 42 | response = "" 43 | if request.is_first_message: 44 | write_to_log(f"Processing first message for thread {request.thread_id}") 45 | async for msg in agentic_flow.astream( 46 | {"latest_user_message": request.message}, 47 | config, 48 | stream_mode="custom" 49 | ): 50 | response += str(msg) 51 | else: 52 | write_to_log(f"Processing continuation for thread {request.thread_id}") 53 | async for msg in agentic_flow.astream( 54 | Command(resume=request.message), 55 | config, 56 | stream_mode="custom" 57 | ): 58 | response += str(msg) 59 | 60 | write_to_log(f"Final response for thread {request.thread_id}: {response}") 61 | return {"response": response} 62 | 63 | except Exception as e: 64 | print(f"Exception invoking Archon for thread {request.thread_id}: {str(e)}") 65 | write_to_log(f"Error processing message for thread {request.thread_id}: {str(e)}") 66 | raise HTTPException(status_code=500, detail=str(e)) 67 | 68 | if __name__ == "__main__": 69 | import uvicorn 70 | uvicorn.run(app, host="0.0.0.0", port=8100) 71 | -------------------------------------------------------------------------------- /iterations/v3-mcp-support/mcp_server.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import asyncio 4 | import threading 5 | from mcp.server.fastmcp import FastMCP 6 | import requests 7 | from typing import Dict, List 8 | import uuid 9 | from utils.utils import write_to_log 10 | from graph_service import app 11 | import uvicorn 12 | 13 | 14 | # Initialize FastMCP server 15 | mcp = FastMCP("archon") 16 | 17 | 18 | # Store active threads 19 | active_threads: Dict[str, List[str]] = {} 20 | 21 | 22 | # FastAPI service URL 23 | GRAPH_SERVICE_URL = "http://127.0.0.1:8100" 24 | 25 | 26 | @mcp.tool() 27 | async def create_thread() -> str: 28 | """Create a new conversation thread for Archon. 29 | Always call this tool before invoking Archon for the first time in a conversation. 30 | (if you don't already have a thread ID) 31 | 32 | Returns: 33 | str: A unique thread ID for the conversation 34 | """ 35 | thread_id = str(uuid.uuid4()) 36 | active_threads[thread_id] = [] 37 | write_to_log(f"Created new thread: {thread_id}") 38 | return thread_id 39 | 40 | 41 | def _make_request(thread_id: str, user_input: str, config: dict) -> dict: 42 | """Make a synchronous request to the graph service and return its JSON body""" 43 | response = requests.post( 44 | f"{GRAPH_SERVICE_URL}/invoke", 45 | json={ 46 | "message": user_input, 47 | "thread_id": thread_id, 48 | "is_first_message": not active_threads[thread_id], 49 | "config": config 50 | } 51 | ) 52 | response.raise_for_status() 53 | return response.json() 54 | 55 | 56 | @mcp.tool() 57 | async def run_agent(thread_id: str, user_input: str) -> str: 58 | """Run the Archon agent with user input. 59 | Only use this tool after you have called create_thread in this conversation to get a unique thread ID. 60 | If you already created a thread ID in this conversation, do not create another one. Reuse the same ID. 61 | After you receive the code from Archon, always implement it into the codebase unless asked not to.
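# Sketch of the tool-call protocol described above, from the MCP client's side
# (tool names as registered in this file; the prompts are made up):
#
#   thread_id = await create_thread()
#   code = await run_agent(thread_id, "Build an agent that calls the Brave API")
#   code = await run_agent(thread_id, "Add retries to the HTTP tool")  # reuse the same thread_id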
62 |  63 | Args: 64 | thread_id: The conversation thread ID 65 | user_input: The user's message to process 66 | 67 | Returns: 68 | str: The agent's response which generally includes the code for the agent 69 | """ 70 | if thread_id not in active_threads: 71 | write_to_log(f"Error: Thread not found - {thread_id}") 72 | raise ValueError("Thread not found") 73 | 74 | write_to_log(f"Processing message for thread {thread_id}: {user_input}") 75 | 76 | config = { 77 | "configurable": { 78 | "thread_id": thread_id 79 | } 80 | } 81 | 82 | try: 83 | result = await asyncio.to_thread(_make_request, thread_id, user_input, config) 84 | active_threads[thread_id].append(user_input) 85 | return result['response'] 86 | 87 | except Exception as e: 88 | raise 89 | 90 | 91 | if __name__ == "__main__": 92 | write_to_log("Starting MCP server") 93 | 94 | # Run MCP server 95 | mcp.run(transport='stdio') 96 | -------------------------------------------------------------------------------- /iterations/v4-streamlit-ui-overhaul/mcp_server.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import asyncio 4 | import threading 5 | from mcp.server.fastmcp import FastMCP 6 | import requests 7 | from typing import Dict, List 8 | import uuid 9 | from utils.utils import write_to_log 10 | from graph_service import app 11 | import uvicorn 12 | 13 | 14 | # Initialize FastMCP server 15 | mcp = FastMCP("archon") 16 | 17 | 18 | # Store active threads 19 | active_threads: Dict[str, List[str]] = {} 20 | 21 | 22 | # FastAPI service URL 23 | GRAPH_SERVICE_URL = "http://127.0.0.1:8100" 24 | 25 | 26 | @mcp.tool() 27 | async def create_thread() -> str: 28 | """Create a new conversation thread for Archon. 29 | Always call this tool before invoking Archon for the first time in a conversation. 30 | (if you don't already have a thread ID) 31 | 32 | Returns: 33 | str: A unique thread ID for the conversation 34 | """ 35 | thread_id = str(uuid.uuid4()) 36 | active_threads[thread_id] = [] 37 | write_to_log(f"Created new thread: {thread_id}") 38 | return thread_id 39 | 40 | 41 | def _make_request(thread_id: str, user_input: str, config: dict) -> dict: 42 | """Make a synchronous request to the graph service and return its JSON body""" 43 | response = requests.post( 44 | f"{GRAPH_SERVICE_URL}/invoke", 45 | json={ 46 | "message": user_input, 47 | "thread_id": thread_id, 48 | "is_first_message": not active_threads[thread_id], 49 | "config": config 50 | } 51 | ) 52 | response.raise_for_status() 53 | return response.json() 54 | 55 | 56 | @mcp.tool() 57 | async def run_agent(thread_id: str, user_input: str) -> str: 58 | """Run the Archon agent with user input. 59 | Only use this tool after you have called create_thread in this conversation to get a unique thread ID. 60 | If you already created a thread ID in this conversation, do not create another one. Reuse the same ID. 61 | After you receive the code from Archon, always implement it into the codebase unless asked not to.
62 | 63 | Args: 64 | thread_id: The conversation thread ID 65 | user_input: The user's message to process 66 | 67 | Returns: 68 | str: The agent's response which generally includes the code for the agent 69 | """ 70 | if thread_id not in active_threads: 71 | write_to_log(f"Error: Thread not found - {thread_id}") 72 | raise ValueError("Thread not found") 73 | 74 | write_to_log(f"Processing message for thread {thread_id}: {user_input}") 75 | 76 | config = { 77 | "configurable": { 78 | "thread_id": thread_id 79 | } 80 | } 81 | 82 | try: 83 | result = await asyncio.to_thread(_make_request, thread_id, user_input, config) 84 | active_threads[thread_id].append(user_input) 85 | return result['response'] 86 | 87 | except Exception as e: 88 | raise 89 | 90 | 91 | if __name__ == "__main__": 92 | write_to_log("Starting MCP server") 93 | 94 | # Run MCP server 95 | mcp.run(transport='stdio') 96 | -------------------------------------------------------------------------------- /streamlit_pages/styles.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains the CSS styles for the Streamlit UI. 3 | """ 4 | 5 | import streamlit as st 6 | 7 | def load_css(): 8 | """ 9 | Load the custom CSS styles for the Archon UI. 10 | """ 11 | st.markdown(""" 12 | 94 | """, unsafe_allow_html=True) 95 | -------------------------------------------------------------------------------- /iterations/v5-parallel-specialized-agents/streamlit_pages/styles.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains the CSS styles for the Streamlit UI. 3 | """ 4 | 5 | import streamlit as st 6 | 7 | def load_css(): 8 | """ 9 | Load the custom CSS styles for the Archon UI. 10 | """ 11 | st.markdown(""" 12 | 94 | """, unsafe_allow_html=True) 95 | -------------------------------------------------------------------------------- /streamlit_pages/chat.py: -------------------------------------------------------------------------------- 1 | from langgraph.types import Command 2 | import streamlit as st 3 | import uuid 4 | import sys 5 | import os 6 | 7 | # Add the current directory to Python path 8 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 9 | from archon.archon_graph import agentic_flow 10 | 11 | @st.cache_resource 12 | def get_thread_id(): 13 | return str(uuid.uuid4()) 14 | 15 | thread_id = get_thread_id() 16 | 17 | async def run_agent_with_streaming(user_input: str): 18 | """ 19 | Run the agent with streaming text for the user_input prompt, 20 | while maintaining the entire conversation in `st.session_state.messages`. 
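# Shape of the conversation history this module reads and writes; each entry is a
# plain dict, and the values here are illustrative:
#
#   st.session_state.messages = [
#       {"type": "human", "content": "Build me an AI agent that can search the web."},
#       {"type": "ai", "content": "Here is the generated Pydantic AI agent code..."},
#   ]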
21 | """ 22 | config = { 23 | "configurable": { 24 | "thread_id": thread_id 25 | } 26 | } 27 | 28 | # First message from user 29 | if len(st.session_state.messages) == 1: 30 | async for msg in agentic_flow.astream( 31 | {"latest_user_message": user_input}, config, stream_mode="custom" 32 | ): 33 | yield msg 34 | # Continue the conversation 35 | else: 36 | async for msg in agentic_flow.astream( 37 | Command(resume=user_input), config, stream_mode="custom" 38 | ): 39 | yield msg 40 | 41 | async def chat_tab(): 42 | """Display the chat interface for talking to Archon""" 43 | st.write("Describe to me an AI agent you want to build and I'll code it for you with Pydantic AI.") 44 | st.write("Example: Build me an AI agent that can search the web with the Brave API.") 45 | 46 | # Initialize chat history in session state if not present 47 | if "messages" not in st.session_state: 48 | st.session_state.messages = [] 49 | 50 | # Add a clear conversation button 51 | if st.button("Clear Conversation"): 52 | st.session_state.messages = [] 53 | st.rerun() 54 | 55 | # Display chat messages from history on app rerun 56 | for message in st.session_state.messages: 57 | message_type = message["type"] 58 | if message_type in ["human", "ai", "system"]: 59 | with st.chat_message(message_type): 60 | st.markdown(message["content"]) 61 | 62 | # Chat input for the user 63 | user_input = st.chat_input("What do you want to build today?") 64 | 65 | if user_input: 66 | # We append a new request to the conversation explicitly 67 | st.session_state.messages.append({"type": "human", "content": user_input}) 68 | 69 | # Display user prompt in the UI 70 | with st.chat_message("user"): 71 | st.markdown(user_input) 72 | 73 | # Display assistant response in chat message container 74 | response_content = "" 75 | with st.chat_message("assistant"): 76 | message_placeholder = st.empty() # Placeholder for updating the message 77 | 78 | # Add a spinner while loading 79 | with st.spinner("Archon is thinking..."): 80 | # Run the async generator to fetch responses 81 | async for chunk in run_agent_with_streaming(user_input): 82 | response_content += chunk 83 | # Update the placeholder with the current response content 84 | message_placeholder.markdown(response_content) 85 | 86 | st.session_state.messages.append({"type": "ai", "content": response_content}) -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build Archon 2 | 3 | on: 4 | push: 5 | branches: 6 | [ main, master ] 7 | pull_request: 8 | branches: 9 | [ main, master ] 10 | 11 | permissions: 12 | contents: read 13 | 14 | jobs: 15 | build-locally: 16 | runs-on: ubuntu-latest 17 | strategy: 18 | matrix: 19 | python-version: ['3.10', '3.11', '3.12', '3.13'] 20 | include: 21 | - python-version: '3.10' 22 | experimental: true 23 | - python-version: '3.12' 24 | experimental: true 25 | - python-version: '3.13' 26 | experimental: true 27 | fail-fast: false 28 | 29 | # Test on newer Python versions 30 | continue-on-error: ${{ matrix.experimental || false }} 31 | steps: 32 | - name: Checkout code 33 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 34 | 35 | - name: Set up Python ${{ matrix.python-version }} 36 | uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 37 | with: 38 | python-version: ${{ matrix.python-version }} 39 | 40 | - name: Install dependencies 41 | run: | 42 | python -m venv venv 43 | source 
venv/bin/activate 44 | pip install -r requirements.txt 45 | 46 | - name: Verify code compilation 47 | run: | 48 | source venv/bin/activate 49 | python -m compileall -f . 50 | 51 | build-docker: 52 | runs-on: ubuntu-latest 53 | strategy: 54 | matrix: 55 | python-version: ['3.10', '3.11', '3.12', '3.13'] 56 | include: 57 | - python-version: '3.10' 58 | experimental: true 59 | - python-version: '3.13' 60 | experimental: true 61 | fail-fast: false 62 | 63 | # Test on newer Python versions 64 | continue-on-error: ${{ matrix.experimental || false }} 65 | steps: 66 | - name: Checkout code 67 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 68 | 69 | - name: Set up Python ${{ matrix.python-version }} 70 | uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 71 | with: 72 | python-version: ${{ matrix.python-version }} 73 | 74 | - name: Modify run_docker.py for CI environment 75 | run: | 76 | cp run_docker.py run_docker_ci.py 77 | # Modify the script to just build and verify containers without running them indefinitely 78 | sed -i 's/"-d",/"-d", "--rm",/g' run_docker_ci.py 79 | 80 | - name: Run Docker setup script 81 | run: | 82 | chmod +x run_docker_ci.py 83 | python run_docker_ci.py 84 | 85 | - name: Verify containers are built 86 | run: | 87 | docker images | grep archon 88 | docker images | grep archon-mcp 89 | 90 | - name: Test container running status 91 | run: | 92 | docker ps -a | grep archon-container 93 | 94 | - name: Stop containers 95 | run: | 96 | docker stop archon-container || true 97 | docker rm archon-container || true 98 | docker stop archon-mcp || true 99 | docker rm archon-mcp || true 100 | docker ps -a | grep archon || echo "All containers successfully removed" 101 | -------------------------------------------------------------------------------- /iterations/v5-parallel-specialized-agents/streamlit_pages/chat.py: -------------------------------------------------------------------------------- 1 | from langgraph.types import Command 2 | import streamlit as st 3 | import uuid 4 | import sys 5 | import os 6 | 7 | # Add the current directory to Python path 8 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 9 | from archon.archon_graph import agentic_flow 10 | 11 | @st.cache_resource 12 | def get_thread_id(): 13 | return str(uuid.uuid4()) 14 | 15 | thread_id = get_thread_id() 16 | 17 | async def run_agent_with_streaming(user_input: str): 18 | """ 19 | Run the agent with streaming text for the user_input prompt, 20 | while maintaining the entire conversation in `st.session_state.messages`. 
21 | """ 22 | config = { 23 | "configurable": { 24 | "thread_id": thread_id 25 | } 26 | } 27 | 28 | # First message from user 29 | if len(st.session_state.messages) == 1: 30 | async for msg in agentic_flow.astream( 31 | {"latest_user_message": user_input}, config, stream_mode="custom" 32 | ): 33 | yield msg 34 | # Continue the conversation 35 | else: 36 | async for msg in agentic_flow.astream( 37 | Command(resume=user_input), config, stream_mode="custom" 38 | ): 39 | yield msg 40 | 41 | async def chat_tab(): 42 | """Display the chat interface for talking to Archon""" 43 | st.write("Describe to me an AI agent you want to build and I'll code it for you with Pydantic AI.") 44 | st.write("Example: Build me an AI agent that can search the web with the Brave API.") 45 | 46 | # Initialize chat history in session state if not present 47 | if "messages" not in st.session_state: 48 | st.session_state.messages = [] 49 | 50 | # Add a clear conversation button 51 | if st.button("Clear Conversation"): 52 | st.session_state.messages = [] 53 | st.rerun() 54 | 55 | # Display chat messages from history on app rerun 56 | for message in st.session_state.messages: 57 | message_type = message["type"] 58 | if message_type in ["human", "ai", "system"]: 59 | with st.chat_message(message_type): 60 | st.markdown(message["content"]) 61 | 62 | # Chat input for the user 63 | user_input = st.chat_input("What do you want to build today?") 64 | 65 | if user_input: 66 | # We append a new request to the conversation explicitly 67 | st.session_state.messages.append({"type": "human", "content": user_input}) 68 | 69 | # Display user prompt in the UI 70 | with st.chat_message("user"): 71 | st.markdown(user_input) 72 | 73 | # Display assistant response in chat message container 74 | response_content = "" 75 | with st.chat_message("assistant"): 76 | message_placeholder = st.empty() # Placeholder for updating the message 77 | 78 | # Add a spinner while loading 79 | with st.spinner("Archon is thinking..."): 80 | # Run the async generator to fetch responses 81 | async for chunk in run_agent_with_streaming(user_input): 82 | response_content += chunk 83 | # Update the placeholder with the current response content 84 | message_placeholder.markdown(response_content) 85 | 86 | st.session_state.messages.append({"type": "ai", "content": response_content}) -------------------------------------------------------------------------------- /archon/pydantic_ai_coder.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations as _annotations 2 | 3 | from dataclasses import dataclass 4 | from dotenv import load_dotenv 5 | import logfire 6 | import asyncio 7 | import httpx 8 | import os 9 | import sys 10 | import json 11 | from typing import List 12 | from pydantic import BaseModel 13 | from pydantic_ai import Agent, ModelRetry, RunContext 14 | from pydantic_ai.models.anthropic import AnthropicModel 15 | from pydantic_ai.models.openai import OpenAIModel 16 | from openai import AsyncOpenAI 17 | from supabase import Client 18 | 19 | # Add the parent directory to sys.path to allow importing from the parent directory 20 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 21 | from utils.utils import get_env_var 22 | from archon.agent_prompts import primary_coder_prompt 23 | from archon.agent_tools import ( 24 | retrieve_relevant_documentation_tool, 25 | list_documentation_pages_tool, 26 | get_page_content_tool 27 | ) 28 | 29 | load_dotenv() 30 | 31 | provider = 
get_env_var('LLM_PROVIDER') or 'OpenAI' 32 | llm = get_env_var('PRIMARY_MODEL') or 'gpt-4o-mini' 33 | base_url = get_env_var('BASE_URL') or 'https://api.openai.com/v1' 34 | api_key = get_env_var('LLM_API_KEY') or 'no-llm-api-key-provided' 35 | 36 | model = AnthropicModel(llm, api_key=api_key) if provider == "Anthropic" else OpenAIModel(llm, base_url=base_url, api_key=api_key) 37 | 38 | logfire.configure(send_to_logfire='if-token-present') 39 | 40 | @dataclass 41 | class PydanticAIDeps: 42 | supabase: Client 43 | embedding_client: AsyncOpenAI 44 | reasoner_output: str 45 | 46 | pydantic_ai_coder = Agent( 47 | model, 48 | system_prompt=primary_coder_prompt, 49 | deps_type=PydanticAIDeps, 50 | retries=2 51 | ) 52 | 53 | @pydantic_ai_coder.system_prompt 54 | def add_reasoner_output(ctx: RunContext[PydanticAIDeps]) -> str: 55 | return f""" 56 | \n\nAdditional thoughts/instructions from the reasoner LLM. 57 | This scope includes documentation pages for you to search as well: 58 | {ctx.deps.reasoner_output} 59 | """ 60 | 61 | @pydantic_ai_coder.tool 62 | async def retrieve_relevant_documentation(ctx: RunContext[PydanticAIDeps], user_query: str) -> str: 63 | """ 64 | Retrieve relevant documentation chunks based on the query with RAG. 65 | 66 | Args: 67 | ctx: The context including the Supabase client and OpenAI client 68 | user_query: The user's question or query 69 | 70 | Returns: 71 | A formatted string containing the top 4 most relevant documentation chunks 72 | """ 73 | return await retrieve_relevant_documentation_tool(ctx.deps.supabase, ctx.deps.embedding_client, user_query) 74 | 75 | @pydantic_ai_coder.tool 76 | async def list_documentation_pages(ctx: RunContext[PydanticAIDeps]) -> List[str]: 77 | """ 78 | Retrieve a list of all available Pydantic AI documentation pages. 79 | 80 | Returns: 81 | List[str]: List of unique URLs for all documentation pages 82 | """ 83 | return await list_documentation_pages_tool(ctx.deps.supabase) 84 | 85 | @pydantic_ai_coder.tool 86 | async def get_page_content(ctx: RunContext[PydanticAIDeps], url: str) -> str: 87 | """ 88 | Retrieve the full content of a specific documentation page by combining all its chunks.
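# Hypothetical wiring for running the coder agent defined above; the Supabase
# client, embedding client, and reasoner output are stand-ins created elsewhere:
#
#   deps = PydanticAIDeps(
#       supabase=supabase_client,
#       embedding_client=AsyncOpenAI(),
#       reasoner_output="Relevant docs: agents, tools, and results pages.",
#   )
#   result = await pydantic_ai_coder.run("Build a web-search agent", deps=deps)
#   print(result.data)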
89 |  90 | Args: 91 | ctx: The context including the Supabase client 92 | url: The URL of the page to retrieve 93 | 94 | Returns: 95 | str: The complete page content with all chunks combined in order 96 | """ 97 | return await get_page_content_tool(ctx.deps.supabase, url) -------------------------------------------------------------------------------- /iterations/v5-parallel-specialized-agents/archon/pydantic_ai_coder.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations as _annotations 2 | 3 | from dataclasses import dataclass 4 | from dotenv import load_dotenv 5 | import logfire 6 | import asyncio 7 | import httpx 8 | import os 9 | import sys 10 | import json 11 | from typing import List 12 | from pydantic import BaseModel 13 | from pydantic_ai import Agent, ModelRetry, RunContext 14 | from pydantic_ai.models.anthropic import AnthropicModel 15 | from pydantic_ai.models.openai import OpenAIModel 16 | from openai import AsyncOpenAI 17 | from supabase import Client 18 | 19 | # Add the parent directory to sys.path to allow importing from the parent directory 20 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 21 | from utils.utils import get_env_var 22 | from archon.agent_prompts import primary_coder_prompt 23 | from archon.agent_tools import ( 24 | retrieve_relevant_documentation_tool, 25 | list_documentation_pages_tool, 26 | get_page_content_tool 27 | ) 28 | 29 | load_dotenv() 30 | 31 | provider = get_env_var('LLM_PROVIDER') or 'OpenAI' 32 | llm = get_env_var('PRIMARY_MODEL') or 'gpt-4o-mini' 33 | base_url = get_env_var('BASE_URL') or 'https://api.openai.com/v1' 34 | api_key = get_env_var('LLM_API_KEY') or 'no-llm-api-key-provided' 35 | 36 | model = AnthropicModel(llm, api_key=api_key) if provider == "Anthropic" else OpenAIModel(llm, base_url=base_url, api_key=api_key) 37 | 38 | logfire.configure(send_to_logfire='if-token-present') 39 | 40 | @dataclass 41 | class PydanticAIDeps: 42 | supabase: Client 43 | embedding_client: AsyncOpenAI 44 | reasoner_output: str 45 | 46 | pydantic_ai_coder = Agent( 47 | model, 48 | system_prompt=primary_coder_prompt, 49 | deps_type=PydanticAIDeps, 50 | retries=2 51 | ) 52 | 53 | @pydantic_ai_coder.system_prompt 54 | def add_reasoner_output(ctx: RunContext[PydanticAIDeps]) -> str: 55 | return f""" 56 | \n\nAdditional thoughts/instructions from the reasoner LLM. 57 | This scope includes documentation pages for you to search as well: 58 | {ctx.deps.reasoner_output} 59 | """ 60 | 61 | @pydantic_ai_coder.tool 62 | async def retrieve_relevant_documentation(ctx: RunContext[PydanticAIDeps], user_query: str) -> str: 63 | """ 64 | Retrieve relevant documentation chunks based on the query with RAG. 65 | 66 | Args: 67 | ctx: The context including the Supabase client and OpenAI client 68 | user_query: The user's question or query 69 | 70 | Returns: 71 | A formatted string containing the top 4 most relevant documentation chunks 72 | """ 73 | return await retrieve_relevant_documentation_tool(ctx.deps.supabase, ctx.deps.embedding_client, user_query) 74 | 75 | @pydantic_ai_coder.tool 76 | async def list_documentation_pages(ctx: RunContext[PydanticAIDeps]) -> List[str]: 77 | """ 78 | Retrieve a list of all available Pydantic AI documentation pages.
79 | 80 | Returns: 81 | List[str]: List of unique URLs for all documentation pages 82 | """ 83 | return await list_documentation_pages_tool(ctx.deps.supabase) 84 | 85 | @pydantic_ai_coder.tool 86 | async def get_page_content(ctx: RunContext[PydanticAIDeps], url: str) -> str: 87 | """ 88 | Retrieve the full content of a specific documentation page by combining all its chunks. 89 | 90 | Args: 91 | ctx: The context including the Supabase client 92 | url: The URL of the page to retrieve 93 | 94 | Returns: 95 | str: The complete page content with all chunks combined in order 96 | """ 97 | return await get_page_content_tool(ctx.deps.supabase, url) -------------------------------------------------------------------------------- /archon/refiner_agents/tools_refiner_agent.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations as _annotations 2 | 3 | from dataclasses import dataclass 4 | from dotenv import load_dotenv 5 | import logfire 6 | import asyncio 7 | import httpx 8 | import os 9 | import sys 10 | import json 11 | from typing import List 12 | from pydantic import BaseModel 13 | from pydantic_ai import Agent, ModelRetry, RunContext 14 | from pydantic_ai.models.anthropic import AnthropicModel 15 | from pydantic_ai.models.openai import OpenAIModel 16 | from openai import AsyncOpenAI 17 | from supabase import Client 18 | 19 | # Add the parent directory to sys.path to allow importing from the parent directory 20 | sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) 21 | from utils.utils import get_env_var 22 | from archon.agent_prompts import tools_refiner_prompt 23 | from archon.agent_tools import ( 24 | retrieve_relevant_documentation_tool, 25 | list_documentation_pages_tool, 26 | get_page_content_tool 27 | ) 28 | 29 | load_dotenv() 30 | 31 | provider = get_env_var('LLM_PROVIDER') or 'OpenAI' 32 | llm = get_env_var('PRIMARY_MODEL') or 'gpt-4o-mini' 33 | base_url = get_env_var('BASE_URL') or 'https://api.openai.com/v1' 34 | api_key = get_env_var('LLM_API_KEY') or 'no-llm-api-key-provided' 35 | 36 | model = AnthropicModel(llm, api_key=api_key) if provider == "Anthropic" else OpenAIModel(llm, base_url=base_url, api_key=api_key) 37 | embedding_model = get_env_var('EMBEDDING_MODEL') or 'text-embedding-3-small' 38 | 39 | logfire.configure(send_to_logfire='if-token-present') 40 | 41 | @dataclass 42 | class ToolsRefinerDeps: 43 | supabase: Client 44 | embedding_client: AsyncOpenAI 45 | 46 | tools_refiner_agent = Agent( 47 | model, 48 | system_prompt=tools_refiner_prompt, 49 | deps_type=ToolsRefinerDeps, 50 | retries=2 51 | ) 52 | 53 | @tools_refiner_agent.tool 54 | async def retrieve_relevant_documentation(ctx: RunContext[ToolsRefinerDeps], query: str) -> str: 55 | """ 56 | Retrieve relevant documentation chunks based on the query with RAG. 57 | Make sure your searches always focus on implementing tools. 58 | 59 | Args: 60 | ctx: The context including the Supabase client and OpenAI client 61 | query: Your query to retrieve relevant documentation for implementing tools 62 | 63 | Returns: 64 | A formatted string containing the top 4 most relevant documentation chunks 65 | """ 66 | return await retrieve_relevant_documentation_tool(ctx.deps.supabase, ctx.deps.embedding_client, query) 67 | 68 | @tools_refiner_agent.tool 69 | async def list_documentation_pages(ctx: RunContext[ToolsRefinerDeps]) -> List[str]: 70 | """ 71 | Retrieve a list of all available Pydantic AI documentation pages. 
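# The specialized refiners are designed to run side by side; a hedged sketch of
# the fan-out (the actual orchestration lives in archon_graph.py, which is not
# shown here, and the variable names are stand-ins):
#
#   tools_task = tools_refiner_agent.run(agent_code, deps=tools_deps)
#   agent_task = agent_refiner_agent.run(agent_code, deps=agent_deps)
#   tools_out, agent_out = await asyncio.gather(tools_task, agent_task)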
72 | This will give you all pages available, but focus on the ones related to tools. 73 | 74 | Returns: 75 | List[str]: List of unique URLs for all documentation pages 76 | """ 77 | return await list_documentation_pages_tool(ctx.deps.supabase) 78 | 79 | @tools_refiner_agent.tool 80 | async def get_page_content(ctx: RunContext[ToolsRefinerDeps], url: str) -> str: 81 | """ 82 | Retrieve the full content of a specific documentation page by combining all its chunks. 83 | Only use this tool to get pages related to using tools with Pydantic AI. 84 | 85 | Args: 86 | ctx: The context including the Supabase client 87 | url: The URL of the page to retrieve 88 | 89 | Returns: 90 | str: The complete page content with all chunks combined in order 91 | """ 92 | return await get_page_content_tool(ctx.deps.supabase, url) -------------------------------------------------------------------------------- /iterations/v5-parallel-specialized-agents/archon/refiner_agents/tools_refiner_agent.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations as _annotations 2 | 3 | from dataclasses import dataclass 4 | from dotenv import load_dotenv 5 | import logfire 6 | import asyncio 7 | import httpx 8 | import os 9 | import sys 10 | import json 11 | from typing import List 12 | from pydantic import BaseModel 13 | from pydantic_ai import Agent, ModelRetry, RunContext 14 | from pydantic_ai.models.anthropic import AnthropicModel 15 | from pydantic_ai.models.openai import OpenAIModel 16 | from openai import AsyncOpenAI 17 | from supabase import Client 18 | 19 | # Add the parent directory to sys.path to allow importing from the parent directory 20 | sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) 21 | from utils.utils import get_env_var 22 | from archon.agent_prompts import tools_refiner_prompt 23 | from archon.agent_tools import ( 24 | retrieve_relevant_documentation_tool, 25 | list_documentation_pages_tool, 26 | get_page_content_tool 27 | ) 28 | 29 | load_dotenv() 30 | 31 | provider = get_env_var('LLM_PROVIDER') or 'OpenAI' 32 | llm = get_env_var('PRIMARY_MODEL') or 'gpt-4o-mini' 33 | base_url = get_env_var('BASE_URL') or 'https://api.openai.com/v1' 34 | api_key = get_env_var('LLM_API_KEY') or 'no-llm-api-key-provided' 35 | 36 | model = AnthropicModel(llm, api_key=api_key) if provider == "Anthropic" else OpenAIModel(llm, base_url=base_url, api_key=api_key) 37 | embedding_model = get_env_var('EMBEDDING_MODEL') or 'text-embedding-3-small' 38 | 39 | logfire.configure(send_to_logfire='if-token-present') 40 | 41 | @dataclass 42 | class ToolsRefinerDeps: 43 | supabase: Client 44 | embedding_client: AsyncOpenAI 45 | 46 | tools_refiner_agent = Agent( 47 | model, 48 | system_prompt=tools_refiner_prompt, 49 | deps_type=ToolsRefinerDeps, 50 | retries=2 51 | ) 52 | 53 | @tools_refiner_agent.tool 54 | async def retrieve_relevant_documentation(ctx: RunContext[ToolsRefinerDeps], query: str) -> str: 55 | """ 56 | Retrieve relevant documentation chunks based on the query with RAG. 57 | Make sure your searches always focus on implementing tools. 
58 | 59 | Args: 60 | ctx: The context including the Supabase client and OpenAI client 61 | query: Your query to retrieve relevant documentation for implementing tools 62 | 63 | Returns: 64 | A formatted string containing the top 4 most relevant documentation chunks 65 | """ 66 | return await retrieve_relevant_documentation_tool(ctx.deps.supabase, ctx.deps.embedding_client, query) 67 | 68 | @tools_refiner_agent.tool 69 | async def list_documentation_pages(ctx: RunContext[ToolsRefinerDeps]) -> List[str]: 70 | """ 71 | Retrieve a list of all available Pydantic AI documentation pages. 72 | This will give you all pages available, but focus on the ones related to tools. 73 | 74 | Returns: 75 | List[str]: List of unique URLs for all documentation pages 76 | """ 77 | return await list_documentation_pages_tool(ctx.deps.supabase) 78 | 79 | @tools_refiner_agent.tool 80 | async def get_page_content(ctx: RunContext[ToolsRefinerDeps], url: str) -> str: 81 | """ 82 | Retrieve the full content of a specific documentation page by combining all its chunks. 83 | Only use this tool to get pages related to using tools with Pydantic AI. 84 | 85 | Args: 86 | ctx: The context including the Supabase client 87 | url: The URL of the page to retrieve 88 | 89 | Returns: 90 | str: The complete page content with all chunks combined in order 91 | """ 92 | return await get_page_content_tool(ctx.deps.supabase, url) -------------------------------------------------------------------------------- /archon/refiner_agents/agent_refiner_agent.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations as _annotations 2 | 3 | from dataclasses import dataclass 4 | from dotenv import load_dotenv 5 | import logfire 6 | import asyncio 7 | import httpx 8 | import os 9 | import sys 10 | import json 11 | from typing import List 12 | from pydantic import BaseModel 13 | from pydantic_ai import Agent, ModelRetry, RunContext 14 | from pydantic_ai.models.anthropic import AnthropicModel 15 | from pydantic_ai.models.openai import OpenAIModel 16 | from openai import AsyncOpenAI 17 | from supabase import Client 18 | 19 | # Add the parent directory to sys.path to allow importing from the parent directory 20 | sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) 21 | from utils.utils import get_env_var 22 | from archon.agent_prompts import agent_refiner_prompt 23 | from archon.agent_tools import ( 24 | retrieve_relevant_documentation_tool, 25 | list_documentation_pages_tool, 26 | get_page_content_tool 27 | ) 28 | 29 | load_dotenv() 30 | 31 | provider = get_env_var('LLM_PROVIDER') or 'OpenAI' 32 | llm = get_env_var('PRIMARY_MODEL') or 'gpt-4o-mini' 33 | base_url = get_env_var('BASE_URL') or 'https://api.openai.com/v1' 34 | api_key = get_env_var('LLM_API_KEY') or 'no-llm-api-key-provided' 35 | 36 | model = AnthropicModel(llm, api_key=api_key) if provider == "Anthropic" else OpenAIModel(llm, base_url=base_url, api_key=api_key) 37 | embedding_model = get_env_var('EMBEDDING_MODEL') or 'text-embedding-3-small' 38 | 39 | logfire.configure(send_to_logfire='if-token-present') 40 | 41 | @dataclass 42 | class AgentRefinerDeps: 43 | supabase: Client 44 | embedding_client: AsyncOpenAI 45 | 46 | agent_refiner_agent = Agent( 47 | model, 48 | system_prompt=agent_refiner_prompt, 49 | deps_type=AgentRefinerDeps, 50 | retries=2 51 | ) 52 | 53 | @agent_refiner_agent.tool 54 | async def retrieve_relevant_documentation(ctx: RunContext[AgentRefinerDeps], query: str) 
-> str: 55 | """ 56 | Retrieve relevant documentation chunks based on the query with RAG. 57 | Make sure your searches always focus on implementing the agent itself. 58 | 59 | Args: 60 | ctx: The context including the Supabase client and OpenAI client 61 | query: Your query to retrieve relevant documentation for implementing agents 62 | 63 | Returns: 64 | A formatted string containing the top 4 most relevant documentation chunks 65 | """ 66 | return await retrieve_relevant_documentation_tool(ctx.deps.supabase, ctx.deps.embedding_client, query) 67 | 68 | @agent_refiner_agent.tool 69 | async def list_documentation_pages(ctx: RunContext[AgentRefinerDeps]) -> List[str]: 70 | """ 71 | Retrieve a list of all available Pydantic AI documentation pages. 72 | This will give you all pages available, but focus on the ones related to configuring agents and their dependencies. 73 | 74 | Returns: 75 | List[str]: List of unique URLs for all documentation pages 76 | """ 77 | return await list_documentation_pages_tool(ctx.deps.supabase) 78 | 79 | @agent_refiner_agent.tool 80 | async def get_page_content(ctx: RunContext[AgentRefinerDeps], url: str) -> str: 81 | """ 82 | Retrieve the full content of a specific documentation page by combining all its chunks. 83 | Only use this tool to get pages related to setting up agents with Pydantic AI. 84 | 85 | Args: 86 | ctx: The context including the Supabase client 87 | url: The URL of the page to retrieve 88 | 89 | Returns: 90 | str: The complete page content with all chunks combined in order 91 | """ 92 | return await get_page_content_tool(ctx.deps.supabase, url) -------------------------------------------------------------------------------- /iterations/v5-parallel-specialized-agents/archon/refiner_agents/agent_refiner_agent.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations as _annotations 2 | 3 | from dataclasses import dataclass 4 | from dotenv import load_dotenv 5 | import logfire 6 | import asyncio 7 | import httpx 8 | import os 9 | import sys 10 | import json 11 | from typing import List 12 | from pydantic import BaseModel 13 | from pydantic_ai import Agent, ModelRetry, RunContext 14 | from pydantic_ai.models.anthropic import AnthropicModel 15 | from pydantic_ai.models.openai import OpenAIModel 16 | from openai import AsyncOpenAI 17 | from supabase import Client 18 | 19 | # Add the parent directory to sys.path to allow importing from the parent directory 20 | sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) 21 | from utils.utils import get_env_var 22 | from archon.agent_prompts import agent_refiner_prompt 23 | from archon.agent_tools import ( 24 | retrieve_relevant_documentation_tool, 25 | list_documentation_pages_tool, 26 | get_page_content_tool 27 | ) 28 | 29 | load_dotenv() 30 | 31 | provider = get_env_var('LLM_PROVIDER') or 'OpenAI' 32 | llm = get_env_var('PRIMARY_MODEL') or 'gpt-4o-mini' 33 | base_url = get_env_var('BASE_URL') or 'https://api.openai.com/v1' 34 | api_key = get_env_var('LLM_API_KEY') or 'no-llm-api-key-provided' 35 | 36 | model = AnthropicModel(llm, api_key=api_key) if provider == "Anthropic" else OpenAIModel(llm, base_url=base_url, api_key=api_key) 37 | embedding_model = get_env_var('EMBEDDING_MODEL') or 'text-embedding-3-small' 38 | 39 | logfire.configure(send_to_logfire='if-token-present') 40 | 41 | @dataclass 42 | class AgentRefinerDeps: 43 | supabase: Client 44 | embedding_client: AsyncOpenAI 45 | 46 | 
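# The retries=2 below pairs with ModelRetry (imported above): a tool can raise
# ModelRetry to send a correction back to the model and trigger another attempt.
# A hypothetical example (the tool name and validation rule are made up):
#
#   @agent_refiner_agent.tool
#   async def fetch_docs(ctx: RunContext[AgentRefinerDeps], url: str) -> str:
#       if not url.startswith("https://"):
#           raise ModelRetry("Pass a full https:// documentation URL.")
#       return await get_page_content_tool(ctx.deps.supabase, url)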
agent_refiner_agent = Agent( 47 | model, 48 | system_prompt=agent_refiner_prompt, 49 | deps_type=AgentRefinerDeps, 50 | retries=2 51 | ) 52 | 53 | @agent_refiner_agent.tool 54 | async def retrieve_relevant_documentation(ctx: RunContext[AgentRefinerDeps], query: str) -> str: 55 | """ 56 | Retrieve relevant documentation chunks based on the query with RAG. 57 | Make sure your searches always focus on implementing the agent itself. 58 | 59 | Args: 60 | ctx: The context including the Supabase client and OpenAI client 61 | query: Your query to retrieve relevant documentation for implementing agents 62 | 63 | Returns: 64 | A formatted string containing the top 4 most relevant documentation chunks 65 | """ 66 | return await retrieve_relevant_documentation_tool(ctx.deps.supabase, ctx.deps.embedding_client, query) 67 | 68 | @agent_refiner_agent.tool 69 | async def list_documentation_pages(ctx: RunContext[AgentRefinerDeps]) -> List[str]: 70 | """ 71 | Retrieve a list of all available Pydantic AI documentation pages. 72 | This will give you all pages available, but focus on the ones related to configuring agents and their dependencies. 73 | 74 | Returns: 75 | List[str]: List of unique URLs for all documentation pages 76 | """ 77 | return await list_documentation_pages_tool(ctx.deps.supabase) 78 | 79 | @agent_refiner_agent.tool 80 | async def get_page_content(ctx: RunContext[AgentRefinerDeps], url: str) -> str: 81 | """ 82 | Retrieve the full content of a specific documentation page by combining all its chunks. 83 | Only use this tool to get pages related to setting up agents with Pydantic AI. 84 | 85 | Args: 86 | ctx: The context including the Supabase client 87 | url: The URL of the page to retrieve 88 | 89 | Returns: 90 | str: The complete page content with all chunks combined in order 91 | """ 92 | return await get_page_content_tool(ctx.deps.supabase, url) -------------------------------------------------------------------------------- /iterations/v1-single-agent/README.md: -------------------------------------------------------------------------------- 1 | # Archon V1 - Basic Pydantic AI Agent to Build other Pydantic AI Agents 2 | 3 | This is the first iteration of the Archon project - no use of LangGraph and built with a single AI agent to keep things very simple and introductory. 4 | 5 | An intelligent documentation crawler and RAG (Retrieval-Augmented Generation) agent built using Pydantic AI and Supabase that is capable of building other Pydantic AI agents. The agent crawls the Pydantic AI documentation, stores content in a vector database, and provides Pydantic AI agent code by retrieving and analyzing relevant documentation chunks. 6 | 7 | ## Features 8 | 9 | - Pydantic AI documentation crawling and chunking 10 | - Vector database storage with Supabase 11 | - Semantic search using OpenAI embeddings 12 | - RAG-based question answering 13 | - Support for code block preservation 14 | - Streamlit UI for interactive querying 15 | 16 | ## Prerequisites 17 | 18 | - Python 3.11+ 19 | - Supabase account and database 20 | - OpenAI API key 21 | - Streamlit (for web interface) 22 | 23 | ## Installation 24 | 25 | 1. Clone the repository: 26 | ```bash 27 | git clone https://github.com/coleam00/archon.git 28 | cd archon/iterations/v1-single-agent 29 | ``` 30 | 31 | 2. Install dependencies (recommended to use a Python virtual environment): 32 | ```bash 33 | python -m venv venv 34 | source venv/bin/activate # On Windows: venv\Scripts\activate 35 | pip install -r requirements.txt 36 | ``` 37 | 38 | 3. 
Set up environment variables: 39 | - Rename `.env.example` to `.env` 40 | - Edit `.env` with your API keys and preferences: 41 | ```env 42 | OPENAI_API_KEY=your_openai_api_key 43 | SUPABASE_URL=your_supabase_url 44 | SUPABASE_SERVICE_KEY=your_supabase_service_key 45 | LLM_MODEL=gpt-4o-mini # or your preferred OpenAI model 46 | ``` 47 | 48 | ## Usage 49 | 50 | ### Database Setup 51 | 52 | Execute the SQL commands in `site_pages.sql` to: 53 | 1. Create the necessary tables 54 | 2. Enable vector similarity search 55 | 3. Set up Row Level Security policies 56 | 57 | In Supabase, do this by going to the "SQL Editor" tab and pasting the SQL into the editor there. Then click "Run". 58 | 59 | ### Crawl Documentation 60 | 61 | To crawl and store documentation in the vector database: 62 | 63 | ```bash 64 | python crawl_pydantic_ai_docs.py 65 | ``` 66 | 67 | This will: 68 | 1. Fetch URLs from the documentation sitemap 69 | 2. Crawl each page and split into chunks 70 | 3. Generate embeddings and store in Supabase 71 | 72 | ### Streamlit Web Interface 73 | 74 | For an interactive web interface to query the documentation: 75 | 76 | ```bash 77 | streamlit run streamlit_ui.py 78 | ``` 79 | 80 | The interface will be available at `http://localhost:8501`. 81 | 82 | ## Configuration 83 | 84 | ### Database Schema 85 | 86 | The Supabase database uses the following schema: 87 | ```sql 88 | CREATE TABLE site_pages ( 89 | id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), 90 | url TEXT, 91 | chunk_number INTEGER, 92 | title TEXT, 93 | summary TEXT, 94 | content TEXT, 95 | metadata JSONB, 96 | embedding VECTOR(1536) 97 | ); 98 | ``` 99 | 100 | ### Chunking Configuration 101 | 102 | You can configure chunking parameters in `crawl_pydantic_ai_docs.py`: 103 | ```python 104 | chunk_size = 5000 # Characters per chunk 105 | ``` 106 | 107 | The chunker intelligently preserves: 108 | - Code blocks 109 | - Paragraph boundaries 110 | - Sentence boundaries 111 | 112 | ## Project Structure 113 | 114 | - `crawl_pydantic_ai_docs.py`: Documentation crawler and processor 115 | - `pydantic_ai_expert.py`: RAG agent implementation 116 | - `streamlit_ui.py`: Web interface 117 | - `site_pages.sql`: Database setup commands 118 | - `requirements.txt`: Project dependencies 119 | 120 | ## Contributing 121 | 122 | Contributions are welcome! Please feel free to submit a Pull Request. -------------------------------------------------------------------------------- /iterations/v4-streamlit-ui-overhaul/mcp/mcp_server.py: -------------------------------------------------------------------------------- 1 | from mcp.server.fastmcp import FastMCP 2 | from datetime import datetime 3 | from dotenv import load_dotenv 4 | from typing import Dict, List 5 | import threading 6 | import requests 7 | import asyncio 8 | import uuid 9 | import sys 10 | import os 11 | 12 | # Load environment variables from .env file 13 | load_dotenv() 14 | 15 | # Initialize FastMCP server 16 | mcp = FastMCP("archon") 17 | 18 | # Store active threads 19 | active_threads: Dict[str, List[str]] = {} 20 | 21 | # FastAPI service URL 22 | GRAPH_SERVICE_URL = os.getenv("GRAPH_SERVICE_URL", "http://localhost:8100") 23 | 24 | def write_to_log(message: str): 25 | """Write a message to the logs.txt file in the workbench directory. 
26 | 27 | Args: 28 | message: The message to log 29 | """ 30 | # Get the directory one level up from the current file 31 | current_dir = os.path.dirname(os.path.abspath(__file__)) 32 | parent_dir = os.path.dirname(current_dir) 33 | workbench_dir = os.path.join(parent_dir, "workbench") 34 | log_path = os.path.join(workbench_dir, "logs.txt") 35 | os.makedirs(workbench_dir, exist_ok=True) 36 | 37 | timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") 38 | log_entry = f"[{timestamp}] {message}\n" 39 | 40 | with open(log_path, "a", encoding="utf-8") as f: 41 | f.write(log_entry) 42 | 43 | @mcp.tool() 44 | async def create_thread() -> str: 45 | """Create a new conversation thread for Archon. 46 | Always call this tool before invoking Archon for the first time in a conversation. 47 | (if you don't already have a thread ID) 48 | 49 | Returns: 50 | str: A unique thread ID for the conversation 51 | """ 52 | thread_id = str(uuid.uuid4()) 53 | active_threads[thread_id] = [] 54 | write_to_log(f"Created new thread: {thread_id}") 55 | return thread_id 56 | 57 | 58 | def _make_request(thread_id: str, user_input: str, config: dict) -> dict: 59 | """Make a synchronous request to the graph service and return the parsed JSON response""" 60 | response = requests.post( 61 | f"{GRAPH_SERVICE_URL}/invoke", 62 | json={ 63 | "message": user_input, 64 | "thread_id": thread_id, 65 | "is_first_message": not active_threads[thread_id], 66 | "config": config 67 | } 68 | ) 69 | response.raise_for_status() 70 | return response.json() 71 | 72 | 73 | @mcp.tool() 74 | async def run_agent(thread_id: str, user_input: str) -> str: 75 | """Run the Archon agent with user input. 76 | Only use this tool after you have called create_thread in this conversation to get a unique thread ID. 77 | If you already created a thread ID in this conversation, do not create another one. Reuse the same ID. 78 | After you receive the code from Archon, always implement it into the codebase unless asked not to. 
79 | 80 | Args: 81 | thread_id: The conversation thread ID 82 | user_input: The user's message to process 83 | 84 | Returns: 85 | str: The agent's response which generally includes the code for the agent 86 | """ 87 | if thread_id not in active_threads: 88 | write_to_log(f"Error: Thread not found - {thread_id}") 89 | raise ValueError("Thread not found") 90 | 91 | write_to_log(f"Processing message for thread {thread_id}: {user_input}") 92 | 93 | config = { 94 | "configurable": { 95 | "thread_id": thread_id 96 | } 97 | } 98 | 99 | try: 100 | result = await asyncio.to_thread(_make_request, thread_id, user_input, config) 101 | active_threads[thread_id].append(user_input) 102 | return result['response'] 103 | 104 | except Exception as e: 105 | raise 106 | 107 | 108 | if __name__ == "__main__": 109 | write_to_log("Starting MCP server") 110 | 111 | # Run MCP server 112 | mcp.run(transport='stdio') 113 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | aiofiles==24.1.0 2 | aiohappyeyeballs==2.4.4 3 | aiohttp==3.11.11 4 | aiosignal==1.3.2 5 | aiosqlite==0.20.0 6 | altair==5.5.0 7 | annotated-types==0.7.0 8 | anthropic==0.42.0 9 | anyio==4.8.0 10 | attrs==24.3.0 11 | beautifulsoup4==4.12.3 12 | blinker==1.9.0 13 | cachetools==5.5.0 14 | certifi==2024.12.14 15 | cffi==1.17.1 16 | charset-normalizer==3.4.1 17 | click==8.1.8 18 | cohere==5.13.12 19 | colorama==0.4.6 20 | Crawl4AI==0.4.247 21 | cryptography==43.0.3 22 | Deprecated==1.2.15 23 | deprecation==2.1.0 24 | distro==1.9.0 25 | dnspython==2.7.0 26 | email_validator==2.2.0 27 | eval_type_backport==0.2.2 28 | executing==2.1.0 29 | fake-http-header==0.3.5 30 | fastapi==0.115.8 31 | fastapi-cli==0.0.7 32 | fastavro==1.10.0 33 | filelock==3.16.1 34 | frozenlist==1.5.0 35 | fsspec==2024.12.0 36 | gitdb==4.0.12 37 | GitPython==3.1.44 38 | google-auth==2.37.0 39 | googleapis-common-protos==1.66.0 40 | gotrue==2.11.1 41 | greenlet==3.1.1 42 | griffe==1.5.4 43 | groq==0.15.0 44 | h11==0.14.0 45 | h2==4.1.0 46 | hpack==4.0.0 47 | html2text==2024.2.26 48 | httpcore==1.0.7 49 | httptools==0.6.4 50 | httpx==0.27.2 51 | httpx-sse==0.4.0 52 | huggingface-hub==0.27.1 53 | hyperframe==6.0.1 54 | idna==3.10 55 | importlib_metadata==8.5.0 56 | iniconfig==2.0.0 57 | itsdangerous==2.2.0 58 | Jinja2==3.1.5 59 | jiter==0.8.2 60 | joblib==1.4.2 61 | jsonpatch==1.33 62 | jsonpath-python==1.0.6 63 | jsonpointer==3.0.0 64 | jsonschema==4.23.0 65 | jsonschema-specifications==2024.10.1 66 | jsonschema_rs==0.25.1 67 | langchain-core==0.3.33 68 | langgraph==0.2.69 69 | langgraph-checkpoint==2.0.10 70 | langgraph-cli==0.1.71 71 | langgraph-sdk==0.1.51 72 | langsmith==0.3.6 73 | litellm==1.57.8 74 | logfire==3.1.0 75 | logfire-api==3.1.0 76 | lxml==5.3.0 77 | markdown-it-py==3.0.0 78 | MarkupSafe==3.0.2 79 | mcp==1.2.1 80 | mdurl==0.1.2 81 | mistralai==1.2.6 82 | mockito==1.5.3 83 | msgpack==1.1.0 84 | multidict==6.1.0 85 | mypy-extensions==1.0.0 86 | narwhals==1.21.1 87 | nltk==3.9.1 88 | numpy==2.2.1 89 | openai==1.59.6 90 | opentelemetry-api==1.29.0 91 | opentelemetry-exporter-otlp-proto-common==1.29.0 92 | opentelemetry-exporter-otlp-proto-http==1.29.0 93 | opentelemetry-instrumentation==0.50b0 94 | opentelemetry-proto==1.29.0 95 | opentelemetry-sdk==1.29.0 96 | opentelemetry-semantic-conventions==0.50b0 97 | orjson==3.10.15 98 | packaging==24.2 99 | pandas==2.2.3 100 | pillow==10.4.0 101 | playwright==1.49.1 102 | pluggy==1.5.0 103 | 
postgrest==0.19.1 104 | propcache==0.2.1 105 | protobuf==5.29.3 106 | psutil==6.1.1 107 | pyarrow==18.1.0 108 | pyasn1==0.6.1 109 | pyasn1_modules==0.4.1 110 | pycparser==2.22 111 | pydantic==2.10.5 112 | pydantic-ai==0.0.22 113 | pydantic-ai-slim==0.0.22 114 | pydantic-extra-types==2.10.2 115 | pydantic-graph==0.0.22 116 | pydantic-settings==2.7.1 117 | pydantic_core==2.27.2 118 | pydeck==0.9.1 119 | pyee==12.0.0 120 | Pygments==2.19.1 121 | PyJWT==2.10.1 122 | pyOpenSSL==24.3.0 123 | pytest==8.3.4 124 | pytest-mockito==0.0.4 125 | python-dateutil==2.9.0.post0 126 | python-dotenv==1.0.1 127 | python-multipart==0.0.20 128 | pytz==2024.2 129 | PyYAML==6.0.2 130 | rank-bm25==0.2.2 131 | realtime==2.1.0 132 | referencing==0.35.1 133 | regex==2024.11.6 134 | requests==2.32.3 135 | requests-toolbelt==1.0.0 136 | rich==13.9.4 137 | rich-toolkit==0.13.2 138 | rpds-py==0.22.3 139 | rsa==4.9 140 | shellingham==1.5.4 141 | six==1.17.0 142 | smmap==5.0.2 143 | sniffio==1.3.1 144 | snowballstemmer==2.2.0 145 | soupsieve==2.6 146 | sse-starlette==2.1.3 147 | starlette==0.45.3 148 | storage3==0.11.0 149 | streamlit==1.41.1 150 | StrEnum==0.4.15 151 | structlog==24.4.0 152 | supabase==2.11.0 153 | supafunc==0.9.0 154 | tenacity==9.0.0 155 | tf-playwright-stealth==1.1.0 156 | tiktoken==0.8.0 157 | tokenizers==0.21.0 158 | toml==0.10.2 159 | tornado==6.4.2 160 | tqdm==4.67.1 161 | typer==0.15.1 162 | types-requests==2.32.0.20241016 163 | typing-inspect==0.9.0 164 | typing_extensions==4.12.2 165 | tzdata==2024.2 166 | ujson==5.10.0 167 | urllib3==2.3.0 168 | uvicorn==0.34.0 169 | watchdog==6.0.0 170 | watchfiles==1.0.4 171 | websockets==13.1 172 | wrapt==1.17.1 173 | xxhash==3.5.0 174 | yarl==1.18.3 175 | zipp==3.21.0 176 | zstandard==0.23.0 177 | -------------------------------------------------------------------------------- /iterations/v3-mcp-support/requirements.txt: -------------------------------------------------------------------------------- 1 | aiofiles==24.1.0 2 | aiohappyeyeballs==2.4.4 3 | aiohttp==3.11.11 4 | aiosignal==1.3.2 5 | aiosqlite==0.20.0 6 | altair==5.5.0 7 | annotated-types==0.7.0 8 | anthropic==0.42.0 9 | anyio==4.8.0 10 | attrs==24.3.0 11 | beautifulsoup4==4.12.3 12 | blinker==1.9.0 13 | cachetools==5.5.0 14 | certifi==2024.12.14 15 | cffi==1.17.1 16 | charset-normalizer==3.4.1 17 | click==8.1.8 18 | cohere==5.13.12 19 | colorama==0.4.6 20 | Crawl4AI==0.4.247 21 | cryptography==43.0.3 22 | Deprecated==1.2.15 23 | deprecation==2.1.0 24 | distro==1.9.0 25 | dnspython==2.7.0 26 | email_validator==2.2.0 27 | eval_type_backport==0.2.2 28 | executing==2.1.0 29 | fake-http-header==0.3.5 30 | fastapi==0.115.8 31 | fastapi-cli==0.0.7 32 | fastavro==1.10.0 33 | filelock==3.16.1 34 | frozenlist==1.5.0 35 | fsspec==2024.12.0 36 | gitdb==4.0.12 37 | GitPython==3.1.44 38 | google-auth==2.37.0 39 | googleapis-common-protos==1.66.0 40 | gotrue==2.11.1 41 | greenlet==3.1.1 42 | griffe==1.5.4 43 | groq==0.15.0 44 | h11==0.14.0 45 | h2==4.1.0 46 | hpack==4.0.0 47 | httpcore==1.0.7 48 | httptools==0.6.4 49 | httpx==0.27.2 50 | httpx-sse==0.4.0 51 | huggingface-hub==0.27.1 52 | hyperframe==6.0.1 53 | idna==3.10 54 | importlib_metadata==8.5.0 55 | iniconfig==2.0.0 56 | itsdangerous==2.2.0 57 | Jinja2==3.1.5 58 | jiter==0.8.2 59 | joblib==1.4.2 60 | jsonpatch==1.33 61 | jsonpath-python==1.0.6 62 | jsonpointer==3.0.0 63 | jsonschema==4.23.0 64 | jsonschema-specifications==2024.10.1 65 | jsonschema_rs==0.25.1 66 | langchain-core==0.3.33 67 | langgraph==0.2.69 68 | langgraph-api==0.0.22 69 | 
langgraph-checkpoint==2.0.10 70 | langgraph-cli==0.1.71 71 | langgraph-sdk==0.1.51 72 | langsmith==0.3.6 73 | litellm==1.57.8 74 | logfire==3.1.0 75 | logfire-api==3.1.0 76 | lxml==5.3.0 77 | markdown-it-py==3.0.0 78 | MarkupSafe==3.0.2 79 | mcp==1.2.1 80 | mdurl==0.1.2 81 | mistralai==1.2.6 82 | mockito==1.5.3 83 | msgpack==1.1.0 84 | multidict==6.1.0 85 | mypy-extensions==1.0.0 86 | narwhals==1.21.1 87 | nltk==3.9.1 88 | numpy==2.2.1 89 | openai==1.59.6 90 | opentelemetry-api==1.29.0 91 | opentelemetry-exporter-otlp-proto-common==1.29.0 92 | opentelemetry-exporter-otlp-proto-http==1.29.0 93 | opentelemetry-instrumentation==0.50b0 94 | opentelemetry-proto==1.29.0 95 | opentelemetry-sdk==1.29.0 96 | opentelemetry-semantic-conventions==0.50b0 97 | orjson==3.10.15 98 | packaging==24.2 99 | pandas==2.2.3 100 | pillow==10.4.0 101 | playwright==1.49.1 102 | pluggy==1.5.0 103 | postgrest==0.19.1 104 | propcache==0.2.1 105 | protobuf==5.29.3 106 | psutil==6.1.1 107 | pyarrow==18.1.0 108 | pyasn1==0.6.1 109 | pyasn1_modules==0.4.1 110 | pycparser==2.22 111 | pydantic==2.10.5 112 | pydantic-ai==0.0.22 113 | pydantic-ai-slim==0.0.22 114 | pydantic-extra-types==2.10.2 115 | pydantic-graph==0.0.22 116 | pydantic-settings==2.7.1 117 | pydantic_core==2.27.2 118 | pydeck==0.9.1 119 | pyee==12.0.0 120 | Pygments==2.19.1 121 | PyJWT==2.10.1 122 | pyOpenSSL==24.3.0 123 | pytest==8.3.4 124 | pytest-mockito==0.0.4 125 | python-dateutil==2.9.0.post0 126 | python-dotenv==1.0.1 127 | python-multipart==0.0.20 128 | pytz==2024.2 129 | PyYAML==6.0.2 130 | rank-bm25==0.2.2 131 | realtime==2.1.0 132 | referencing==0.35.1 133 | regex==2024.11.6 134 | requests==2.32.3 135 | requests-toolbelt==1.0.0 136 | rich==13.9.4 137 | rich-toolkit==0.13.2 138 | rpds-py==0.22.3 139 | rsa==4.9 140 | shellingham==1.5.4 141 | six==1.17.0 142 | smmap==5.0.2 143 | sniffio==1.3.1 144 | snowballstemmer==2.2.0 145 | soupsieve==2.6 146 | sse-starlette==2.1.3 147 | starlette==0.45.3 148 | storage3==0.11.0 149 | streamlit==1.41.1 150 | StrEnum==0.4.15 151 | structlog==24.4.0 152 | supabase==2.11.0 153 | supafunc==0.9.0 154 | tenacity==9.0.0 155 | tf-playwright-stealth==1.1.0 156 | tiktoken==0.8.0 157 | tokenizers==0.21.0 158 | toml==0.10.2 159 | tornado==6.4.2 160 | tqdm==4.67.1 161 | typer==0.15.1 162 | types-requests==2.32.0.20241016 163 | typing-inspect==0.9.0 164 | typing_extensions==4.12.2 165 | tzdata==2024.2 166 | ujson==5.10.0 167 | urllib3==2.3.0 168 | uvicorn==0.34.0 169 | watchdog==6.0.0 170 | watchfiles==1.0.4 171 | websockets==13.1 172 | wrapt==1.17.1 173 | xxhash==3.5.0 174 | yarl==1.18.3 175 | zipp==3.21.0 176 | zstandard==0.23.0 177 | -------------------------------------------------------------------------------- /iterations/v4-streamlit-ui-overhaul/requirements.txt: -------------------------------------------------------------------------------- 1 | aiofiles==24.1.0 2 | aiohappyeyeballs==2.4.4 3 | aiohttp==3.11.11 4 | aiosignal==1.3.2 5 | aiosqlite==0.20.0 6 | altair==5.5.0 7 | annotated-types==0.7.0 8 | anthropic==0.42.0 9 | anyio==4.8.0 10 | attrs==24.3.0 11 | beautifulsoup4==4.12.3 12 | blinker==1.9.0 13 | cachetools==5.5.0 14 | certifi==2024.12.14 15 | cffi==1.17.1 16 | charset-normalizer==3.4.1 17 | click==8.1.8 18 | cohere==5.13.12 19 | colorama==0.4.6 20 | Crawl4AI==0.4.247 21 | cryptography==43.0.3 22 | Deprecated==1.2.15 23 | deprecation==2.1.0 24 | distro==1.9.0 25 | dnspython==2.7.0 26 | email_validator==2.2.0 27 | eval_type_backport==0.2.2 28 | executing==2.1.0 29 | fake-http-header==0.3.5 30 | fastapi==0.115.8 
31 | fastapi-cli==0.0.7 32 | fastavro==1.10.0 33 | filelock==3.16.1 34 | frozenlist==1.5.0 35 | fsspec==2024.12.0 36 | gitdb==4.0.12 37 | GitPython==3.1.44 38 | google-auth==2.37.0 39 | googleapis-common-protos==1.66.0 40 | gotrue==2.11.1 41 | greenlet==3.1.1 42 | griffe==1.5.4 43 | groq==0.15.0 44 | h11==0.14.0 45 | h2==4.1.0 46 | hpack==4.0.0 47 | html2text==2024.2.26 48 | httpcore==1.0.7 49 | httptools==0.6.4 50 | httpx==0.27.2 51 | httpx-sse==0.4.0 52 | huggingface-hub==0.27.1 53 | hyperframe==6.0.1 54 | idna==3.10 55 | importlib_metadata==8.5.0 56 | iniconfig==2.0.0 57 | itsdangerous==2.2.0 58 | Jinja2==3.1.5 59 | jiter==0.8.2 60 | joblib==1.4.2 61 | jsonpatch==1.33 62 | jsonpath-python==1.0.6 63 | jsonpointer==3.0.0 64 | jsonschema==4.23.0 65 | jsonschema-specifications==2024.10.1 66 | jsonschema_rs==0.25.1 67 | langchain-core==0.3.33 68 | langgraph==0.2.69 69 | langgraph-checkpoint==2.0.10 70 | langgraph-cli==0.1.71 71 | langgraph-sdk==0.1.51 72 | langsmith==0.3.6 73 | litellm==1.57.8 74 | logfire==3.1.0 75 | logfire-api==3.1.0 76 | lxml==5.3.0 77 | markdown-it-py==3.0.0 78 | MarkupSafe==3.0.2 79 | mcp==1.2.1 80 | mdurl==0.1.2 81 | mistralai==1.2.6 82 | mockito==1.5.3 83 | msgpack==1.1.0 84 | multidict==6.1.0 85 | mypy-extensions==1.0.0 86 | narwhals==1.21.1 87 | nltk==3.9.1 88 | numpy==2.2.1 89 | openai==1.59.6 90 | opentelemetry-api==1.29.0 91 | opentelemetry-exporter-otlp-proto-common==1.29.0 92 | opentelemetry-exporter-otlp-proto-http==1.29.0 93 | opentelemetry-instrumentation==0.50b0 94 | opentelemetry-proto==1.29.0 95 | opentelemetry-sdk==1.29.0 96 | opentelemetry-semantic-conventions==0.50b0 97 | orjson==3.10.15 98 | packaging==24.2 99 | pandas==2.2.3 100 | pillow==10.4.0 101 | playwright==1.49.1 102 | pluggy==1.5.0 103 | postgrest==0.19.1 104 | propcache==0.2.1 105 | protobuf==5.29.3 106 | psutil==6.1.1 107 | pyarrow==18.1.0 108 | pyasn1==0.6.1 109 | pyasn1_modules==0.4.1 110 | pycparser==2.22 111 | pydantic==2.10.5 112 | pydantic-ai==0.0.22 113 | pydantic-ai-slim==0.0.22 114 | pydantic-extra-types==2.10.2 115 | pydantic-graph==0.0.22 116 | pydantic-settings==2.7.1 117 | pydantic_core==2.27.2 118 | pydeck==0.9.1 119 | pyee==12.0.0 120 | Pygments==2.19.1 121 | PyJWT==2.10.1 122 | pyOpenSSL==24.3.0 123 | pytest==8.3.4 124 | pytest-mockito==0.0.4 125 | python-dateutil==2.9.0.post0 126 | python-dotenv==1.0.1 127 | python-multipart==0.0.20 128 | pytz==2024.2 129 | PyYAML==6.0.2 130 | rank-bm25==0.2.2 131 | realtime==2.1.0 132 | referencing==0.35.1 133 | regex==2024.11.6 134 | requests==2.32.3 135 | requests-toolbelt==1.0.0 136 | rich==13.9.4 137 | rich-toolkit==0.13.2 138 | rpds-py==0.22.3 139 | rsa==4.9 140 | shellingham==1.5.4 141 | six==1.17.0 142 | smmap==5.0.2 143 | sniffio==1.3.1 144 | snowballstemmer==2.2.0 145 | soupsieve==2.6 146 | sse-starlette==2.1.3 147 | starlette==0.45.3 148 | storage3==0.11.0 149 | streamlit==1.41.1 150 | StrEnum==0.4.15 151 | structlog==24.4.0 152 | supabase==2.11.0 153 | supafunc==0.9.0 154 | tenacity==9.0.0 155 | tf-playwright-stealth==1.1.0 156 | tiktoken==0.8.0 157 | tokenizers==0.21.0 158 | toml==0.10.2 159 | tornado==6.4.2 160 | tqdm==4.67.1 161 | typer==0.15.1 162 | types-requests==2.32.0.20241016 163 | typing-inspect==0.9.0 164 | typing_extensions==4.12.2 165 | tzdata==2024.2 166 | ujson==5.10.0 167 | urllib3==2.3.0 168 | uvicorn==0.34.0 169 | watchdog==6.0.0 170 | watchfiles==1.0.4 171 | websockets==13.1 172 | wrapt==1.17.1 173 | xxhash==3.5.0 174 | yarl==1.18.3 175 | zipp==3.21.0 176 | zstandard==0.23.0 177 | 
-------------------------------------------------------------------------------- /iterations/v5-parallel-specialized-agents/requirements.txt: -------------------------------------------------------------------------------- 1 | aiofiles==24.1.0 2 | aiohappyeyeballs==2.4.4 3 | aiohttp==3.11.11 4 | aiosignal==1.3.2 5 | aiosqlite==0.20.0 6 | altair==5.5.0 7 | annotated-types==0.7.0 8 | anthropic==0.42.0 9 | anyio==4.8.0 10 | attrs==24.3.0 11 | beautifulsoup4==4.12.3 12 | blinker==1.9.0 13 | cachetools==5.5.0 14 | certifi==2024.12.14 15 | cffi==1.17.1 16 | charset-normalizer==3.4.1 17 | click==8.1.8 18 | cohere==5.13.12 19 | colorama==0.4.6 20 | Crawl4AI==0.4.247 21 | cryptography==43.0.3 22 | Deprecated==1.2.15 23 | deprecation==2.1.0 24 | distro==1.9.0 25 | dnspython==2.7.0 26 | email_validator==2.2.0 27 | eval_type_backport==0.2.2 28 | executing==2.1.0 29 | fake-http-header==0.3.5 30 | fastapi==0.115.8 31 | fastapi-cli==0.0.7 32 | fastavro==1.10.0 33 | filelock==3.16.1 34 | frozenlist==1.5.0 35 | fsspec==2024.12.0 36 | gitdb==4.0.12 37 | GitPython==3.1.44 38 | google-auth==2.37.0 39 | googleapis-common-protos==1.66.0 40 | gotrue==2.11.1 41 | greenlet==3.1.1 42 | griffe==1.5.4 43 | groq==0.15.0 44 | h11==0.14.0 45 | h2==4.1.0 46 | hpack==4.0.0 47 | html2text==2024.2.26 48 | httpcore==1.0.7 49 | httptools==0.6.4 50 | httpx==0.27.2 51 | httpx-sse==0.4.0 52 | huggingface-hub==0.27.1 53 | hyperframe==6.0.1 54 | idna==3.10 55 | importlib_metadata==8.5.0 56 | iniconfig==2.0.0 57 | itsdangerous==2.2.0 58 | Jinja2==3.1.5 59 | jiter==0.8.2 60 | joblib==1.4.2 61 | jsonpatch==1.33 62 | jsonpath-python==1.0.6 63 | jsonpointer==3.0.0 64 | jsonschema==4.23.0 65 | jsonschema-specifications==2024.10.1 66 | jsonschema_rs==0.25.1 67 | langchain-core==0.3.33 68 | langgraph==0.2.69 69 | langgraph-checkpoint==2.0.10 70 | langgraph-cli==0.1.71 71 | langgraph-sdk==0.1.51 72 | langsmith==0.3.6 73 | litellm==1.57.8 74 | logfire==3.1.0 75 | logfire-api==3.1.0 76 | lxml==5.3.0 77 | markdown-it-py==3.0.0 78 | MarkupSafe==3.0.2 79 | mcp==1.2.1 80 | mdurl==0.1.2 81 | mistralai==1.2.6 82 | mockito==1.5.3 83 | msgpack==1.1.0 84 | multidict==6.1.0 85 | mypy-extensions==1.0.0 86 | narwhals==1.21.1 87 | nltk==3.9.1 88 | numpy==2.2.1 89 | openai==1.59.6 90 | opentelemetry-api==1.29.0 91 | opentelemetry-exporter-otlp-proto-common==1.29.0 92 | opentelemetry-exporter-otlp-proto-http==1.29.0 93 | opentelemetry-instrumentation==0.50b0 94 | opentelemetry-proto==1.29.0 95 | opentelemetry-sdk==1.29.0 96 | opentelemetry-semantic-conventions==0.50b0 97 | orjson==3.10.15 98 | packaging==24.2 99 | pandas==2.2.3 100 | pillow==10.4.0 101 | playwright==1.49.1 102 | pluggy==1.5.0 103 | postgrest==0.19.1 104 | propcache==0.2.1 105 | protobuf==5.29.3 106 | psutil==6.1.1 107 | pyarrow==18.1.0 108 | pyasn1==0.6.1 109 | pyasn1_modules==0.4.1 110 | pycparser==2.22 111 | pydantic==2.10.5 112 | pydantic-ai==0.0.22 113 | pydantic-ai-slim==0.0.22 114 | pydantic-extra-types==2.10.2 115 | pydantic-graph==0.0.22 116 | pydantic-settings==2.7.1 117 | pydantic_core==2.27.2 118 | pydeck==0.9.1 119 | pyee==12.0.0 120 | Pygments==2.19.1 121 | PyJWT==2.10.1 122 | pyOpenSSL==24.3.0 123 | pytest==8.3.4 124 | pytest-mockito==0.0.4 125 | python-dateutil==2.9.0.post0 126 | python-dotenv==1.0.1 127 | python-multipart==0.0.20 128 | pytz==2024.2 129 | PyYAML==6.0.2 130 | rank-bm25==0.2.2 131 | realtime==2.1.0 132 | referencing==0.35.1 133 | regex==2024.11.6 134 | requests==2.32.3 135 | requests-toolbelt==1.0.0 136 | rich==13.9.4 137 | rich-toolkit==0.13.2 138 | 
rpds-py==0.22.3 139 | rsa==4.9 140 | shellingham==1.5.4 141 | six==1.17.0 142 | smmap==5.0.2 143 | sniffio==1.3.1 144 | snowballstemmer==2.2.0 145 | soupsieve==2.6 146 | sse-starlette==2.1.3 147 | starlette==0.45.3 148 | storage3==0.11.0 149 | streamlit==1.41.1 150 | StrEnum==0.4.15 151 | structlog==24.4.0 152 | supabase==2.11.0 153 | supafunc==0.9.0 154 | tenacity==9.0.0 155 | tf-playwright-stealth==1.1.0 156 | tiktoken==0.8.0 157 | tokenizers==0.21.0 158 | toml==0.10.2 159 | tornado==6.4.2 160 | tqdm==4.67.1 161 | typer==0.15.1 162 | types-requests==2.32.0.20241016 163 | typing-inspect==0.9.0 164 | typing_extensions==4.12.2 165 | tzdata==2024.2 166 | ujson==5.10.0 167 | urllib3==2.3.0 168 | uvicorn==0.34.0 169 | watchdog==6.0.0 170 | watchfiles==1.0.4 171 | websockets==13.1 172 | wrapt==1.17.1 173 | xxhash==3.5.0 174 | yarl==1.18.3 175 | zipp==3.21.0 176 | zstandard==0.23.0 177 | -------------------------------------------------------------------------------- /iterations/v4-streamlit-ui-overhaul/utils/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | from datetime import datetime 3 | from functools import wraps 4 | import inspect 5 | import json 6 | from typing import Optional 7 | from dotenv import load_dotenv 8 | 9 | # Load environment variables from .env file 10 | load_dotenv() 11 | 12 | def write_to_log(message: str): 13 | """Write a message to the logs.txt file in the workbench directory. 14 | 15 | Args: 16 | message: The message to log 17 | """ 18 | # Get the directory one level up from the current file 19 | current_dir = os.path.dirname(os.path.abspath(__file__)) 20 | parent_dir = os.path.dirname(current_dir) 21 | workbench_dir = os.path.join(parent_dir, "workbench") 22 | log_path = os.path.join(workbench_dir, "logs.txt") 23 | os.makedirs(workbench_dir, exist_ok=True) 24 | 25 | timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") 26 | log_entry = f"[{timestamp}] {message}\n" 27 | 28 | with open(log_path, "a", encoding="utf-8") as f: 29 | f.write(log_entry) 30 | 31 | def get_env_var(var_name: str) -> Optional[str]: 32 | """Get an environment variable from the saved JSON file or from environment variables. 33 | 34 | Args: 35 | var_name: The name of the environment variable to retrieve 36 | 37 | Returns: 38 | The value of the environment variable or None if not found 39 | """ 40 | # Path to the JSON file storing environment variables 41 | current_dir = os.path.dirname(os.path.abspath(__file__)) 42 | parent_dir = os.path.dirname(current_dir) 43 | env_file_path = os.path.join(current_dir, "env_vars.json") 44 | 45 | # First try to get from JSON file 46 | if os.path.exists(env_file_path): 47 | try: 48 | with open(env_file_path, "r") as f: 49 | env_vars = json.load(f) 50 | if var_name in env_vars and env_vars[var_name]: 51 | return env_vars[var_name] 52 | except (json.JSONDecodeError, IOError) as e: 53 | write_to_log(f"Error reading env_vars.json: {str(e)}") 54 | 55 | # If not found in JSON, try to get from environment variables 56 | return os.environ.get(var_name) 57 | 58 | def save_env_var(var_name: str, value: str) -> bool: 59 | """Save an environment variable to the JSON file. 
60 | 61 | Args: 62 | var_name: The name of the environment variable 63 | value: The value to save 64 | 65 | Returns: 66 | True if successful, False otherwise 67 | """ 68 | # Path to the JSON file storing environment variables 69 | current_dir = os.path.dirname(os.path.abspath(__file__)) 70 | env_file_path = os.path.join(current_dir, "env_vars.json") 71 | 72 | # Load existing env vars or create empty dict 73 | env_vars = {} 74 | if os.path.exists(env_file_path): 75 | try: 76 | with open(env_file_path, "r") as f: 77 | env_vars = json.load(f) 78 | except (json.JSONDecodeError, IOError) as e: 79 | write_to_log(f"Error reading env_vars.json: {str(e)}") 80 | # Continue with empty dict if file is corrupted 81 | 82 | # Update the variable 83 | env_vars[var_name] = value 84 | 85 | # Save back to file 86 | try: 87 | with open(env_file_path, "w") as f: 88 | json.dump(env_vars, f, indent=2) 89 | return True 90 | except IOError as e: 91 | write_to_log(f"Error writing to env_vars.json: {str(e)}") 92 | return False 93 | 94 | def log_node_execution(func): 95 | """Decorator to log the start and end of graph node execution. 96 | 97 | Args: 98 | func: The async function to wrap 99 | """ 100 | @wraps(func) 101 | async def wrapper(*args, **kwargs): 102 | func_name = func.__name__ 103 | write_to_log(f"Starting node: {func_name}") 104 | try: 105 | result = await func(*args, **kwargs) 106 | write_to_log(f"Completed node: {func_name}") 107 | return result 108 | except Exception as e: 109 | write_to_log(f"Error in node {func_name}: {str(e)}") 110 | raise 111 | return wrapper 112 | -------------------------------------------------------------------------------- /iterations/v4-streamlit-ui-overhaul/run_docker.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Simple script to build and run Archon Docker containers. 4 | """ 5 | 6 | import os 7 | import subprocess 8 | import platform 9 | import time 10 | from pathlib import Path 11 | 12 | def run_command(command, cwd=None): 13 | """Run a command and print output in real-time.""" 14 | print(f"Running: {' '.join(command)}") 15 | process = subprocess.Popen( 16 | command, 17 | stdout=subprocess.PIPE, 18 | stderr=subprocess.STDOUT, 19 | text=False, 20 | cwd=cwd 21 | ) 22 | 23 | for line in process.stdout: 24 | try: 25 | decoded_line = line.decode('utf-8', errors='replace') 26 | print(decoded_line.strip()) 27 | except Exception as e: 28 | print(f"Error processing output: {e}") 29 | 30 | process.wait() 31 | return process.returncode 32 | 33 | def check_docker(): 34 | """Check if Docker is installed and running.""" 35 | try: 36 | subprocess.run( 37 | ["docker", "--version"], 38 | check=True, 39 | stdout=subprocess.PIPE, 40 | stderr=subprocess.PIPE 41 | ) 42 | return True 43 | except (subprocess.SubprocessError, FileNotFoundError): 44 | print("Error: Docker is not installed or not in PATH") 45 | return False 46 | 47 | def main(): 48 | """Main function to build and run Archon containers.""" 49 | # Check if Docker is available 50 | if not check_docker(): 51 | return 1 52 | 53 | # Get the base directory 54 | base_dir = Path(__file__).parent.absolute() 55 | 56 | # Check for .env file 57 | env_file = base_dir / ".env" 58 | env_args = [] 59 | if env_file.exists(): 60 | print(f"Using environment file: {env_file}") 61 | env_args = ["--env-file", str(env_file)] 62 | else: 63 | print("No .env file found. 
Continuing without environment variables.") 64 | 65 | # Build the MCP container 66 | print("\n=== Building Archon MCP container ===") 67 | mcp_dir = base_dir / "mcp" 68 | if run_command(["docker", "build", "-t", "archon-mcp:latest", "."], cwd=mcp_dir) != 0: 69 | print("Error building MCP container") 70 | return 1 71 | 72 | # Build the main Archon container 73 | print("\n=== Building main Archon container ===") 74 | if run_command(["docker", "build", "-t", "archon:latest", "."], cwd=base_dir) != 0: 75 | print("Error building main Archon container") 76 | return 1 77 | 78 | # Check if the container is already running 79 | try: 80 | result = subprocess.run( 81 | ["docker", "ps", "-q", "--filter", "name=archon-container"], 82 | check=True, 83 | capture_output=True, 84 | text=True 85 | ) 86 | if result.stdout.strip(): 87 | print("\n=== Stopping existing Archon container ===") 88 | run_command(["docker", "stop", "archon-container"]) 89 | run_command(["docker", "rm", "archon-container"]) 90 | except subprocess.SubprocessError: 91 | pass 92 | 93 | # Run the Archon container 94 | print("\n=== Starting Archon container ===") 95 | cmd = [ 96 | "docker", "run", "-d", 97 | "--name", "archon-container", 98 | "-p", "8501:8501", 99 | "-p", "8100:8100", 100 | "--add-host", "host.docker.internal:host-gateway" 101 | ] 102 | 103 | # Add environment variables if .env exists 104 | if env_args: 105 | cmd.extend(env_args) 106 | 107 | # Add image name 108 | cmd.append("archon:latest") 109 | 110 | if run_command(cmd) != 0: 111 | print("Error starting Archon container") 112 | return 1 113 | 114 | # Wait a moment for the container to start 115 | time.sleep(2) 116 | 117 | # Print success message 118 | print("\n=== Archon is now running! ===") 119 | print("-> Access the Streamlit UI at: http://localhost:8501") 120 | print("-> MCP container is ready to use - see the MCP tab in the UI.") 121 | print("\nTo stop Archon, run: docker stop archon-container && docker rm archon-container") 122 | 123 | return 0 124 | 125 | if __name__ == "__main__": 126 | exit(main()) 127 | -------------------------------------------------------------------------------- /iterations/v2-agentic-workflow/streamlit_ui.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from typing import Literal, TypedDict 3 | from langgraph.types import Command 4 | from openai import AsyncOpenAI 5 | from supabase import Client 6 | import streamlit as st 7 | import logfire 8 | import asyncio 9 | import json 10 | import uuid 11 | import os 12 | 13 | # Import all the message part classes 14 | from pydantic_ai.messages import ( 15 | ModelMessage, 16 | ModelRequest, 17 | ModelResponse, 18 | SystemPromptPart, 19 | UserPromptPart, 20 | TextPart, 21 | ToolCallPart, 22 | ToolReturnPart, 23 | RetryPromptPart, 24 | ModelMessagesTypeAdapter 25 | ) 26 | 27 | from archon_graph import agentic_flow 28 | 29 | # Load environment variables 30 | from dotenv import load_dotenv 31 | load_dotenv() 32 | 33 | openai_client=None 34 | 35 | base_url = os.getenv('BASE_URL', 'https://api.openai.com/v1') 36 | api_key = os.getenv('LLM_API_KEY', 'no-llm-api-key-provided') 37 | is_ollama = "localhost" in base_url.lower() 38 | 39 | if is_ollama: 40 | openai_client = AsyncOpenAI(base_url=base_url,api_key=api_key) 41 | else: 42 | openai_client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY")) 43 | 44 | supabase: Client = Client( 45 | os.getenv("SUPABASE_URL"), 46 | os.getenv("SUPABASE_SERVICE_KEY") 47 | ) 48 | 49 | # Configure 
logfire to suppress warnings (optional) 50 | logfire.configure(send_to_logfire='never') 51 | 52 | @st.cache_resource 53 | def get_thread_id(): 54 | return str(uuid.uuid4()) 55 | 56 | thread_id = get_thread_id() 57 | 58 | async def run_agent_with_streaming(user_input: str): 59 | """ 60 | Run the agent with streaming text for the user_input prompt, 61 | while maintaining the entire conversation in `st.session_state.messages`. 62 | """ 63 | config = { 64 | "configurable": { 65 | "thread_id": thread_id 66 | } 67 | } 68 | 69 | # First message from user 70 | if len(st.session_state.messages) == 1: 71 | async for msg in agentic_flow.astream( 72 | {"latest_user_message": user_input}, config, stream_mode="custom" 73 | ): 74 | yield msg 75 | # Continue the conversation 76 | else: 77 | async for msg in agentic_flow.astream( 78 | Command(resume=user_input), config, stream_mode="custom" 79 | ): 80 | yield msg 81 | 82 | 83 | async def main(): 84 | st.title("Archon - Agent Builder") 85 | st.write("Describe to me an AI agent you want to build and I'll code it for you with Pydantic AI.") 86 | st.write("Example: Build me an AI agent that can search the web with the Brave API.") 87 | 88 | # Initialize chat history in session state if not present 89 | if "messages" not in st.session_state: 90 | st.session_state.messages = [] 91 | 92 | # Display chat messages from history on app rerun 93 | for message in st.session_state.messages: 94 | message_type = message["type"] 95 | if message_type in ["human", "ai", "system"]: 96 | with st.chat_message(message_type): 97 | st.markdown(message["content"]) 98 | 99 | # Chat input for the user 100 | user_input = st.chat_input("What do you want to build today?") 101 | 102 | if user_input: 103 | # We append a new request to the conversation explicitly 104 | st.session_state.messages.append({"type": "human", "content": user_input}) 105 | 106 | # Display user prompt in the UI 107 | with st.chat_message("user"): 108 | st.markdown(user_input) 109 | 110 | # Display assistant response in chat message container 111 | response_content = "" 112 | with st.chat_message("assistant"): 113 | message_placeholder = st.empty() # Placeholder for updating the message 114 | # Run the async generator to fetch responses 115 | async for chunk in run_agent_with_streaming(user_input): 116 | response_content += chunk 117 | # Update the placeholder with the current response content 118 | message_placeholder.markdown(response_content) 119 | 120 | st.session_state.messages.append({"type": "ai", "content": response_content}) 121 | 122 | 123 | if __name__ == "__main__": 124 | asyncio.run(main()) 125 | -------------------------------------------------------------------------------- /iterations/v3-mcp-support/streamlit_ui.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from typing import Literal, TypedDict 3 | from langgraph.types import Command 4 | from openai import AsyncOpenAI 5 | from supabase import Client 6 | import streamlit as st 7 | import logfire 8 | import asyncio 9 | import json 10 | import uuid 11 | import os 12 | import sys 13 | 14 | # Import all the message part classes 15 | from pydantic_ai.messages import ( 16 | ModelMessage, 17 | ModelRequest, 18 | ModelResponse, 19 | SystemPromptPart, 20 | UserPromptPart, 21 | TextPart, 22 | ToolCallPart, 23 | ToolReturnPart, 24 | RetryPromptPart, 25 | ModelMessagesTypeAdapter 26 | ) 27 | 28 | # Add the current directory to Python path 29 | 
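# so that `from archon.archon_graph import agentic_flow` below resolves regardless of the working directory Streamlit is launched from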
sys.path.append(os.path.dirname(os.path.abspath(__file__))) 30 | from archon.archon_graph import agentic_flow 31 | 32 | # Load environment variables 33 | from dotenv import load_dotenv 34 | load_dotenv() 35 | 36 | 37 | openai_client=None 38 | base_url = os.getenv('BASE_URL', 'https://api.openai.com/v1') 39 | api_key = os.getenv('LLM_API_KEY', 'no-llm-api-key-provided') 40 | is_ollama = "localhost" in base_url.lower() 41 | 42 | if is_ollama: 43 | openai_client = AsyncOpenAI(base_url=base_url,api_key=api_key) 44 | else: 45 | openai_client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY")) 46 | 47 | supabase: Client = Client( 48 | os.getenv("SUPABASE_URL"), 49 | os.getenv("SUPABASE_SERVICE_KEY") 50 | ) 51 | 52 | # Configure logfire to suppress warnings (optional) 53 | logfire.configure(send_to_logfire='never') 54 | 55 | @st.cache_resource 56 | def get_thread_id(): 57 | return str(uuid.uuid4()) 58 | 59 | thread_id = get_thread_id() 60 | 61 | async def run_agent_with_streaming(user_input: str): 62 | """ 63 | Run the agent with streaming text for the user_input prompt, 64 | while maintaining the entire conversation in `st.session_state.messages`. 65 | """ 66 | config = { 67 | "configurable": { 68 | "thread_id": thread_id 69 | } 70 | } 71 | 72 | # First message from user 73 | if len(st.session_state.messages) == 1: 74 | async for msg in agentic_flow.astream( 75 | {"latest_user_message": user_input}, config, stream_mode="custom" 76 | ): 77 | yield msg 78 | # Continue the conversation 79 | else: 80 | async for msg in agentic_flow.astream( 81 | Command(resume=user_input), config, stream_mode="custom" 82 | ): 83 | yield msg 84 | 85 | 86 | async def main(): 87 | st.title("Archon - Agent Builder") 88 | st.write("Describe to me an AI agent you want to build and I'll code it for you with Pydantic AI.") 89 | st.write("Example: Build me an AI agent that can search the web with the Brave API.") 90 | 91 | # Initialize chat history in session state if not present 92 | if "messages" not in st.session_state: 93 | st.session_state.messages = [] 94 | 95 | # Display chat messages from history on app rerun 96 | for message in st.session_state.messages: 97 | message_type = message["type"] 98 | if message_type in ["human", "ai", "system"]: 99 | with st.chat_message(message_type): 100 | st.markdown(message["content"]) 101 | 102 | # Chat input for the user 103 | user_input = st.chat_input("What do you want to build today?") 104 | 105 | if user_input: 106 | # We append a new request to the conversation explicitly 107 | st.session_state.messages.append({"type": "human", "content": user_input}) 108 | 109 | # Display user prompt in the UI 110 | with st.chat_message("user"): 111 | st.markdown(user_input) 112 | 113 | # Display assistant response in chat message container 114 | response_content = "" 115 | with st.chat_message("assistant"): 116 | message_placeholder = st.empty() # Placeholder for updating the message 117 | # Run the async generator to fetch responses 118 | async for chunk in run_agent_with_streaming(user_input): 119 | response_content += chunk 120 | # Update the placeholder with the current response content 121 | message_placeholder.markdown(response_content) 122 | 123 | st.session_state.messages.append({"type": "ai", "content": response_content}) 124 | 125 | 126 | if __name__ == "__main__": 127 | asyncio.run(main()) 128 | -------------------------------------------------------------------------------- /mcp/mcp_server.py: -------------------------------------------------------------------------------- 1 | from 
mcp.server.fastmcp import FastMCP 2 | from datetime import datetime 3 | from dotenv import load_dotenv 4 | from typing import Dict, List 5 | import threading 6 | import requests 7 | import asyncio 8 | import uuid 9 | import sys 10 | import os 11 | 12 | # Load environment variables from .env file 13 | load_dotenv() 14 | 15 | # Initialize FastMCP server with ERROR logging level 16 | mcp = FastMCP("archon", log_level="ERROR") 17 | 18 | # Store active threads 19 | active_threads: Dict[str, List[str]] = {} 20 | 21 | # FastAPI service URL 22 | GRAPH_SERVICE_URL = os.getenv("GRAPH_SERVICE_URL", "http://localhost:8100") 23 | 24 | def write_to_log(message: str): 25 | """Write a message to the logs.txt file in the workbench directory. 26 | 27 | Args: 28 | message: The message to log 29 | """ 30 | # Get the directory one level up from the current file 31 | current_dir = os.path.dirname(os.path.abspath(__file__)) 32 | parent_dir = os.path.dirname(current_dir) 33 | workbench_dir = os.path.join(parent_dir, "workbench") 34 | log_path = os.path.join(workbench_dir, "logs.txt") 35 | os.makedirs(workbench_dir, exist_ok=True) 36 | 37 | timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") 38 | log_entry = f"[{timestamp}] {message}\n" 39 | 40 | with open(log_path, "a", encoding="utf-8") as f: 41 | f.write(log_entry) 42 | 43 | @mcp.tool() 44 | async def create_thread() -> str: 45 | """Create a new conversation thread for Archon. 46 | Always call this tool before invoking Archon for the first time in a conversation. 47 | (if you don't already have a thread ID) 48 | 49 | Returns: 50 | str: A unique thread ID for the conversation 51 | """ 52 | thread_id = str(uuid.uuid4()) 53 | active_threads[thread_id] = [] 54 | write_to_log(f"Created new thread: {thread_id}") 55 | return thread_id 56 | 57 | 58 | def _make_request(thread_id: str, user_input: str, config: dict) -> dict: 59 | """Make a synchronous request to the graph service and return the parsed JSON response""" 60 | try: 61 | response = requests.post( 62 | f"{GRAPH_SERVICE_URL}/invoke", 63 | json={ 64 | "message": user_input, 65 | "thread_id": thread_id, 66 | "is_first_message": not active_threads[thread_id], 67 | "config": config 68 | }, 69 | timeout=300 # 5 minute timeout for long-running operations 70 | ) 71 | response.raise_for_status() 72 | return response.json() 73 | except requests.exceptions.Timeout: 74 | write_to_log(f"Request timed out for thread {thread_id}") 75 | raise TimeoutError("Request to graph service timed out. The operation took longer than expected.") 76 | except requests.exceptions.RequestException as e: 77 | write_to_log(f"Request failed for thread {thread_id}: {str(e)}") 78 | raise 79 | 80 | 81 | @mcp.tool() 82 | async def run_agent(thread_id: str, user_input: str) -> str: 83 | """Run the Archon agent with user input. 84 | Only use this tool after you have called create_thread in this conversation to get a unique thread ID. 85 | If you already created a thread ID in this conversation, do not create another one. Reuse the same ID. 86 | After you receive the code from Archon, always implement it into the codebase unless asked not to. 87 | 88 | After using this tool and implementing the code it gave back, ask the user if they want you to refine the agent 89 | autonomously (they can just say 'refine') or they can just give feedback and you'll improve the agent that way. 90 | 91 | If they want to refine the agent, just give 'refine' for user_input. 
92 | 93 | Args: 94 | thread_id: The conversation thread ID 95 | user_input: The user's message to process 96 | 97 | Returns: 98 | str: The agent's response which generally includes the code for the agent 99 | """ 100 | if thread_id not in active_threads: 101 | write_to_log(f"Error: Thread not found - {thread_id}") 102 | raise ValueError("Thread not found") 103 | 104 | write_to_log(f"Processing message for thread {thread_id}: {user_input}") 105 | 106 | config = { 107 | "configurable": { 108 | "thread_id": thread_id 109 | } 110 | } 111 | 112 | try: 113 | result = await asyncio.to_thread(_make_request, thread_id, user_input, config) 114 | active_threads[thread_id].append(user_input) 115 | return result['response'] 116 | 117 | except Exception as e: 118 | raise 119 | 120 | 121 | if __name__ == "__main__": 122 | write_to_log("Starting MCP server") 123 | 124 | # Run MCP server 125 | mcp.run(transport='stdio') 126 | 127 | -------------------------------------------------------------------------------- /iterations/v5-parallel-specialized-agents/mcp/mcp_server.py: -------------------------------------------------------------------------------- 1 | from mcp.server.fastmcp import FastMCP 2 | from datetime import datetime 3 | from dotenv import load_dotenv 4 | from typing import Dict, List 5 | import threading 6 | import requests 7 | import asyncio 8 | import uuid 9 | import sys 10 | import os 11 | 12 | # Load environment variables from .env file 13 | load_dotenv() 14 | 15 | # Initialize FastMCP server with ERROR logging level 16 | mcp = FastMCP("archon", log_level="ERROR") 17 | 18 | # Store active threads 19 | active_threads: Dict[str, List[str]] = {} 20 | 21 | # FastAPI service URL 22 | GRAPH_SERVICE_URL = os.getenv("GRAPH_SERVICE_URL", "http://localhost:8100") 23 | 24 | def write_to_log(message: str): 25 | """Write a message to the logs.txt file in the workbench directory. 26 | 27 | Args: 28 | message: The message to log 29 | """ 30 | # Get the directory one level up from the current file 31 | current_dir = os.path.dirname(os.path.abspath(__file__)) 32 | parent_dir = os.path.dirname(current_dir) 33 | workbench_dir = os.path.join(parent_dir, "workbench") 34 | log_path = os.path.join(workbench_dir, "logs.txt") 35 | os.makedirs(workbench_dir, exist_ok=True) 36 | 37 | timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") 38 | log_entry = f"[{timestamp}] {message}\n" 39 | 40 | with open(log_path, "a", encoding="utf-8") as f: 41 | f.write(log_entry) 42 | 43 | @mcp.tool() 44 | async def create_thread() -> str: 45 | """Create a new conversation thread for Archon. 46 | Always call this tool before invoking Archon for the first time in a conversation. 
47 | (if you don't already have a thread ID) 48 | 49 | Returns: 50 | str: A unique thread ID for the conversation 51 | """ 52 | thread_id = str(uuid.uuid4()) 53 | active_threads[thread_id] = [] 54 | write_to_log(f"Created new thread: {thread_id}") 55 | return thread_id 56 | 57 | 58 | def _make_request(thread_id: str, user_input: str, config: dict) -> dict: 59 | """Make a synchronous request to the graph service and return the parsed JSON response""" 60 | try: 61 | response = requests.post( 62 | f"{GRAPH_SERVICE_URL}/invoke", 63 | json={ 64 | "message": user_input, 65 | "thread_id": thread_id, 66 | "is_first_message": not active_threads[thread_id], 67 | "config": config 68 | }, 69 | timeout=300 # 5 minute timeout for long-running operations 70 | ) 71 | response.raise_for_status() 72 | return response.json() 73 | except requests.exceptions.Timeout: 74 | write_to_log(f"Request timed out for thread {thread_id}") 75 | raise TimeoutError("Request to graph service timed out. The operation took longer than expected.") 76 | except requests.exceptions.RequestException as e: 77 | write_to_log(f"Request failed for thread {thread_id}: {str(e)}") 78 | raise 79 | 80 | 81 | @mcp.tool() 82 | async def run_agent(thread_id: str, user_input: str) -> str: 83 | """Run the Archon agent with user input. 84 | Only use this tool after you have called create_thread in this conversation to get a unique thread ID. 85 | If you already created a thread ID in this conversation, do not create another one. Reuse the same ID. 86 | After you receive the code from Archon, always implement it into the codebase unless asked not to. 87 | 88 | After using this tool and implementing the code it gave back, ask the user if they want you to refine the agent 89 | autonomously (they can just say 'refine') or they can just give feedback and you'll improve the agent that way. 90 | 91 | If they want to refine the agent, just give 'refine' for user_input. 92 | 93 | Args: 94 | thread_id: The conversation thread ID 95 | user_input: The user's message to process 96 | 97 | Returns: 98 | str: The agent's response which generally includes the code for the agent 99 | """ 100 | if thread_id not in active_threads: 101 | write_to_log(f"Error: Thread not found - {thread_id}") 102 | raise ValueError("Thread not found") 103 | 104 | write_to_log(f"Processing message for thread {thread_id}: {user_input}") 105 | 106 | config = { 107 | "configurable": { 108 | "thread_id": thread_id 109 | } 110 | } 111 | 112 | try: 113 | result = await asyncio.to_thread(_make_request, thread_id, user_input, config) 114 | active_threads[thread_id].append(user_input) 115 | return result['response'] 116 | 117 | except Exception as e: 118 | raise 119 | 120 | 121 | if __name__ == "__main__": 122 | write_to_log("Starting MCP server") 123 | 124 | # Run MCP server 125 | mcp.run(transport='stdio') 126 | 127 | -------------------------------------------------------------------------------- /iterations/v2-agentic-workflow/README.md: -------------------------------------------------------------------------------- 1 | # Archon V2 - Agentic Workflow for Building Pydantic AI Agents 2 | 3 | This is the second iteration of the Archon project, building upon V1 by introducing LangGraph for a full agentic workflow. The system starts with a reasoning LLM (like o3-mini or R1) that analyzes user requirements and documentation to create a detailed scope, which then guides specialized coding and routing agents in generating high-quality Pydantic AI agents. 
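At a high level, the LangGraph wiring looks something like the sketch below. This is an illustration only; the state fields and node names are placeholders, not the exact ones defined in `archon_graph.py`.

```python
from typing import TypedDict
from langgraph.graph import StateGraph, START, END

class AgentState(TypedDict):
    latest_user_message: str
    scope: str
    generated_code: str

def reasoner_node(state: AgentState) -> dict:
    # A reasoning LLM (e.g. o3-mini or R1) turns the user's request and the
    # list of documentation pages into a detailed scope document.
    return {"scope": "...detailed plan derived from the docs..."}

def coder_node(state: AgentState) -> dict:
    # The Pydantic AI coding agent implements the scope, using its RAG tools
    # to pull relevant documentation chunks from Supabase.
    return {"generated_code": "...generated agent code..."}

builder = StateGraph(AgentState)
builder.add_node("reasoner", reasoner_node)
builder.add_node("coder", coder_node)
builder.add_edge(START, "reasoner")
builder.add_edge("reasoner", "coder")
builder.add_edge("coder", END)
agentic_flow = builder.compile()
```

The real graph layers routing and user-feedback loops on top of this same reasoner-then-coder pattern.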
4 | 5 | An intelligent documentation crawler and RAG (Retrieval-Augmented Generation) system built using Pydantic AI, LangGraph, and Supabase that is capable of building other Pydantic AI agents. The system crawls the Pydantic AI documentation, stores content in a vector database, and provides Pydantic AI agent code by retrieving and analyzing relevant documentation chunks. 6 | 7 | This version also supports local LLMs with Ollama for the main agent and reasoning LLM. 8 | 9 | Note that we still rely on OpenAI for embeddings regardless of which LLM provider you choose, but future versions of Archon will change that. 10 | 11 | ## Features 12 | 13 | - Multi-agent workflow using LangGraph 14 | - Specialized agents for reasoning, routing, and coding 15 | - Pydantic AI documentation crawling and chunking 16 | - Vector database storage with Supabase 17 | - Semantic search using OpenAI embeddings 18 | - RAG-based question answering 19 | - Support for code block preservation 20 | - Streamlit UI for interactive querying 21 | 22 | ## Prerequisites 23 | 24 | - Python 3.11+ 25 | - Supabase account and database 26 | - OpenAI/OpenRouter API key or Ollama for local LLMs 27 | - Streamlit (for web interface) 28 | 29 | ## Installation 30 | 31 | 1. Clone the repository: 32 | ```bash 33 | git clone https://github.com/coleam00/archon.git 34 | cd archon/iterations/v2-agentic-workflow 35 | ``` 36 | 37 | 2. Install dependencies (recommended to use a Python virtual environment): 38 | ```bash 39 | python -m venv venv 40 | source venv/bin/activate # On Windows: venv\Scripts\activate 41 | pip install -r requirements.txt 42 | ``` 43 | 44 | 3. Set up environment variables: 45 | - Rename `.env.example` to `.env` 46 | - Edit `.env` with your API keys and preferences: 47 | ```env 48 | BASE_URL=https://api.openai.com/v1 # for OpenAI; use https://openrouter.ai/api/v1 for OpenRouter, or your Ollama URL (e.g. http://localhost:11434/v1) 49 | LLM_API_KEY=your_openai_or_openrouter_api_key 50 | OPENAI_API_KEY=your_openai_api_key 51 | SUPABASE_URL=your_supabase_url 52 | SUPABASE_SERVICE_KEY=your_supabase_service_key 53 | PRIMARY_MODEL=gpt-4o-mini # or your preferred OpenAI model for main agent 54 | REASONER_MODEL=o3-mini # or your preferred OpenAI model for reasoning 55 | ``` 56 | 57 | ## Usage 58 | 59 | ### Database Setup 60 | 61 | Execute the SQL commands in `site_pages.sql` to: 62 | 1. Create the necessary tables 63 | 2. Enable vector similarity search 64 | 3. Set up Row Level Security policies 65 | 66 | In Supabase, do this by going to the "SQL Editor" tab and pasting the SQL into the editor there. Then click "Run". 67 | 68 | ### Crawl Documentation 69 | 70 | To crawl and store documentation in the vector database: 71 | 72 | ```bash 73 | python crawl_pydantic_ai_docs.py 74 | ``` 75 | 76 | This will: 77 | 1. Fetch URLs from the documentation sitemap 78 | 2. Crawl each page and split into chunks 79 | 3. 
Generate embeddings and store in Supabase 80 | 81 | ### Chunking Configuration 82 | 83 | You can configure chunking parameters in `crawl_pydantic_ai_docs.py`: 84 | ```python 85 | chunk_size = 5000 # Characters per chunk 86 | ``` 87 | 88 | The chunker intelligently preserves: 89 | - Code blocks 90 | - Paragraph boundaries 91 | - Sentence boundaries 92 | 93 | ### Streamlit Web Interface 94 | 95 | For an interactive web interface to query the documentation and create agents: 96 | 97 | ```bash 98 | streamlit run streamlit_ui.py 99 | ``` 100 | 101 | The interface will be available at `http://localhost:8501` 102 | 103 | ## Configuration 104 | 105 | ### Database Schema 106 | 107 | The Supabase database uses the following schema: 108 | ```sql 109 | CREATE TABLE site_pages ( 110 | id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), 111 | url TEXT, 112 | chunk_number INTEGER, 113 | title TEXT, 114 | summary TEXT, 115 | content TEXT, 116 | metadata JSONB, 117 | embedding VECTOR(1536) 118 | ); 119 | ``` 120 | 121 | ## Project Structure 122 | 123 | - `archon_graph.py`: LangGraph workflow definition and agent coordination 124 | - `pydantic_ai_coder.py`: Main coding agent with RAG capabilities 125 | - `crawl_pydantic_ai_docs.py`: Documentation crawler and processor 126 | - `streamlit_ui.py`: Web interface with streaming support 127 | - `site_pages.sql`: Database setup commands 128 | - `requirements.txt`: Project dependencies 129 | 130 | ## Contributing 131 | 132 | Contributions are welcome! Please feel free to submit a Pull Request. 133 | -------------------------------------------------------------------------------- /archon/agent_tools.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Any, List, Optional 2 | from openai import AsyncOpenAI 3 | from supabase import Client 4 | import sys 5 | import os 6 | 7 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 8 | from utils.utils import get_env_var 9 | 10 | embedding_model = get_env_var('EMBEDDING_MODEL') or 'text-embedding-3-small' 11 | 12 | async def get_embedding(text: str, embedding_client: AsyncOpenAI) -> List[float]: 13 | """Get embedding vector from OpenAI.""" 14 | try: 15 | response = await embedding_client.embeddings.create( 16 | model=embedding_model, 17 | input=text 18 | ) 19 | return response.data[0].embedding 20 | except Exception as e: 21 | print(f"Error getting embedding: {e}") 22 | return [0] * 1536 # Return zero vector on error 23 | 24 | async def retrieve_relevant_documentation_tool(supabase: Client, embedding_client: AsyncOpenAI, user_query: str) -> str: 25 | try: 26 | # Get the embedding for the query 27 | query_embedding = await get_embedding(user_query, embedding_client) 28 | 29 | # Query Supabase for relevant documents 30 | result = supabase.rpc( 31 | 'match_site_pages', 32 | { 33 | 'query_embedding': query_embedding, 34 | 'match_count': 4, 35 | 'filter': {'source': 'pydantic_ai_docs'} 36 | } 37 | ).execute() 38 | 39 | if not result.data: 40 | return "No relevant documentation found." 
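# NOTE: 'match_site_pages' above is a Postgres function (created by utils/site_pages.sql) that runs a pgvector similarity search over the stored embeddings and returns the `match_count` closest chunks matching the given filter.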
41 | 42 | # Format the results 43 | formatted_chunks = [] 44 | for doc in result.data: 45 | chunk_text = f""" 46 | # {doc['title']} 47 | 48 | {doc['content']} 49 | """ 50 | formatted_chunks.append(chunk_text) 51 | 52 | # Join all chunks with a separator 53 | return "\n\n---\n\n".join(formatted_chunks) 54 | 55 | except Exception as e: 56 | print(f"Error retrieving documentation: {e}") 57 | return f"Error retrieving documentation: {str(e)}" 58 | 59 | async def list_documentation_pages_tool(supabase: Client) -> List[str]: 60 | """ 61 | Function to retrieve a list of all available Pydantic AI documentation pages. 62 | This is called by the list_documentation_pages tool and also externally 63 | to fetch documentation pages for the reasoner LLM. 64 | 65 | Returns: 66 | List[str]: List of unique URLs for all documentation pages 67 | """ 68 | try: 69 | # Query Supabase for unique URLs where source is pydantic_ai_docs 70 | result = supabase.from_('site_pages') \ 71 | .select('url') \ 72 | .eq('metadata->>source', 'pydantic_ai_docs') \ 73 | .execute() 74 | 75 | if not result.data: 76 | return [] 77 | 78 | # Extract unique URLs 79 | urls = sorted(set(doc['url'] for doc in result.data)) 80 | return urls 81 | 82 | except Exception as e: 83 | print(f"Error retrieving documentation pages: {e}") 84 | return [] 85 | 86 | async def get_page_content_tool(supabase: Client, url: str) -> str: 87 | """ 88 | Retrieve the full content of a specific documentation page by combining all its chunks. 89 | 90 | Args: 91 | supabase: The Supabase client 92 | url: The URL of the page to retrieve 93 | 94 | Returns: 95 | str: The complete page content with all chunks combined in order 96 | """ 97 | try: 98 | # Query Supabase for all chunks of this URL, ordered by chunk_number 99 | result = supabase.from_('site_pages') \ 100 | .select('title, content, chunk_number') \ 101 | .eq('url', url) \ 102 | .eq('metadata->>source', 'pydantic_ai_docs') \ 103 | .order('chunk_number') \ 104 | .execute() 105 | 106 | if not result.data: 107 | return f"No content found for URL: {url}" 108 | 109 | # Format the page with its title and all chunks 110 | page_title = result.data[0]['title'].split(' - ')[0] # Get the main title 111 | formatted_content = [f"# {page_title}\n"] 112 | 113 | # Add each chunk's content 114 | for chunk in result.data: 115 | formatted_content.append(chunk['content']) 116 | 117 | # Join everything together but limit the characters in case the page is massive (there are a couple of big ones) 118 | # This will be improved later so that if the page is too big, RAG will be performed on the page itself 119 | return "\n\n".join(formatted_content)[:20000] 120 | 121 | except Exception as e: 122 | print(f"Error retrieving page content: {e}") 123 | return f"Error retrieving page content: {str(e)}" 124 | -------------------------------------------------------------------------------- /streamlit_ui.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from dotenv import load_dotenv 3 | import streamlit as st 4 | import logfire 5 | import asyncio 6 | 7 | # Set page config - must be the first Streamlit command 8 | st.set_page_config( 9 | page_title="Archon - Agent Builder", 10 | page_icon="🤖", 11 | layout="wide", 12 | ) 13 | 14 | # Utilities and styles 15 | from utils.utils import get_clients 16 | from streamlit_pages.styles import load_css 17 | 18 | # Streamlit pages 19 | from streamlit_pages.intro import intro_tab 20 | from streamlit_pages.chat import 
chat_tab 21 | from streamlit_pages.environment import environment_tab 22 | from streamlit_pages.database import database_tab 23 | from streamlit_pages.documentation import documentation_tab 24 | from streamlit_pages.agent_service import agent_service_tab 25 | from streamlit_pages.mcp import mcp_tab 26 | from streamlit_pages.future_enhancements import future_enhancements_tab 27 | 28 | # Load environment variables from .env file 29 | load_dotenv() 30 | 31 | # Initialize clients 32 | openai_client, supabase = get_clients() 33 | 34 | # Load custom CSS styles 35 | load_css() 36 | 37 | # Configure logfire to suppress warnings (optional) 38 | logfire.configure(send_to_logfire='never') 39 | 40 | async def main(): 41 | # Check for tab query parameter 42 | query_params = st.query_params 43 | if "tab" in query_params: 44 | tab_name = query_params["tab"] 45 | if tab_name in ["Intro", "Chat", "Environment", "Database", "Documentation", "Agent Service", "MCP", "Future Enhancements"]: 46 | st.session_state.selected_tab = tab_name 47 | 48 | # Add sidebar navigation 49 | with st.sidebar: 50 | st.image("public/ArchonLightGrey.png", width=1000) 51 | 52 | # Navigation options with vertical buttons 53 | st.write("### Navigation") 54 | 55 | # Initialize session state for selected tab if not present 56 | if "selected_tab" not in st.session_state: 57 | st.session_state.selected_tab = "Intro" 58 | 59 | # Vertical navigation buttons 60 | intro_button = st.button("Intro", use_container_width=True, key="intro_button") 61 | chat_button = st.button("Chat", use_container_width=True, key="chat_button") 62 | env_button = st.button("Environment", use_container_width=True, key="env_button") 63 | db_button = st.button("Database", use_container_width=True, key="db_button") 64 | docs_button = st.button("Documentation", use_container_width=True, key="docs_button") 65 | service_button = st.button("Agent Service", use_container_width=True, key="service_button") 66 | mcp_button = st.button("MCP", use_container_width=True, key="mcp_button") 67 | future_enhancements_button = st.button("Future Enhancements", use_container_width=True, key="future_enhancements_button") 68 | 69 | # Update selected tab based on button clicks 70 | if intro_button: 71 | st.session_state.selected_tab = "Intro" 72 | elif chat_button: 73 | st.session_state.selected_tab = "Chat" 74 | elif mcp_button: 75 | st.session_state.selected_tab = "MCP" 76 | elif env_button: 77 | st.session_state.selected_tab = "Environment" 78 | elif service_button: 79 | st.session_state.selected_tab = "Agent Service" 80 | elif db_button: 81 | st.session_state.selected_tab = "Database" 82 | elif docs_button: 83 | st.session_state.selected_tab = "Documentation" 84 | elif future_enhancements_button: 85 | st.session_state.selected_tab = "Future Enhancements" 86 | 87 | # Display the selected tab 88 | if st.session_state.selected_tab == "Intro": 89 | st.title("Archon - Introduction") 90 | intro_tab() 91 | elif st.session_state.selected_tab == "Chat": 92 | st.title("Archon - Agent Builder") 93 | await chat_tab() 94 | elif st.session_state.selected_tab == "MCP": 95 | st.title("Archon - MCP Configuration") 96 | mcp_tab() 97 | elif st.session_state.selected_tab == "Environment": 98 | st.title("Archon - Environment Configuration") 99 | environment_tab() 100 | elif st.session_state.selected_tab == "Agent Service": 101 | st.title("Archon - Agent Service") 102 | agent_service_tab() 103 | elif st.session_state.selected_tab == "Database": 104 | st.title("Archon - Database Configuration") 105 | 
database_tab(supabase) 106 | elif st.session_state.selected_tab == "Documentation": 107 | st.title("Archon - Documentation") 108 | documentation_tab(supabase) 109 | elif st.session_state.selected_tab == "Future Enhancements": 110 | st.title("Archon - Future Enhancements") 111 | future_enhancements_tab() 112 | 113 | if __name__ == "__main__": 114 | asyncio.run(main()) 115 | -------------------------------------------------------------------------------- /iterations/v5-parallel-specialized-agents/archon/agent_tools.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Any, List, Optional 2 | from openai import AsyncOpenAI 3 | from supabase import Client 4 | import sys 5 | import os 6 | 7 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 8 | from utils.utils import get_env_var 9 | 10 | embedding_model = get_env_var('EMBEDDING_MODEL') or 'text-embedding-3-small' 11 | 12 | async def get_embedding(text: str, embedding_client: AsyncOpenAI) -> List[float]: 13 | """Get embedding vector from OpenAI.""" 14 | try: 15 | response = await embedding_client.embeddings.create( 16 | model=embedding_model, 17 | input=text 18 | ) 19 | return response.data[0].embedding 20 | except Exception as e: 21 | print(f"Error getting embedding: {e}") 22 | return [0] * 1536 # Return zero vector on error 23 | 24 | async def retrieve_relevant_documentation_tool(supabase: Client, embedding_client: AsyncOpenAI, user_query: str) -> str: 25 | try: 26 | # Get the embedding for the query 27 | query_embedding = await get_embedding(user_query, embedding_client) 28 | 29 | # Query Supabase for relevant documents 30 | result = supabase.rpc( 31 | 'match_site_pages', 32 | { 33 | 'query_embedding': query_embedding, 34 | 'match_count': 4, 35 | 'filter': {'source': 'pydantic_ai_docs'} 36 | } 37 | ).execute() 38 | 39 | if not result.data: 40 | return "No relevant documentation found." 41 | 42 | # Format the results 43 | formatted_chunks = [] 44 | for doc in result.data: 45 | chunk_text = f""" 46 | # {doc['title']} 47 | 48 | {doc['content']} 49 | """ 50 | formatted_chunks.append(chunk_text) 51 | 52 | # Join all chunks with a separator 53 | return "\n\n---\n\n".join(formatted_chunks) 54 | 55 | except Exception as e: 56 | print(f"Error retrieving documentation: {e}") 57 | return f"Error retrieving documentation: {str(e)}" 58 | 59 | async def list_documentation_pages_tool(supabase: Client) -> List[str]: 60 | """ 61 | Function to retrieve a list of all available Pydantic AI documentation pages. 62 | This is called by the list_documentation_pages tool and also externally 63 | to fetch documentation pages for the reasoner LLM. 64 | 65 | Returns: 66 | List[str]: List of unique URLs for all documentation pages 67 | """ 68 | try: 69 | # Query Supabase for unique URLs where source is pydantic_ai_docs 70 | result = supabase.from_('site_pages') \ 71 | .select('url') \ 72 | .eq('metadata->>source', 'pydantic_ai_docs') \ 73 | .execute() 74 | 75 | if not result.data: 76 | return [] 77 | 78 | # Extract unique URLs 79 | urls = sorted(set(doc['url'] for doc in result.data)) 80 | return urls 81 | 82 | except Exception as e: 83 | print(f"Error retrieving documentation pages: {e}") 84 | return [] 85 | 86 | async def get_page_content_tool(supabase: Client, url: str) -> str: 87 | """ 88 | Retrieve the full content of a specific documentation page by combining all its chunks. 
89 | 90 | Args: 91 | supabase: The Supabase client used to query the database 92 | url: The URL of the page to retrieve 93 | 94 | Returns: 95 | str: The complete page content with all chunks combined in order 96 | """ 97 | try: 98 | # Query Supabase for all chunks of this URL, ordered by chunk_number 99 | result = supabase.from_('site_pages') \ 100 | .select('title, content, chunk_number') \ 101 | .eq('url', url) \ 102 | .eq('metadata->>source', 'pydantic_ai_docs') \ 103 | .order('chunk_number') \ 104 | .execute() 105 | 106 | if not result.data: 107 | return f"No content found for URL: {url}" 108 | 109 | # Format the page with its title and all chunks 110 | page_title = result.data[0]['title'].split(' - ')[0] # Get the main title 111 | formatted_content = [f"# {page_title}\n"] 112 | 113 | # Add each chunk's content 114 | for chunk in result.data: 115 | formatted_content.append(chunk['content']) 116 | 117 | # Join everything together but limit the characters in case the page is massive (there are a couple of big ones) 118 | # This will be improved later so that if the page is too big, RAG will be performed on the page itself 119 | return "\n\n".join(formatted_content)[:20000] 120 | 121 | except Exception as e: 122 | print(f"Error retrieving page content: {e}") 123 | return f"Error retrieving page content: {str(e)}" 124 | -------------------------------------------------------------------------------- /iterations/v5-parallel-specialized-agents/streamlit_ui.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from dotenv import load_dotenv 3 | import streamlit as st 4 | import logfire 5 | import asyncio 6 | 7 | # Set page config - must be the first Streamlit command 8 | st.set_page_config( 9 | page_title="Archon - Agent Builder", 10 | page_icon="🤖", 11 | layout="wide", 12 | ) 13 | 14 | # Utilities and styles 15 | from utils.utils import get_clients 16 | from streamlit_pages.styles import load_css 17 | 18 | # Streamlit pages 19 | from streamlit_pages.intro import intro_tab 20 | from streamlit_pages.chat import chat_tab 21 | from streamlit_pages.environment import environment_tab 22 | from streamlit_pages.database import database_tab 23 | from streamlit_pages.documentation import documentation_tab 24 | from streamlit_pages.agent_service import agent_service_tab 25 | from streamlit_pages.mcp import mcp_tab 26 | from streamlit_pages.future_enhancements import future_enhancements_tab 27 | 28 | # Load environment variables from .env file 29 | load_dotenv() 30 | 31 | # Initialize clients 32 | openai_client, supabase = get_clients() 33 | 34 | # Load custom CSS styles 35 | load_css() 36 | 37 | # Configure logfire to suppress warnings (optional) 38 | logfire.configure(send_to_logfire='never') 39 | 40 | async def main(): 41 | # Check for tab query parameter 42 | query_params = st.query_params 43 | if "tab" in query_params: 44 | tab_name = query_params["tab"] 45 | if tab_name in ["Intro", "Chat", "Environment", "Database", "Documentation", "Agent Service", "MCP", "Future Enhancements"]: 46 | st.session_state.selected_tab = tab_name 47 | 48 | # Add sidebar navigation 49 | with st.sidebar: 50 | st.image("public/ArchonLightGrey.png", width=1000) 51 | 52 | # Navigation options with vertical buttons 53 | st.write("### Navigation") 54 | 55 | # Initialize session state for selected tab if not present 56 | if "selected_tab" not in st.session_state: 57 | st.session_state.selected_tab = "Intro" 58 | 59 | # Vertical navigation buttons 60 | intro_button = 
st.button("Intro", use_container_width=True, key="intro_button") 61 | chat_button = st.button("Chat", use_container_width=True, key="chat_button") 62 | env_button = st.button("Environment", use_container_width=True, key="env_button") 63 | db_button = st.button("Database", use_container_width=True, key="db_button") 64 | docs_button = st.button("Documentation", use_container_width=True, key="docs_button") 65 | service_button = st.button("Agent Service", use_container_width=True, key="service_button") 66 | mcp_button = st.button("MCP", use_container_width=True, key="mcp_button") 67 | future_enhancements_button = st.button("Future Enhancements", use_container_width=True, key="future_enhancements_button") 68 | 69 | # Update selected tab based on button clicks 70 | if intro_button: 71 | st.session_state.selected_tab = "Intro" 72 | elif chat_button: 73 | st.session_state.selected_tab = "Chat" 74 | elif mcp_button: 75 | st.session_state.selected_tab = "MCP" 76 | elif env_button: 77 | st.session_state.selected_tab = "Environment" 78 | elif service_button: 79 | st.session_state.selected_tab = "Agent Service" 80 | elif db_button: 81 | st.session_state.selected_tab = "Database" 82 | elif docs_button: 83 | st.session_state.selected_tab = "Documentation" 84 | elif future_enhancements_button: 85 | st.session_state.selected_tab = "Future Enhancements" 86 | 87 | # Display the selected tab 88 | if st.session_state.selected_tab == "Intro": 89 | st.title("Archon - Introduction") 90 | intro_tab() 91 | elif st.session_state.selected_tab == "Chat": 92 | st.title("Archon - Agent Builder") 93 | await chat_tab() 94 | elif st.session_state.selected_tab == "MCP": 95 | st.title("Archon - MCP Configuration") 96 | mcp_tab() 97 | elif st.session_state.selected_tab == "Environment": 98 | st.title("Archon - Environment Configuration") 99 | environment_tab() 100 | elif st.session_state.selected_tab == "Agent Service": 101 | st.title("Archon - Agent Service") 102 | agent_service_tab() 103 | elif st.session_state.selected_tab == "Database": 104 | st.title("Archon - Database Configuration") 105 | database_tab(supabase) 106 | elif st.session_state.selected_tab == "Documentation": 107 | st.title("Archon - Documentation") 108 | documentation_tab(supabase) 109 | elif st.session_state.selected_tab == "Future Enhancements": 110 | st.title("Archon - Future Enhancements") 111 | future_enhancements_tab() 112 | 113 | if __name__ == "__main__": 114 | asyncio.run(main()) 115 | -------------------------------------------------------------------------------- /iterations/v1-single-agent/streamlit_ui.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from typing import Literal, TypedDict 3 | import asyncio 4 | import os 5 | 6 | import streamlit as st 7 | import json 8 | import logfire 9 | from supabase import Client 10 | from openai import AsyncOpenAI 11 | 12 | # Import all the message part classes 13 | from pydantic_ai.messages import ( 14 | ModelMessage, 15 | ModelRequest, 16 | ModelResponse, 17 | SystemPromptPart, 18 | UserPromptPart, 19 | TextPart, 20 | ToolCallPart, 21 | ToolReturnPart, 22 | RetryPromptPart, 23 | ModelMessagesTypeAdapter 24 | ) 25 | from pydantic_ai_coder import pydantic_ai_coder, PydanticAIDeps 26 | 27 | # Load environment variables 28 | from dotenv import load_dotenv 29 | load_dotenv() 30 | 31 | openai_client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY")) 32 | supabase: Client = Client( 33 | os.getenv("SUPABASE_URL"), 34 | 
os.getenv("SUPABASE_SERVICE_KEY") 35 | ) 36 | 37 | # Configure logfire to suppress warnings (optional) 38 | logfire.configure(send_to_logfire='never') 39 | 40 | class ChatMessage(TypedDict): 41 | """Format of messages sent to the browser/API.""" 42 | 43 | role: Literal['user', 'model'] 44 | timestamp: str 45 | content: str 46 | 47 | 48 | def display_message_part(part): 49 | """ 50 | Display a single part of a message in the Streamlit UI. 51 | Customize how you display system prompts, user prompts, 52 | tool calls, tool returns, etc. 53 | """ 54 | # system-prompt 55 | if part.part_kind == 'system-prompt': 56 | with st.chat_message("system"): 57 | st.markdown(f"**System**: {part.content}") 58 | # user-prompt 59 | elif part.part_kind == 'user-prompt': 60 | with st.chat_message("user"): 61 | st.markdown(part.content) 62 | # text 63 | elif part.part_kind == 'text': 64 | with st.chat_message("assistant"): 65 | st.markdown(part.content) 66 | 67 | 68 | async def run_agent_with_streaming(user_input: str): 69 | """ 70 | Run the agent with streaming text for the user_input prompt, 71 | while maintaining the entire conversation in `st.session_state.messages`. 72 | """ 73 | # Prepare dependencies 74 | deps = PydanticAIDeps( 75 | supabase=supabase, 76 | openai_client=openai_client 77 | ) 78 | 79 | # Run the agent in a stream 80 | async with pydantic_ai_coder.run_stream( 81 | user_input, 82 | deps=deps, 83 | message_history= st.session_state.messages[:-1], # pass entire conversation so far 84 | ) as result: 85 | # We'll gather partial text to show incrementally 86 | partial_text = "" 87 | message_placeholder = st.empty() 88 | 89 | # Render partial text as it arrives 90 | async for chunk in result.stream_text(delta=True): 91 | partial_text += chunk 92 | message_placeholder.markdown(partial_text) 93 | 94 | # Now that the stream is finished, we have a final result. 95 | # Add new messages from this run, excluding user-prompt messages 96 | filtered_messages = [msg for msg in result.new_messages() 97 | if not (hasattr(msg, 'parts') and 98 | any(part.part_kind == 'user-prompt' for part in msg.parts))] 99 | st.session_state.messages.extend(filtered_messages) 100 | 101 | # Add the final response to the messages 102 | st.session_state.messages.append( 103 | ModelResponse(parts=[TextPart(content=partial_text)]) 104 | ) 105 | 106 | 107 | async def main(): 108 | st.title("Archon - Agent Builder") 109 | st.write("Describe to me an AI agent you want to build and I'll code it for you with Pydantic AI.") 110 | 111 | # Initialize chat history in session state if not present 112 | if "messages" not in st.session_state: 113 | st.session_state.messages = [] 114 | 115 | # Display all messages from the conversation so far 116 | # Each message is either a ModelRequest or ModelResponse. 117 | # We iterate over their parts to decide how to display them. 
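    # For example, a single exchange in this history is built as:
    #   ModelRequest(parts=[UserPromptPart(content="Build a weather agent")])
    #   ModelResponse(parts=[TextPart(content="Here is the agent code ...")])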
118 | for msg in st.session_state.messages: 119 | if isinstance(msg, ModelRequest) or isinstance(msg, ModelResponse): 120 | for part in msg.parts: 121 | display_message_part(part) 122 | 123 | # Chat input for the user 124 | user_input = st.chat_input("What do you want to build today?") 125 | 126 | if user_input: 127 | # We append a new request to the conversation explicitly 128 | st.session_state.messages.append( 129 | ModelRequest(parts=[UserPromptPart(content=user_input)]) 130 | ) 131 | 132 | # Display user prompt in the UI 133 | with st.chat_message("user"): 134 | st.markdown(user_input) 135 | 136 | # Display the assistant's partial response while streaming 137 | with st.chat_message("assistant"): 138 | # Actually run the agent now, streaming the text 139 | await run_agent_with_streaming(user_input) 140 | 141 | 142 | if __name__ == "__main__": 143 | asyncio.run(main()) 144 | -------------------------------------------------------------------------------- /run_docker.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Simple script to build and run Archon Docker containers. 4 | """ 5 | 6 | import os 7 | import subprocess 8 | import platform 9 | import time 10 | from pathlib import Path 11 | 12 | def run_command(command, cwd=None): 13 | """Run a command and print output in real-time.""" 14 | print(f"Running: {' '.join(command)}") 15 | process = subprocess.Popen( 16 | command, 17 | stdout=subprocess.PIPE, 18 | stderr=subprocess.STDOUT, 19 | text=False, 20 | cwd=cwd 21 | ) 22 | 23 | for line in process.stdout: 24 | try: 25 | decoded_line = line.decode('utf-8', errors='replace') 26 | print(decoded_line.strip()) 27 | except Exception as e: 28 | print(f"Error processing output: {e}") 29 | 30 | process.wait() 31 | return process.returncode 32 | 33 | def check_docker(): 34 | """Check if Docker is installed and running.""" 35 | try: 36 | subprocess.run( 37 | ["docker", "--version"], 38 | check=True, 39 | stdout=subprocess.PIPE, 40 | stderr=subprocess.PIPE 41 | ) 42 | return True 43 | except (subprocess.SubprocessError, FileNotFoundError): 44 | print("Error: Docker is not installed or not in PATH") 45 | return False 46 | 47 | def main(): 48 | """Main function to build and run Archon containers.""" 49 | # Check if Docker is available 50 | if not check_docker(): 51 | return 1 52 | 53 | # Get the base directory 54 | base_dir = Path(__file__).parent.absolute() 55 | 56 | # Check for .env file 57 | env_file = base_dir / ".env" 58 | env_args = [] 59 | if env_file.exists(): 60 | print(f"Using environment file: {env_file}") 61 | env_args = ["--env-file", str(env_file)] 62 | else: 63 | print("No .env file found. 
Continuing without environment variables.") 64 | 65 | # Build the MCP container 66 | print("\n=== Building Archon MCP container ===") 67 | mcp_dir = base_dir / "mcp" 68 | if run_command(["docker", "build", "-t", "archon-mcp:latest", "."], cwd=mcp_dir) != 0: 69 | print("Error building MCP container") 70 | return 1 71 | 72 | # Build the main Archon container 73 | print("\n=== Building main Archon container ===") 74 | if run_command(["docker", "build", "-t", "archon:latest", "."], cwd=base_dir) != 0: 75 | print("Error building main Archon container") 76 | return 1 77 | 78 | # Check if the container exists (running or stopped) 79 | try: 80 | result = subprocess.run( 81 | ["docker", "ps", "-a", "-q", "--filter", "name=archon-container"], 82 | check=True, 83 | capture_output=True, 84 | text=True 85 | ) 86 | if result.stdout.strip(): 87 | print("\n=== Removing existing Archon container ===") 88 | container_id = result.stdout.strip() 89 | print(f"Found container with ID: {container_id}") 90 | 91 | # Check if the container is running 92 | running_check = subprocess.run( 93 | ["docker", "ps", "-q", "--filter", "id=" + container_id], 94 | check=True, 95 | capture_output=True, 96 | text=True 97 | ) 98 | 99 | # If running, stop it first 100 | if running_check.stdout.strip(): 101 | print("Container is running. Stopping it first...") 102 | stop_result = run_command(["docker", "stop", container_id]) 103 | if stop_result != 0: 104 | print("Warning: Failed to stop container gracefully, will try force removal") 105 | 106 | # Remove the container with force flag to ensure it's removed 107 | print("Removing container...") 108 | rm_result = run_command(["docker", "rm", "-f", container_id]) 109 | if rm_result != 0: 110 | print("Error: Failed to remove container. Please remove it manually with:") 111 | print(f" docker rm -f {container_id}") 112 | return 1 113 | 114 | print("Container successfully removed") 115 | except subprocess.SubprocessError as e: 116 | print(f"Error checking for existing containers: {e}") 117 | pass 118 | 119 | # Run the Archon container 120 | print("\n=== Starting Archon container ===") 121 | cmd = [ 122 | "docker", "run", "-d", 123 | "--name", "archon-container", 124 | "-p", "8501:8501", 125 | "-p", "8100:8100", 126 | "--add-host", "host.docker.internal:host-gateway" 127 | ] 128 | 129 | # Add environment variables if .env exists 130 | if env_args: 131 | cmd.extend(env_args) 132 | 133 | # Add image name 134 | cmd.append("archon:latest") 135 | 136 | if run_command(cmd) != 0: 137 | print("Error starting Archon container") 138 | return 1 139 | 140 | # Wait a moment for the container to start 141 | time.sleep(2) 142 | 143 | # Print success message 144 | print("\n=== Archon is now running! ===") 145 | print("-> Access the Streamlit UI at: http://localhost:8501") 146 | print("-> MCP container is ready to use - see the MCP tab in the UI.") 147 | print("\nTo stop Archon, run: docker stop archon-container && docker rm archon-container") 148 | 149 | return 0 150 | 151 | if __name__ == "__main__": 152 | exit(main()) 153 | -------------------------------------------------------------------------------- /iterations/v5-parallel-specialized-agents/run_docker.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Simple script to build and run Archon Docker containers. 
4 | """ 5 | 6 | import os 7 | import subprocess 8 | import platform 9 | import time 10 | from pathlib import Path 11 | 12 | def run_command(command, cwd=None): 13 | """Run a command and print output in real-time.""" 14 | print(f"Running: {' '.join(command)}") 15 | process = subprocess.Popen( 16 | command, 17 | stdout=subprocess.PIPE, 18 | stderr=subprocess.STDOUT, 19 | text=False, 20 | cwd=cwd 21 | ) 22 | 23 | for line in process.stdout: 24 | try: 25 | decoded_line = line.decode('utf-8', errors='replace') 26 | print(decoded_line.strip()) 27 | except Exception as e: 28 | print(f"Error processing output: {e}") 29 | 30 | process.wait() 31 | return process.returncode 32 | 33 | def check_docker(): 34 | """Check if Docker is installed and running.""" 35 | try: 36 | subprocess.run( 37 | ["docker", "--version"], 38 | check=True, 39 | stdout=subprocess.PIPE, 40 | stderr=subprocess.PIPE 41 | ) 42 | return True 43 | except (subprocess.SubprocessError, FileNotFoundError): 44 | print("Error: Docker is not installed or not in PATH") 45 | return False 46 | 47 | def main(): 48 | """Main function to build and run Archon containers.""" 49 | # Check if Docker is available 50 | if not check_docker(): 51 | return 1 52 | 53 | # Get the base directory 54 | base_dir = Path(__file__).parent.absolute() 55 | 56 | # Check for .env file 57 | env_file = base_dir / ".env" 58 | env_args = [] 59 | if env_file.exists(): 60 | print(f"Using environment file: {env_file}") 61 | env_args = ["--env-file", str(env_file)] 62 | else: 63 | print("No .env file found. Continuing without environment variables.") 64 | 65 | # Build the MCP container 66 | print("\n=== Building Archon MCP container ===") 67 | mcp_dir = base_dir / "mcp" 68 | if run_command(["docker", "build", "-t", "archon-mcp:latest", "."], cwd=mcp_dir) != 0: 69 | print("Error building MCP container") 70 | return 1 71 | 72 | # Build the main Archon container 73 | print("\n=== Building main Archon container ===") 74 | if run_command(["docker", "build", "-t", "archon:latest", "."], cwd=base_dir) != 0: 75 | print("Error building main Archon container") 76 | return 1 77 | 78 | # Check if the container exists (running or stopped) 79 | try: 80 | result = subprocess.run( 81 | ["docker", "ps", "-a", "-q", "--filter", "name=archon-container"], 82 | check=True, 83 | capture_output=True, 84 | text=True 85 | ) 86 | if result.stdout.strip(): 87 | print("\n=== Removing existing Archon container ===") 88 | container_id = result.stdout.strip() 89 | print(f"Found container with ID: {container_id}") 90 | 91 | # Check if the container is running 92 | running_check = subprocess.run( 93 | ["docker", "ps", "-q", "--filter", "id=" + container_id], 94 | check=True, 95 | capture_output=True, 96 | text=True 97 | ) 98 | 99 | # If running, stop it first 100 | if running_check.stdout.strip(): 101 | print("Container is running. Stopping it first...") 102 | stop_result = run_command(["docker", "stop", container_id]) 103 | if stop_result != 0: 104 | print("Warning: Failed to stop container gracefully, will try force removal") 105 | 106 | # Remove the container with force flag to ensure it's removed 107 | print("Removing container...") 108 | rm_result = run_command(["docker", "rm", "-f", container_id]) 109 | if rm_result != 0: 110 | print("Error: Failed to remove container. 
Please remove it manually with:") 111 | print(f" docker rm -f {container_id}") 112 | return 1 113 | 114 | print("Container successfully removed") 115 | except subprocess.SubprocessError as e: 116 | print(f"Error checking for existing containers: {e}") 117 | pass 118 | 119 | # Run the Archon container 120 | print("\n=== Starting Archon container ===") 121 | cmd = [ 122 | "docker", "run", "-d", 123 | "--name", "archon-container", 124 | "-p", "8501:8501", 125 | "-p", "8100:8100", 126 | "--add-host", "host.docker.internal:host-gateway" 127 | ] 128 | 129 | # Add environment variables if .env exists 130 | if env_args: 131 | cmd.extend(env_args) 132 | 133 | # Add image name 134 | cmd.append("archon:latest") 135 | 136 | if run_command(cmd) != 0: 137 | print("Error starting Archon container") 138 | return 1 139 | 140 | # Wait a moment for the container to start 141 | time.sleep(2) 142 | 143 | # Print success message 144 | print("\n=== Archon is now running! ===") 145 | print("-> Access the Streamlit UI at: http://localhost:8501") 146 | print("-> MCP container is ready to use - see the MCP tab in the UI.") 147 | print("\nTo stop Archon, run: docker stop archon-container && docker rm archon-container") 148 | 149 | return 0 150 | 151 | if __name__ == "__main__": 152 | exit(main()) 153 | -------------------------------------------------------------------------------- /iterations/v5-parallel-specialized-agents/README.md: -------------------------------------------------------------------------------- 1 | # Archon V5 - Multi-Agent Coding Workflow 2 | 3 | This is the fifth iteration of the Archon project, building upon V4 by implementing a multi-agent coding workflow with specialized refiner agents. The system retains the comprehensive Streamlit UI and Docker support from V4, but now adds a sophisticated refinement process with specialized agents for different aspects of agent creation. 4 | 5 | What makes V5 special is its approach to agent refinement. The primary coding agent still creates the initial cohesive agent structure, but now users can trigger specialized refiner agents by simply saying "refine" in the chat. This activates three parallel specialized agents that focus on optimizing different aspects of the agent: 6 | 7 | 1. **Prompt Refiner Agent**: Specializes in optimizing system prompts 8 | 2. **Tools Refiner Agent**: Focuses on implementing and improving agent tools 9 | 3. **Agent Refiner Agent**: Optimizes agent configuration and dependencies 10 | 11 | The core remains an intelligent documentation crawler and RAG (Retrieval-Augmented Generation) system built using Pydantic AI, LangGraph, and Supabase, but now with a more sophisticated multi-agent workflow for refinement. 
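As a rough picture of the parallel refinement fan-out described under Architecture below, the following is a minimal LangGraph sketch. It is only a sketch under assumed names: the state keys, node functions, and wiring are illustrative, not the actual `archon/archon_graph.py` implementation.

```python
from typing import TypedDict

from langgraph.graph import StateGraph, START, END

class RefineState(TypedDict, total=False):
    agent_code: str       # code produced by the primary coding agent
    refined_prompt: str   # written only by the prompt refiner
    refined_tools: str    # written only by the tools refiner
    refined_agent: str    # written only by the agent refiner

def refine_prompt(state: RefineState) -> dict:
    # Placeholder body; the real agent would call an LLM to optimize the system prompt.
    return {"refined_prompt": "optimized system prompt"}

def refine_tools(state: RefineState) -> dict:
    return {"refined_tools": "improved tool implementations"}

def refine_agent(state: RefineState) -> dict:
    return {"refined_agent": "tuned agent configuration"}

def integrate(state: RefineState) -> dict:
    # The primary coding agent folds the three refinements back into the code.
    return {"agent_code": state["agent_code"] + "\n# refinements applied"}

builder = StateGraph(RefineState)
builder.add_node("refine_prompt", refine_prompt)
builder.add_node("refine_tools", refine_tools)
builder.add_node("refine_agent", refine_agent)
builder.add_node("integrate", integrate)

for refiner in ("refine_prompt", "refine_tools", "refine_agent"):
    builder.add_edge(START, refiner)        # fan out: the three refiners run in parallel
    builder.add_edge(refiner, "integrate")  # fan in: integrate waits for all of them

builder.add_edge("integrate", END)
graph = builder.compile()

print(graph.invoke({"agent_code": "agent = Agent(model, system_prompt=prompt)"}))
```

Because each refiner writes to its own state key, the three branches can execute in the same step without write conflicts, and the single `integrate` node only fires once all of them have finished, mirroring the "Integrated Improvements" step of the workflow.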
12 | 13 | ## Key Features 14 | 15 | - **Specialized Refiner Agents**: Three dedicated agents for different aspects of agent creation 16 | - **Parallel Refinement Process**: Agents work simultaneously to improve different components 17 | - **Two-Phase Development**: Initial cohesive structure followed by specialized refinement 18 | - **Simple Refinement Trigger**: Just say "refine" to activate the specialized agents 19 | - **Improved Workflow Orchestration**: Enhanced LangGraph implementation 20 | - **Comprehensive Streamlit UI**: Unified interface for all Archon functionality (from V4) 21 | - **Docker Support**: Containerized deployment with automated build and run scripts (from V4) 22 | - **Multiple LLM Support**: OpenAI, Anthropic, OpenRouter, and local Ollama models 23 | 24 | ## Architecture 25 | 26 | The V5 architecture introduces a more sophisticated agent workflow: 27 | 28 | 1. **Initial Request**: User describes the AI agent they want to create 29 | 2. **Scope Definition**: Reasoner LLM creates a high-level scope for the agent 30 | 3. **Initial Agent Creation**: Primary coding agent creates a cohesive initial agent 31 | 4. **User Interaction**: User can provide feedback or request refinement 32 | 5. **Specialized Refinement**: When "refine" is requested, three specialized agents work in parallel: 33 | - Prompt Refiner Agent optimizes the system prompt 34 | - Tools Refiner Agent improves the agent's tools 35 | - Agent Refiner Agent enhances the agent configuration 36 | 6. **Integrated Improvements**: Primary coding agent incorporates all refinements 37 | 7. **Iterative Process**: Steps 4-6 repeat until the user is satisfied 38 | 8. **Finalization**: Archon provides the complete code with execution instructions 39 | 40 | ### Agent Graph 41 | 42 | The LangGraph workflow orchestrates the entire process: 43 | 44 | ![Archon Graph](../../public/ArchonGraph.png) 45 | 46 | The graph shows how control flows between different agents and how user input can either continue the conversation or trigger the refinement process. 47 | 48 | ## Specialized Agents 49 | 50 | ### Prompt Refiner Agent 51 | - Focuses exclusively on optimizing the system prompt 52 | - Analyzes the conversation history to understand the agent's purpose 53 | - Suggests improvements to make the prompt more effective and precise 54 | - Ensures the prompt aligns with best practices for the specific agent type 55 | 56 | ### Tools Refiner Agent 57 | - Specializes in implementing and improving agent tools 58 | - Has access to Pydantic AI documentation on tool implementation 59 | - Suggests new tools or improvements to existing tools 60 | - Ensures tools follow best practices and are properly typed 61 | 62 | ### Agent Refiner Agent 63 | - Optimizes agent configuration and dependencies 64 | - Ensures proper setup of agent dependencies and parameters 65 | - Improves error handling and retry mechanisms 66 | - Optimizes agent performance and reliability 67 | 68 | ## Prerequisites 69 | - Docker (optional but preferred) 70 | - Python 3.11+ 71 | - Supabase account (for vector database) 72 | - OpenAI/Anthropic/OpenRouter API key or Ollama for local LLMs 73 | 74 | ## Installation 75 | 76 | ### Option 1: Docker (Recommended) 77 | 1. Clone the repository: 78 | ```bash 79 | git clone https://github.com/coleam00/archon.git 80 | cd archon 81 | ``` 82 | 83 | 2. Run the Docker setup script: 84 | ```bash 85 | # This will build both containers and start Archon 86 | python run_docker.py 87 | ``` 88 | 89 | 3. 
Access the Streamlit UI at http://localhost:8501. 90 | 91 | ### Option 2: Local Python Installation 92 | 1. Clone the repository: 93 | ```bash 94 | git clone https://github.com/coleam00/archon.git 95 | cd archon 96 | ``` 97 | 98 | 2. Install dependencies: 99 | ```bash 100 | python -m venv venv 101 | source venv/bin/activate # On Windows: venv\Scripts\activate 102 | pip install -r requirements.txt 103 | ``` 104 | 105 | 3. Start the Streamlit UI: 106 | ```bash 107 | streamlit run streamlit_ui.py 108 | ``` 109 | 110 | 4. Access the Streamlit UI at http://localhost:8501. 111 | 112 | ## Using the Refinement Feature 113 | 114 | To use the new refinement feature in V5: 115 | 116 | 1. Start a conversation with Archon and describe the agent you want to create 117 | 2. After Archon generates the initial agent, type "refine" or a similar phrase 118 | 3. Archon will activate the specialized refiner agents in parallel 119 | 4. Once refinement is complete, Archon will present the improved agent 120 | 5. You can continue to provide feedback or request additional refinements 121 | 6. When satisfied, ask Archon to finalize the agent 122 | 123 | ## Core Files 124 | 125 | ### Refiner Agents 126 | - `archon/refiner_agents/`: Directory containing specialized refiner agents 127 | - `prompt_refiner_agent.py`: Agent specialized in optimizing system prompts 128 | - `tools_refiner_agent.py`: Agent focused on implementing and improving tools 129 | - `agent_refiner_agent.py`: Agent for optimizing agent configuration and dependencies 130 | 131 | ### Workflow Orchestration 132 | - `archon/archon_graph.py`: Enhanced LangGraph workflow with refinement paths 133 | - `archon/pydantic_ai_coder.py`: Main coding agent with RAG capabilities 134 | 135 | ## Contributing 136 | 137 | Contributions are welcome! Please feel free to submit a Pull Request. 138 | -------------------------------------------------------------------------------- /streamlit_pages/intro.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import sys 3 | import os 4 | 5 | # Add the parent directory to sys.path to allow importing from the parent directory 6 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 7 | from utils.utils import create_new_tab_button 8 | 9 | def intro_tab(): 10 | """Display the introduction and setup guide for Archon""" 11 | # Welcome message 12 | st.markdown(""" 13 | Archon is an AI meta-agent designed to autonomously build, refine, and optimize other AI agents. 14 | 15 | It serves both as a practical tool for developers and as an educational framework demonstrating the evolution of agentic systems. 16 | Archon is developed in iterations, starting with a simple Pydantic AI agent that can build other Pydantic AI agents, 17 | all the way to a full agentic workflow using LangGraph that can build other AI agents with any framework. 18 | 19 | Through its iterative development, Archon showcases the power of planning, feedback loops, and domain-specific knowledge in creating robust AI agents. 20 | """) 21 | 22 | # Environment variables update notice 23 | st.warning(""" 24 | **🔄 IMPORTANT UPDATE (March 20th):** Archon now uses a multi-agent workflow with specialized refiner agents for autonomous prompt, tools, and agent definition improvements. The primary coding agent still creates the initial agent by itself, but then you can say 'refine' or something along those lines as a follow-up prompt to kick off the specialized agents in parallel. 
25 | """) 26 | 27 | # Setup guide with expandable sections 28 | st.markdown("## Setup Guide") 29 | st.markdown("Follow these concise steps to get Archon up and running (IMPORTANT: come back here after each step):") 30 | 31 | # Step 1: Environment Configuration 32 | with st.expander("Step 1: Environment Configuration", expanded=True): 33 | st.markdown(""" 34 | ### Environment Configuration 35 | 36 | First, you need to set up your environment variables: 37 | 38 | 1. Go to the **Environment** tab 39 | 2. Configure the following essential variables: 40 | - `BASE_URL`: API endpoint (OpenAI, OpenRouter, or Ollama) 41 | - `LLM_API_KEY`: Your API key for the LLM service 42 | - `OPENAI_API_KEY`: Required for embeddings 43 | - `SUPABASE_URL`: Your Supabase project URL 44 | - `SUPABASE_SERVICE_KEY`: Your Supabase service key 45 | - `PRIMARY_MODEL`: Main agent model (e.g., gpt-4o-mini) 46 | - `REASONER_MODEL`: Planning model (e.g., o3-mini) 47 | 48 | These settings determine how Archon connects to external services and which models it uses. 49 | """) 50 | # Add a button to navigate to the Environment tab 51 | create_new_tab_button("Go to Environment Section (New Tab)", "Environment", key="goto_env", use_container_width=True) 52 | 53 | # Step 2: Database Setup 54 | with st.expander("Step 2: Database Setup", expanded=False): 55 | st.markdown(""" 56 | ### Database Setup 57 | 58 | Archon uses Supabase for vector storage and retrieval: 59 | 60 | 1. Go to the **Database** tab 61 | 2. Select your embedding dimensions (1536 for OpenAI, 768 for nomic-embed-text) 62 | 3. Follow the instructions to create the `site_pages` table 63 | 64 | This creates the necessary tables, indexes, and functions for vector similarity search. 65 | """) 66 | # Add a button to navigate to the Database tab 67 | create_new_tab_button("Go to Database Section (New Tab)", "Database", key="goto_db", use_container_width=True) 68 | 69 | # Step 3: Documentation Crawling 70 | with st.expander("Step 3: Documentation Crawling", expanded=False): 71 | st.markdown(""" 72 | ### Documentation Crawling 73 | 74 | Populate the database with framework documentation: 75 | 76 | 1. Go to the **Documentation** tab 77 | 2. Click on "Crawl Pydantic AI Docs" 78 | 3. Wait for the crawling process to complete 79 | 80 | This step downloads and processes documentation, creating embeddings for semantic search. 81 | """) 82 | # Add a button to navigate to the Documentation tab 83 | create_new_tab_button("Go to the Documentation Section (New Tab)", "Documentation", key="goto_docs", use_container_width=True) 84 | 85 | # Step 4: Agent Service 86 | with st.expander("Step 4: Agent Service Setup (for MCP)", expanded=False): 87 | st.markdown(""" 88 | ### MCP Agent Service Setup 89 | 90 | Start the graph service for agent generation: 91 | 92 | 1. Go to the **Agent Service** tab 93 | 2. Click on "Start Agent Service" 94 | 3. Verify the service is running 95 | 96 | The agent service powers the LangGraph workflow for agent creation. 97 | """) 98 | # Add a button to navigate to the Agent Service tab 99 | create_new_tab_button("Go to Agent Service Section (New Tab)", "Agent Service", key="goto_service", use_container_width=True) 100 | 101 | # Step 5: MCP Configuration (Optional) 102 | with st.expander("Step 5: MCP Configuration (Optional)", expanded=False): 103 | st.markdown(""" 104 | ### MCP Configuration 105 | 106 | For integration with AI IDEs: 107 | 108 | 1. Go to the **MCP** tab 109 | 2. Select your IDE (Windsurf, Cursor, or Cline/Roo Code) 110 | 3. 
Follow the instructions to configure your IDE 111 | 112 | This enables you to use Archon directly from your AI-powered IDE. 113 | """) 114 | # Add a button to navigate to the MCP tab 115 | create_new_tab_button("Go to MCP Section (New Tab)", "MCP", key="goto_mcp", use_container_width=True) 116 | 117 | # Step 6: Using Archon 118 | with st.expander("Step 6: Using Archon", expanded=False): 119 | st.markdown(""" 120 | ### Using Archon 121 | 122 | Once everything is set up: 123 | 124 | 1. Go to the **Chat** tab 125 | 2. Describe the agent you want to build 126 | 3. Archon will plan and generate the necessary code 127 | 128 | You can also use Archon directly from your AI IDE if you've configured MCP. 129 | """) 130 | # Add a button to navigate to the Chat tab 131 | create_new_tab_button("Go to Chat Section (New Tab)", "Chat", key="goto_chat", use_container_width=True) 132 | 133 | # Resources 134 | st.markdown(""" 135 | ## Additional Resources 136 | 137 | - [GitHub Repository](https://github.com/coleam00/archon) 138 | - [Archon Community Forum](https://thinktank.ottomator.ai/c/archon/30) 139 | - [GitHub Kanban Board](https://github.com/users/coleam00/projects/1) 140 | """) -------------------------------------------------------------------------------- /iterations/v5-parallel-specialized-agents/streamlit_pages/intro.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import sys 3 | import os 4 | 5 | # Add the parent directory to sys.path to allow importing from the parent directory 6 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 7 | from utils.utils import create_new_tab_button 8 | 9 | def intro_tab(): 10 | """Display the introduction and setup guide for Archon""" 11 | # Welcome message 12 | st.markdown(""" 13 | Archon is an AI meta-agent designed to autonomously build, refine, and optimize other AI agents. 14 | 15 | It serves both as a practical tool for developers and as an educational framework demonstrating the evolution of agentic systems. 16 | Archon is developed in iterations, starting with a simple Pydantic AI agent that can build other Pydantic AI agents, 17 | all the way to a full agentic workflow using LangGraph that can build other AI agents with any framework. 18 | 19 | Through its iterative development, Archon showcases the power of planning, feedback loops, and domain-specific knowledge in creating robust AI agents. 20 | """) 21 | 22 | # Environment variables update notice 23 | st.warning(""" 24 | **🔄 IMPORTANT UPDATE (March 20th):** Archon now uses a multi-agent workflow with specialized refiner agents for autonomous prompt, tools, and agent definition improvements. The primary coding agent still creates the initial agent by itself, but then you can say 'refine' or something along those lines as a follow-up prompt to kick off the specialized agents in parallel. 25 | """) 26 | 27 | # Setup guide with expandable sections 28 | st.markdown("## Setup Guide") 29 | st.markdown("Follow these concise steps to get Archon up and running (IMPORTANT: come back here after each step):") 30 | 31 | # Step 1: Environment Configuration 32 | with st.expander("Step 1: Environment Configuration", expanded=True): 33 | st.markdown(""" 34 | ### Environment Configuration 35 | 36 | First, you need to set up your environment variables: 37 | 38 | 1. Go to the **Environment** tab 39 | 2. 
Configure the following essential variables: 40 | - `BASE_URL`: API endpoint (OpenAI, OpenRouter, or Ollama) 41 | - `LLM_API_KEY`: Your API key for the LLM service 42 | - `OPENAI_API_KEY`: Required for embeddings 43 | - `SUPABASE_URL`: Your Supabase project URL 44 | - `SUPABASE_SERVICE_KEY`: Your Supabase service key 45 | - `PRIMARY_MODEL`: Main agent model (e.g., gpt-4o-mini) 46 | - `REASONER_MODEL`: Planning model (e.g., o3-mini) 47 | 48 | These settings determine how Archon connects to external services and which models it uses. 49 | """) 50 | # Add a button to navigate to the Environment tab 51 | create_new_tab_button("Go to Environment Section (New Tab)", "Environment", key="goto_env", use_container_width=True) 52 | 53 | # Step 2: Database Setup 54 | with st.expander("Step 2: Database Setup", expanded=False): 55 | st.markdown(""" 56 | ### Database Setup 57 | 58 | Archon uses Supabase for vector storage and retrieval: 59 | 60 | 1. Go to the **Database** tab 61 | 2. Select your embedding dimensions (1536 for OpenAI, 768 for nomic-embed-text) 62 | 3. Follow the instructions to create the `site_pages` table 63 | 64 | This creates the necessary tables, indexes, and functions for vector similarity search. 65 | """) 66 | # Add a button to navigate to the Database tab 67 | create_new_tab_button("Go to Database Section (New Tab)", "Database", key="goto_db", use_container_width=True) 68 | 69 | # Step 3: Documentation Crawling 70 | with st.expander("Step 3: Documentation Crawling", expanded=False): 71 | st.markdown(""" 72 | ### Documentation Crawling 73 | 74 | Populate the database with framework documentation: 75 | 76 | 1. Go to the **Documentation** tab 77 | 2. Click on "Crawl Pydantic AI Docs" 78 | 3. Wait for the crawling process to complete 79 | 80 | This step downloads and processes documentation, creating embeddings for semantic search. 81 | """) 82 | # Add a button to navigate to the Documentation tab 83 | create_new_tab_button("Go to the Documentation Section (New Tab)", "Documentation", key="goto_docs", use_container_width=True) 84 | 85 | # Step 4: Agent Service 86 | with st.expander("Step 4: Agent Service Setup (for MCP)", expanded=False): 87 | st.markdown(""" 88 | ### MCP Agent Service Setup 89 | 90 | Start the graph service for agent generation: 91 | 92 | 1. Go to the **Agent Service** tab 93 | 2. Click on "Start Agent Service" 94 | 3. Verify the service is running 95 | 96 | The agent service powers the LangGraph workflow for agent creation. 97 | """) 98 | # Add a button to navigate to the Agent Service tab 99 | create_new_tab_button("Go to Agent Service Section (New Tab)", "Agent Service", key="goto_service", use_container_width=True) 100 | 101 | # Step 5: MCP Configuration (Optional) 102 | with st.expander("Step 5: MCP Configuration (Optional)", expanded=False): 103 | st.markdown(""" 104 | ### MCP Configuration 105 | 106 | For integration with AI IDEs: 107 | 108 | 1. Go to the **MCP** tab 109 | 2. Select your IDE (Windsurf, Cursor, or Cline/Roo Code) 110 | 3. Follow the instructions to configure your IDE 111 | 112 | This enables you to use Archon directly from your AI-powered IDE. 113 | """) 114 | # Add a button to navigate to the MCP tab 115 | create_new_tab_button("Go to MCP Section (New Tab)", "MCP", key="goto_mcp", use_container_width=True) 116 | 117 | # Step 6: Using Archon 118 | with st.expander("Step 6: Using Archon", expanded=False): 119 | st.markdown(""" 120 | ### Using Archon 121 | 122 | Once everything is set up: 123 | 124 | 1. Go to the **Chat** tab 125 | 2. 
Describe the agent you want to build 126 | 3. Archon will plan and generate the necessary code 127 | 128 | You can also use Archon directly from your AI IDE if you've configured MCP. 129 | """) 130 | # Add a button to navigate to the Chat tab 131 | create_new_tab_button("Go to Chat Section (New Tab)", "Chat", key="goto_chat", use_container_width=True) 132 | 133 | # Resources 134 | st.markdown(""" 135 | ## Additional Resources 136 | 137 | - [GitHub Repository](https://github.com/coleam00/archon) 138 | - [Archon Community Forum](https://thinktank.ottomator.ai/c/archon/30) 139 | - [GitHub Kanban Board](https://github.com/users/coleam00/projects/1) 140 | """) --------------------------------------------------------------------------------