├── .dockerignore ├── .editorconfig ├── .github └── workflows │ └── validate.yml ├── .gitignore ├── Dockerfile ├── LICENSE ├── README.md ├── agents ├── __init__.py ├── agno_assist.py ├── finance_agent.py ├── selector.py └── web_agent.py ├── api ├── __init__.py ├── main.py ├── routes │ ├── __init__.py │ ├── agents.py │ ├── health.py │ ├── playground.py │ └── v1_router.py └── settings.py ├── compose.yaml ├── db ├── __init__.py ├── session.py └── url.py ├── example.env ├── pyproject.toml ├── requirements.txt └── scripts ├── _utils.sh ├── build_image.sh ├── dev_setup.sh ├── entrypoint.sh ├── format.sh ├── generate_requirements.sh └── validate.sh /.dockerignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .git 3 | 4 | # Cache 5 | .mypy_cache 6 | *__pycache__* 7 | *.egg-info 8 | *.pyc 9 | *.pytest_cache 10 | *.ruff_cache 11 | *.cache* 12 | *.config* 13 | *.local* 14 | 15 | # Machine specific 16 | .idea 17 | .vscode 18 | 19 | # Ignore .env files 20 | .env 21 | .envrc 22 | 23 | # ignore storage dir 24 | /storage 25 | 26 | # ignore dist dir 27 | dist 28 | 29 | # ignore virtualenvs 30 | .venv* 31 | venv* 32 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | indent_size = 2 5 | indent_style = space 6 | end_of_line = lf 7 | charset = utf-8 8 | trim_trailing_whitespace = true 9 | insert_final_newline = true 10 | 11 | [*.py] 12 | indent_size = 4 13 | -------------------------------------------------------------------------------- /.github/workflows/validate.yml: -------------------------------------------------------------------------------- 1 | name: Validate 2 | 3 | on: 4 | push: 5 | branches: 6 | - "main" 7 | pull_request: 8 | types: 9 | - opened 10 | - synchronize 11 | - reopened 12 | branches: 13 | - "main" 14 | 15 | jobs: 16 | validate: 17 | runs-on: ubuntu-latest 18 | strategy: 19 | matrix: 20 | python-version: ["3.11", "3.12"] 21 | fail-fast: false 22 | 23 | steps: 24 | - uses: actions/checkout@v4 25 | 26 | - name: Install uv 27 | uses: astral-sh/setup-uv@v3 28 | with: 29 | enable-cache: true 30 | cache-dependency-glob: "requirements**.txt" 31 | 32 | - name: Set up Python ${{ matrix.python-version }} 33 | uses: actions/setup-python@v5 34 | with: 35 | python-version: ${{ matrix.python-version }} 36 | 37 | - name: Create a virtual environment 38 | run: uv venv --python ${{ matrix.python-version }} 39 | 40 | - name: Install dependencies 41 | run: | 42 | uv pip sync requirements.txt 43 | uv pip install ruff mypy 44 | 45 | - name: Format with ruff 46 | run: uv run ruff format . --check 47 | 48 | - name: Lint with ruff 49 | run: uv run ruff check . 50 | 51 | - name: Type-check with mypy 52 | run: uv run mypy . -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/ignore-files/ for more about ignoring files. 2 | 3 | .DS_Store 4 | 5 | # Cache 6 | .mypy_cache 7 | *__pycache__* 8 | *.egg-info 9 | *.pyc 10 | *.pytest_cache 11 | *.ruff_cache 12 | *.cache* 13 | *.config* 14 | 15 | # Machine specific 16 | .idea 17 | .vscode 18 | 19 | # Ignore .env files 20 | .env 21 | .envrc 22 | 23 | # ignore storage dir 24 | /storage 25 | 26 | # ignore dist dir 27 | dist 28 | 29 | # ignore virtualenvs 30 | .venv* 31 | venv* 32 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM agnohq/python:3.12 2 | 3 | ARG USER=app 4 | ARG APP_DIR=/app 5 | ENV APP_DIR=${APP_DIR} 6 | 7 | # Create user and home directory 8 | RUN groupadd -g 61000 ${USER} \ 9 | && useradd -g 61000 -u 61000 -ms /bin/bash -d ${APP_DIR} ${USER} 10 | 11 | WORKDIR ${APP_DIR} 12 | 13 | # Copy requirements.txt 14 | COPY requirements.txt ./ 15 | 16 | # Install requirements 17 | RUN uv pip sync requirements.txt --system 18 | 19 | # Copy project files 20 | COPY . . 21 | 22 | # Set permissions for the /app directory 23 | RUN chown -R ${USER}:${USER} ${APP_DIR} 24 | 25 | # Switch to non-root user 26 | USER ${USER} 27 | 28 | ENTRYPOINT ["/app/scripts/entrypoint.sh"] 29 | CMD ["chill"] 30 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Agno 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Simple Agent API 2 | 3 | Welcome to the Simple Agent API: a robust, production-ready application for serving Agents as an API. It includes: 4 | * A **FastAPI server** for handling API requests. 5 | * A **PostgreSQL database** for storing Agent sessions, knowledge, and memories. 6 | * A set of **pre-built Agents** to use as a starting point. 7 | 8 | For more information, checkout [Agno](https://agno.link/gh) and give it a ⭐️ 9 | 10 | ## Quickstart 11 | 12 | Follow these steps to get your Agent API up and running: 13 | 14 | > Prerequisites: [docker desktop](https://www.docker.com/products/docker-desktop) should be installed and running. 15 | 16 | ### Clone the repo 17 | 18 | ```sh 19 | git clone https://github.com/agno-agi/agent-api.git 20 | cd agent-api 21 | ``` 22 | 23 | ### Configure API keys 24 | 25 | We use GPT 4.1 as the default model, please export the `OPENAI_API_KEY` environment variable to get started. 26 | 27 | ```sh 28 | export OPENAI_API_KEY="YOUR_API_KEY_HERE" 29 | ``` 30 | 31 | > **Note**: You can use any model provider, just update the agents in the `/agents` folder. 32 | 33 | ### Start the application 34 | 35 | Run the application using docker compose: 36 | 37 | ```sh 38 | docker compose up -d 39 | ``` 40 | 41 | This command starts: 42 | * The **FastAPI server**, running on [http://localhost:8000](http://localhost:8000). 43 | * The **PostgreSQL database**, accessible on `localhost:5432`. 44 | 45 | Once started, you can: 46 | * Test the API at [http://localhost:8000/docs](http://localhost:8000/docs). 47 | 48 | ### Connect to Agno Playground or Agent UI 49 | 50 | * Open the [Agno Playground](https://app.agno.com/playground). 51 | * Add `http://localhost:8000` as a new endpoint. You can name it `Agent API` (or any name you prefer). 52 | * Select your newly added endpoint and start chatting with your Agents. 53 | 54 | https://github.com/user-attachments/assets/a0078ade-9fb7-4a03-a124-d5abcca6b562 55 | 56 | ### Stop the application 57 | 58 | When you're done, stop the application using: 59 | 60 | ```sh 61 | docker compose down 62 | ``` 63 | 64 | ## Prebuilt Agents 65 | 66 | The `/agents` folder contains pre-built agents that you can use as a starting point. 67 | - Web Search Agent: A simple agent that can search the web. 68 | - Agno Assist: An Agent that can help answer questions about Agno. 69 | - Important: Make sure to load the `agno_assist` [knowledge base](http://localhost:8000/docs#/Agents/load_agent_knowledge_v1_agents__agent_id__knowledge_load_post) before using this agent. 70 | - Finance Agent: An agent that uses the YFinance API to get stock prices and financial data. 71 | 72 | ## Development Setup 73 | 74 | To setup your local virtual environment: 75 | 76 | ### Install `uv` 77 | 78 | We use `uv` for python environment and package management. Install it by following the the [`uv` documentation](https://docs.astral.sh/uv/#getting-started) or use the command below for unix-like systems: 79 | 80 | ```sh 81 | curl -LsSf https://astral.sh/uv/install.sh | sh 82 | ``` 83 | 84 | ### Create Virtual Environment & Install Dependencies 85 | 86 | Run the `dev_setup.sh` script. This will create a virtual environment and install project dependencies: 87 | 88 | ```sh 89 | ./scripts/dev_setup.sh 90 | ``` 91 | 92 | ### Activate Virtual Environment 93 | 94 | Activate the created virtual environment: 95 | 96 | ```sh 97 | source .venv/bin/activate 98 | ``` 99 | 100 | (On Windows, the command might differ, e.g., `.venv\Scripts\activate`) 101 | 102 | ## Managing Python Dependencies 103 | 104 | If you need to add or update python dependencies: 105 | 106 | ### Modify pyproject.toml 107 | 108 | Add or update your desired Python package dependencies in the `[dependencies]` section of the `pyproject.toml` file. 109 | 110 | ### Generate requirements.txt 111 | 112 | The `requirements.txt` file is used to build the application image. After modifying `pyproject.toml`, regenerate `requirements.txt` using: 113 | 114 | ```sh 115 | ./scripts/generate_requirements.sh 116 | ``` 117 | 118 | To upgrade all existing dependencies to their latest compatible versions, run: 119 | 120 | ```sh 121 | ./scripts/generate_requirements.sh upgrade 122 | ``` 123 | 124 | ### Rebuild Docker Images 125 | 126 | Rebuild your Docker images to include the updated dependencies: 127 | 128 | ```sh 129 | docker compose up -d --build 130 | ``` 131 | 132 | ## Community & Support 133 | 134 | Need help, have a question, or want to connect with the community? 135 | 136 | * 📚 **[Read the Agno Docs](https://docs.agno.com)** for more in-depth information. 137 | * 💬 **Chat with us on [Discord](https://agno.link/discord)** for live discussions. 138 | * ❓ **Ask a question on [Discourse](https://agno.link/community)** for community support. 139 | * 🐛 **[Report an Issue](https://github.com/agno-agi/agent-api/issues)** on GitHub if you find a bug or have a feature request. 140 | 141 | ## Running in Production 142 | 143 | This repository includes a `Dockerfile` for building a production-ready container image of the application. 144 | 145 | The general process to run in production is: 146 | 147 | 1. Update the `scripts/build_image.sh` file and set your IMAGE_NAME and IMAGE_TAG variables. 148 | 2. Build and push the image to your container registry: 149 | 150 | ```sh 151 | ./scripts/build_image.sh 152 | ``` 153 | 3. Run in your cloud provider of choice. 154 | 155 | ### Detailed Steps 156 | 157 | 1. **Configure for Production** 158 | * Ensure your production environment variables (e.g., `OPENAI_API_KEY`, database connection strings) are securely managed. Most cloud providers offer a way to set these as environment variables for your deployed service. 159 | * Review the agent configurations in the `/agents` directory and ensure they are set up for your production needs (e.g., correct model versions, any production-specific settings). 160 | 161 | 2. **Build Your Production Docker Image** 162 | * Update the `scripts/build_image.sh` script to set your desired `IMAGE_NAME` and `IMAGE_TAG` (e.g., `your-repo/agent-api:v1.0.0`). 163 | * Run the script to build and push the image: 164 | 165 | ```sh 166 | ./scripts/build_image.sh 167 | ``` 168 | 169 | 3. **Deploy to a Cloud Service** 170 | With your image in a registry, you can deploy it to various cloud services that support containerized applications. Some common options include: 171 | 172 | * **Serverless Container Platforms**: 173 | * **Google Cloud Run**: A fully managed platform that automatically scales your stateless containers. Ideal for HTTP-driven applications. 174 | * **AWS App Runner**: Similar to Cloud Run, AWS App Runner makes it easy to deploy containerized web applications and APIs at scale. 175 | * **Azure Container Apps**: Build and deploy modern apps and microservices using serverless containers. 176 | 177 | * **Container Orchestration Services**: 178 | * **Amazon Elastic Container Service (ECS)**: A highly scalable, high-performance container orchestration service that supports Docker containers. Often used with AWS Fargate for serverless compute or EC2 instances for more control. 179 | * **Google Kubernetes Engine (GKE)**: A managed Kubernetes service for deploying, managing, and scaling containerized applications using Google infrastructure. 180 | * **Azure Kubernetes Service (AKS)**: A managed Kubernetes service for deploying and managing containerized applications in Azure. 181 | 182 | * **Platform as a Service (PaaS) with Docker Support** 183 | * **Railway.app**: Offers a simple way to deploy applications from a Dockerfile. It handles infrastructure, scaling, and networking. 184 | * **Render**: Another platform that simplifies deploying Docker containers, databases, and static sites. 185 | * **Heroku**: While traditionally known for buildpacks, Heroku also supports deploying Docker containers. 186 | 187 | * **Specialized Platforms**: 188 | * **Modal**: A platform designed for running Python code (including web servers like FastAPI) in the cloud, often with a focus on batch jobs, scheduled functions, and model inference, but can also serve web endpoints. 189 | 190 | The specific deployment steps will vary depending on the chosen provider. Generally, you'll point the service to your container image in the registry and configure aspects like port mapping (the application runs on port 8000 by default inside the container), environment variables, scaling parameters, and any necessary database connections. 191 | 192 | 4. **Database Configuration** 193 | * The default `docker-compose.yml` sets up a PostgreSQL database for local development. In production, you will typically use a managed database service provided by your cloud provider (e.g., AWS RDS, Google Cloud SQL, Azure Database for PostgreSQL) for better reliability, scalability, and manageability. 194 | * Ensure your deployed application is configured with the correct database connection URL for your production database instance. This is usually set via an environment variables. 195 | -------------------------------------------------------------------------------- /agents/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/agno-agi/agent-api/e1b3bea3e1f212c90e9f16ca22f80c4333e44777/agents/__init__.py -------------------------------------------------------------------------------- /agents/agno_assist.py: -------------------------------------------------------------------------------- 1 | from textwrap import dedent 2 | from typing import Optional 3 | 4 | from agno.agent import Agent, AgentKnowledge 5 | from agno.embedder.openai import OpenAIEmbedder 6 | from agno.knowledge.url import UrlKnowledge 7 | from agno.memory.v2.db.postgres import PostgresMemoryDb 8 | from agno.memory.v2.memory import Memory 9 | from agno.models.openai import OpenAIChat 10 | from agno.storage.agent.postgres import PostgresAgentStorage 11 | from agno.tools.duckduckgo import DuckDuckGoTools 12 | from agno.vectordb.pgvector import PgVector, SearchType 13 | 14 | from db.session import db_url 15 | 16 | 17 | def get_agno_assist_knowledge() -> AgentKnowledge: 18 | return UrlKnowledge( 19 | urls=["https://docs.agno.com/llms-full.txt"], 20 | vector_db=PgVector( 21 | db_url=db_url, 22 | table_name="agno_assist_knowledge", 23 | search_type=SearchType.hybrid, 24 | embedder=OpenAIEmbedder(id="text-embedding-3-small"), 25 | ), 26 | ) 27 | 28 | 29 | def get_agno_assist( 30 | model_id: str = "gpt-4.1", 31 | user_id: Optional[str] = None, 32 | session_id: Optional[str] = None, 33 | debug_mode: bool = True, 34 | ) -> Agent: 35 | return Agent( 36 | name="Agno Assist", 37 | agent_id="agno_assist", 38 | user_id=user_id, 39 | session_id=session_id, 40 | model=OpenAIChat(id=model_id), 41 | # Tools available to the agent 42 | tools=[DuckDuckGoTools()], 43 | # Description of the agent 44 | description=dedent("""\ 45 | You are AgnoAssist, an advanced AI Agent specializing in Agno: a lightweight framework for building multi-modal, reasoning Agents. 46 | 47 | Your goal is to help developers understand and use Agno by providing clear explanations, functional code examples, and best-practice guidance for using Agno. 48 | """), 49 | # Instructions for the agent 50 | instructions=dedent("""\ 51 | Your mission is to provide comprehensive and actionable support for developers working with the Agno framework. Follow these steps to deliver high-quality assistance: 52 | 53 | 1. **Understand the request** 54 | - Analyze the request to determine if it requires a knowledge search, creating an Agent, or both. 55 | - If you need to search the knowledge base, identify 1-3 key search terms related to Agno concepts. 56 | - If you need to create an Agent, search the knowledge base for relevant concepts and use the example code as a guide. 57 | - When the user asks for an Agent, they mean an Agno Agent. 58 | - All concepts are related to Agno, so you can search the knowledge base for relevant information 59 | 60 | After Analysis, always start the iterative search process. No need to wait for approval from the user. 61 | 62 | 2. **Iterative Knowledge Base Search:** 63 | - Use the `search_knowledge_base` tool to iteratively gather information. 64 | - Focus on retrieving Agno concepts, illustrative code examples, and specific implementation details relevant to the user's request. 65 | - Continue searching until you have sufficient information to comprehensively address the query or have explored all relevant search terms. 66 | 67 | After the iterative search process, determine if you need to create an Agent. 68 | 69 | 3. **Code Creation** 70 | - Create complete, working code examples that users can run. For example: 71 | ```python 72 | from agno.agent import Agent 73 | from agno.tools.duckduckgo import DuckDuckGoTools 74 | 75 | agent = Agent(tools=[DuckDuckGoTools()]) 76 | 77 | # Perform a web search and capture the response 78 | response = agent.run("What's happening in France?") 79 | ``` 80 | - Remember to: 81 | * Build the complete agent implementation 82 | * Includes all necessary imports and setup 83 | * Add comprehensive comments explaining the implementation 84 | * Ensure all dependencies are listed 85 | * Include error handling and best practices 86 | * Add type hints and documentation 87 | 88 | Key topics to cover: 89 | - Agent architecture, levels, and capabilities. 90 | - Knowledge base integration and memory management strategies. 91 | - Tool creation, integration, and usage. 92 | - Supported models and their configuration. 93 | - Common development patterns and best practices within Agno. 94 | 95 | Additional Information: 96 | - You are interacting with the user_id: {current_user_id} 97 | - The user's name might be different from the user_id, you may ask for it if needed and add it to your memory if they share it with you.\ 98 | """), 99 | # This makes `current_user_id` available in the instructions 100 | add_state_in_messages=True, 101 | # -*- Knowledge -*- 102 | # Add the knowledge base to the agent 103 | knowledge=get_agno_assist_knowledge(), 104 | # Give the agent a tool to search the knowledge base (this is True by default but set here for clarity) 105 | search_knowledge=True, 106 | # -*- Storage -*- 107 | # Storage chat history and session state in a Postgres table 108 | storage=PostgresAgentStorage(table_name="agno_assist_sessions", db_url=db_url), 109 | # -*- History -*- 110 | # Send the last 3 messages from the chat history 111 | add_history_to_messages=True, 112 | num_history_runs=3, 113 | # Add a tool to read the chat history if needed 114 | read_chat_history=True, 115 | # -*- Memory -*- 116 | # Enable agentic memory where the Agent can personalize responses to the user 117 | memory=Memory( 118 | model=OpenAIChat(id=model_id), 119 | db=PostgresMemoryDb(table_name="user_memories", db_url=db_url), 120 | delete_memories=True, 121 | clear_memories=True, 122 | ), 123 | enable_agentic_memory=True, 124 | # -*- Other settings -*- 125 | # Format responses using markdown 126 | markdown=True, 127 | # Add the current date and time to the instructions 128 | add_datetime_to_instructions=True, 129 | # Show debug logs 130 | debug_mode=debug_mode, 131 | ) 132 | -------------------------------------------------------------------------------- /agents/finance_agent.py: -------------------------------------------------------------------------------- 1 | from textwrap import dedent 2 | from typing import Optional 3 | 4 | from agno.agent import Agent 5 | from agno.memory.v2.db.postgres import PostgresMemoryDb 6 | from agno.memory.v2.memory import Memory 7 | from agno.models.openai import OpenAIChat 8 | from agno.storage.agent.postgres import PostgresAgentStorage 9 | from agno.tools.duckduckgo import DuckDuckGoTools 10 | from agno.tools.yfinance import YFinanceTools 11 | 12 | from db.session import db_url 13 | 14 | 15 | def get_finance_agent( 16 | model_id: str = "gpt-4.1", 17 | user_id: Optional[str] = None, 18 | session_id: Optional[str] = None, 19 | debug_mode: bool = True, 20 | ) -> Agent: 21 | return Agent( 22 | name="Finance Agent", 23 | agent_id="finance_agent", 24 | user_id=user_id, 25 | session_id=session_id, 26 | model=OpenAIChat(id=model_id), 27 | # Tools available to the agent 28 | tools=[ 29 | DuckDuckGoTools(), 30 | YFinanceTools( 31 | stock_price=True, 32 | analyst_recommendations=True, 33 | stock_fundamentals=True, 34 | historical_prices=True, 35 | company_info=True, 36 | company_news=True, 37 | ), 38 | ], 39 | # Description of the agent 40 | description=dedent("""\ 41 | You are FinMaster, a seasoned Wall Street analyst with deep expertise in market analysis and financial data interpretation. 42 | 43 | Your goal is to provide users with comprehensive, accurate, and actionable financial insights, presented in a clear and professional manner. 44 | """), 45 | # Instructions for the agent 46 | instructions=dedent("""\ 47 | As FinMaster, your goal is to deliver insightful and data-driven responses. Adhere to the following process: 48 | 49 | 1. **Understand the Query:** 50 | - Carefully analyze the user's request to determine the specific financial information or analysis needed. 51 | - Identify the relevant company, ticker symbol, or market sector. 52 | 53 | 2. **Gather Financial Data:** 54 | - Utilize available tools to collect up-to-date information for: 55 | - Market Overview (Latest stock price, 52-week high/low) 56 | - Financial Deep Dive (Key metrics like P/E, Market Cap, EPS) 57 | - Professional Insights (Analyst recommendations, recent rating changes) 58 | - If necessary for broader market context or news, use `duckduckgo_search`, prioritizing reputable financial news outlets. 59 | 60 | 3. **Analyze and Synthesize:** 61 | - Interpret the collected data to form a comprehensive view. 62 | - For Market Context: 63 | - Consider industry trends and the company's positioning. 64 | - Perform a high-level competitive analysis if data is available. 65 | - Note market sentiment indicators if discernible from news or analyst opinions. 66 | 67 | 4. **Construct Your Report:** 68 | - **Reporting Style:** 69 | - Begin with a concise executive summary of the key findings. 70 | - Important: USE TABLES for presenting numerical data (e.g., key metrics, historical prices). 71 | - Employ clear section headers for organization (e.g., "Market Overview," "Financial Deep Dive"). 72 | - Use emoji indicators for trends (e.g., 📈 for upward, 📉 for downward) where appropriate. 73 | - Highlight key insights using bullet points. 74 | - Where possible, compare metrics to industry averages or historical performance. 75 | - Include brief explanations for technical terms if they are likely to be unfamiliar to the user. 76 | - Conclude with a brief forward-looking statement or potential outlook, based on available data. 77 | - **Risk Disclosure:** 78 | - Always highlight potential risk factors associated with an investment or market condition. 79 | - Note any significant market uncertainties or volatility. 80 | - Mention relevant regulatory concerns if applicable and known. 81 | 82 | 5. **Leverage Memory & Context:** 83 | - You have access to recent messages. Integrate previous interactions and user clarifications to maintain conversational continuity. 84 | 85 | 6. **Final Quality & Presentation Review:** 86 | - Before sending, critically review your response for: 87 | - Accuracy of data and analysis. 88 | - Clarity and conciseness of language. 89 | - Completeness in addressing the user's query. 90 | - Professionalism in tone and presentation. 91 | - Proper organization and formatting. 92 | 93 | 7. **Handle Uncertainties Gracefully:** 94 | - If you cannot find definitive information for a specific request, or if data is inconclusive, clearly state these limitations. 95 | - Do not speculate beyond the available data. 96 | 97 | Additional Information: 98 | - You are interacting with the user_id: {current_user_id} 99 | - The user's name might be different from the user_id, you may ask for it if needed and add it to your memory if they share it with you. 100 | - Always use the available tools to fetch the latest data; do not rely on pre-existing knowledge for financial figures or recommendations.\ 101 | """), 102 | # This makes `current_user_id` available in the instructions 103 | add_state_in_messages=True, 104 | # -*- Storage -*- 105 | # Storage chat history and session state in a Postgres table 106 | storage=PostgresAgentStorage(table_name="finance_agent_sessions", db_url=db_url), 107 | # -*- History -*- 108 | # Send the last 3 messages from the chat history 109 | add_history_to_messages=True, 110 | num_history_runs=3, 111 | # Add a tool to read the chat history if needed 112 | read_chat_history=True, 113 | # -*- Memory -*- 114 | # Enable agentic memory where the Agent can personalize responses to the user 115 | memory=Memory( 116 | model=OpenAIChat(id=model_id), 117 | db=PostgresMemoryDb(table_name="user_memories", db_url=db_url), 118 | delete_memories=True, 119 | clear_memories=True, 120 | ), 121 | enable_agentic_memory=True, 122 | # -*- Other settings -*- 123 | # Format responses using markdown 124 | markdown=True, 125 | # Add the current date and time to the instructions 126 | add_datetime_to_instructions=True, 127 | # Show debug logs 128 | debug_mode=debug_mode, 129 | ) 130 | -------------------------------------------------------------------------------- /agents/selector.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from typing import List, Optional 3 | 4 | from agents.agno_assist import get_agno_assist 5 | from agents.finance_agent import get_finance_agent 6 | from agents.web_agent import get_web_agent 7 | 8 | 9 | class AgentType(Enum): 10 | WEB_AGENT = "web_agent" 11 | AGNO_ASSIST = "agno_assist" 12 | FINANCE_AGENT = "finance_agent" 13 | 14 | 15 | def get_available_agents() -> List[str]: 16 | """Returns a list of all available agent IDs.""" 17 | return [agent.value for agent in AgentType] 18 | 19 | 20 | def get_agent( 21 | model_id: str = "gpt-4.1", 22 | agent_id: Optional[AgentType] = None, 23 | user_id: Optional[str] = None, 24 | session_id: Optional[str] = None, 25 | debug_mode: bool = True, 26 | ): 27 | if agent_id == AgentType.WEB_AGENT: 28 | return get_web_agent(model_id=model_id, user_id=user_id, session_id=session_id, debug_mode=debug_mode) 29 | elif agent_id == AgentType.AGNO_ASSIST: 30 | return get_agno_assist(model_id=model_id, user_id=user_id, session_id=session_id, debug_mode=debug_mode) 31 | elif agent_id == AgentType.FINANCE_AGENT: 32 | return get_finance_agent(model_id=model_id, user_id=user_id, session_id=session_id, debug_mode=debug_mode) 33 | 34 | raise ValueError(f"Agent: {agent_id} not found") 35 | -------------------------------------------------------------------------------- /agents/web_agent.py: -------------------------------------------------------------------------------- 1 | from textwrap import dedent 2 | from typing import Optional 3 | 4 | from agno.agent import Agent 5 | from agno.memory.v2.db.postgres import PostgresMemoryDb 6 | from agno.memory.v2.memory import Memory 7 | from agno.models.openai import OpenAIChat 8 | from agno.storage.agent.postgres import PostgresAgentStorage 9 | from agno.tools.duckduckgo import DuckDuckGoTools 10 | 11 | from db.session import db_url 12 | 13 | 14 | def get_web_agent( 15 | model_id: str = "gpt-4.1", 16 | user_id: Optional[str] = None, 17 | session_id: Optional[str] = None, 18 | debug_mode: bool = True, 19 | ) -> Agent: 20 | return Agent( 21 | name="Web Search Agent", 22 | agent_id="web_search_agent", 23 | user_id=user_id, 24 | session_id=session_id, 25 | model=OpenAIChat(id=model_id), 26 | # Tools available to the agent 27 | tools=[DuckDuckGoTools()], 28 | # Description of the agent 29 | description=dedent("""\ 30 | You are WebX, an advanced Web Search Agent designed to deliver accurate, context-rich information from the web. 31 | 32 | Your responses should be clear, concise, and supported by citations from the web. 33 | """), 34 | # Instructions for the agent 35 | instructions=dedent("""\ 36 | As WebX, your goal is to provide users with accurate, context-rich information from the web. Follow these steps meticulously: 37 | 38 | 1. Understand and Search: 39 | - Carefully analyze the user's query to identify 1-3 *precise* search terms. 40 | - Use the `duckduckgo_search` tool to gather relevant information. Prioritize reputable and recent sources. 41 | - Cross-reference information from multiple sources to ensure accuracy. 42 | - If initial searches are insufficient or yield conflicting information, refine your search terms or acknowledge the limitations/conflicts in your response. 43 | 44 | 2. Leverage Memory & Context: 45 | - You have access to the last 3 messages. Use the `get_chat_history` tool if more conversational history is needed. 46 | - Integrate previous interactions and user preferences to maintain continuity. 47 | - Keep track of user preferences and prior clarifications. 48 | 49 | 3. Construct Your Response: 50 | - **Start** with a direct and succinct answer that immediately addresses the user's core question. 51 | - **Then, if the query warrants it** (e.g., not for simple factual questions like "What is the weather in Tokyo?" or "What is the capital of France?"), **expand** your answer by: 52 | - Providing clear explanations, relevant context, and definitions. 53 | - Including supporting evidence such as statistics, real-world examples, and data points. 54 | - Addressing common misconceptions or providing alternative viewpoints if appropriate. 55 | - Structure your response for both quick understanding and deeper exploration. 56 | - Avoid speculation and hedging language (e.g., "it might be," "based on my limited knowledge"). 57 | - **Citations are mandatory.** Support all factual claims with clear citations from your search results. 58 | 59 | 4. Enhance Engagement: 60 | - After delivering your answer, propose relevant follow-up questions or related topics the user might find interesting to explore further. 61 | 62 | 5. Final Quality & Presentation Review: 63 | - Before sending, critically review your response for clarity, accuracy, completeness, depth, and overall engagement. 64 | - Ensure your answer is well-organized, easy to read, and aligns with your role as an expert web search agent. 65 | 66 | 6. Handle Uncertainties Gracefully: 67 | - If you cannot find definitive information, if data is inconclusive, or if sources significantly conflict, clearly state these limitations. 68 | - Encourage the user to ask further questions if they need more clarification or if you can assist in a different way. 69 | 70 | Additional Information: 71 | - You are interacting with the user_id: {current_user_id} 72 | - The user's name might be different from the user_id, you may ask for it if needed and add it to your memory if they share it with you.\ 73 | """), 74 | # This makes `current_user_id` available in the instructions 75 | add_state_in_messages=True, 76 | # -*- Storage -*- 77 | # Storage chat history and session state in a Postgres table 78 | storage=PostgresAgentStorage(table_name="web_search_agent_sessions", db_url=db_url), 79 | # -*- History -*- 80 | # Send the last 3 messages from the chat history 81 | add_history_to_messages=True, 82 | num_history_runs=3, 83 | # Add a tool to read the chat history if needed 84 | read_chat_history=True, 85 | # -*- Memory -*- 86 | # Enable agentic memory where the Agent can personalize responses to the user 87 | memory=Memory( 88 | model=OpenAIChat(id=model_id), 89 | db=PostgresMemoryDb(table_name="user_memories", db_url=db_url), 90 | delete_memories=True, 91 | clear_memories=True, 92 | ), 93 | enable_agentic_memory=True, 94 | # -*- Other settings -*- 95 | # Format responses using markdown 96 | markdown=True, 97 | # Add the current date and time to the instructions 98 | add_datetime_to_instructions=True, 99 | # Show debug logs 100 | debug_mode=debug_mode, 101 | ) 102 | -------------------------------------------------------------------------------- /api/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/agno-agi/agent-api/e1b3bea3e1f212c90e9f16ca22f80c4333e44777/api/__init__.py -------------------------------------------------------------------------------- /api/main.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI 2 | from starlette.middleware.cors import CORSMiddleware 3 | 4 | from api.routes.v1_router import v1_router 5 | from api.settings import api_settings 6 | 7 | 8 | def create_app() -> FastAPI: 9 | """Create a FastAPI App""" 10 | 11 | # Create FastAPI App 12 | app: FastAPI = FastAPI( 13 | title=api_settings.title, 14 | version=api_settings.version, 15 | docs_url="/docs" if api_settings.docs_enabled else None, 16 | redoc_url="/redoc" if api_settings.docs_enabled else None, 17 | openapi_url="/openapi.json" if api_settings.docs_enabled else None, 18 | ) 19 | 20 | # Add v1 router 21 | app.include_router(v1_router) 22 | 23 | # Add Middlewares 24 | app.add_middleware( 25 | CORSMiddleware, 26 | allow_origins=api_settings.cors_origin_list, 27 | allow_credentials=True, 28 | allow_methods=["*"], 29 | allow_headers=["*"], 30 | ) 31 | 32 | return app 33 | 34 | 35 | # Create a FastAPI app 36 | app = create_app() 37 | -------------------------------------------------------------------------------- /api/routes/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/agno-agi/agent-api/e1b3bea3e1f212c90e9f16ca22f80c4333e44777/api/routes/__init__.py -------------------------------------------------------------------------------- /api/routes/agents.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from logging import getLogger 3 | from typing import AsyncGenerator, List, Optional 4 | 5 | from agno.agent import Agent, AgentKnowledge 6 | from fastapi import APIRouter, HTTPException, status 7 | from fastapi.responses import StreamingResponse 8 | from pydantic import BaseModel 9 | 10 | from agents.agno_assist import get_agno_assist_knowledge 11 | from agents.selector import AgentType, get_agent, get_available_agents 12 | 13 | logger = getLogger(__name__) 14 | 15 | ###################################################### 16 | ## Routes for the Agent Interface 17 | ###################################################### 18 | 19 | agents_router = APIRouter(prefix="/agents", tags=["Agents"]) 20 | 21 | 22 | class Model(str, Enum): 23 | gpt_4_1 = "gpt-4.1" 24 | o4_mini = "o4-mini" 25 | 26 | 27 | @agents_router.get("", response_model=List[str]) 28 | async def list_agents(): 29 | """ 30 | Returns a list of all available agent IDs. 31 | 32 | Returns: 33 | List[str]: List of agent identifiers 34 | """ 35 | return get_available_agents() 36 | 37 | 38 | async def chat_response_streamer(agent: Agent, message: str) -> AsyncGenerator: 39 | """ 40 | Stream agent responses chunk by chunk. 41 | 42 | Args: 43 | agent: The agent instance to interact with 44 | message: User message to process 45 | 46 | Yields: 47 | Text chunks from the agent response 48 | """ 49 | run_response = await agent.arun(message, stream=True) 50 | async for chunk in run_response: 51 | # chunk.content only contains the text response from the Agent. 52 | # For advanced use cases, we should yield the entire chunk 53 | # that contains the tool calls and intermediate steps. 54 | yield chunk.content 55 | 56 | 57 | class RunRequest(BaseModel): 58 | """Request model for an running an agent""" 59 | 60 | message: str 61 | stream: bool = True 62 | model: Model = Model.gpt_4_1 63 | user_id: Optional[str] = None 64 | session_id: Optional[str] = None 65 | 66 | 67 | @agents_router.post("/{agent_id}/runs", status_code=status.HTTP_200_OK) 68 | async def create_agent_run(agent_id: AgentType, body: RunRequest): 69 | """ 70 | Sends a message to a specific agent and returns the response. 71 | 72 | Args: 73 | agent_id: The ID of the agent to interact with 74 | body: Request parameters including the message 75 | 76 | Returns: 77 | Either a streaming response or the complete agent response 78 | """ 79 | logger.debug(f"RunRequest: {body}") 80 | 81 | try: 82 | agent: Agent = get_agent( 83 | model_id=body.model.value, 84 | agent_id=agent_id, 85 | user_id=body.user_id, 86 | session_id=body.session_id, 87 | ) 88 | except ValueError as e: 89 | raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(e)) 90 | 91 | if body.stream: 92 | return StreamingResponse( 93 | chat_response_streamer(agent, body.message), 94 | media_type="text/event-stream", 95 | ) 96 | else: 97 | response = await agent.arun(body.message, stream=False) 98 | # In this case, the response.content only contains the text response from the Agent. 99 | # For advanced use cases, we should yield the entire response 100 | # that contains the tool calls and intermediate steps. 101 | return response.content 102 | 103 | 104 | @agents_router.post("/{agent_id}/knowledge/load", status_code=status.HTTP_200_OK) 105 | async def load_agent_knowledge(agent_id: AgentType): 106 | """ 107 | Loads the knowledge base for a specific agent. 108 | 109 | Args: 110 | agent_id: The ID of the agent to load knowledge for. 111 | 112 | Returns: 113 | A success message if the knowledge base is loaded. 114 | """ 115 | agent_knowledge: Optional[AgentKnowledge] = None 116 | 117 | if agent_id == AgentType.AGNO_ASSIST: 118 | agent_knowledge = get_agno_assist_knowledge() 119 | else: 120 | raise HTTPException( 121 | status_code=status.HTTP_400_BAD_REQUEST, 122 | detail=f"Agent {agent_id} does not have a knowledge base.", 123 | ) 124 | 125 | try: 126 | await agent_knowledge.aload(upsert=True) 127 | except Exception as e: 128 | logger.error(f"Error loading knowledge base for {agent_id}: {e}") 129 | raise HTTPException( 130 | status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, 131 | detail=f"Failed to load knowledge base for {agent_id}.", 132 | ) 133 | 134 | return {"message": f"Knowledge base for {agent_id} loaded successfully."} 135 | -------------------------------------------------------------------------------- /api/routes/health.py: -------------------------------------------------------------------------------- 1 | from fastapi import APIRouter 2 | 3 | ###################################################### 4 | ## Routes for the API Health 5 | ###################################################### 6 | 7 | health_router = APIRouter(tags=["Health"]) 8 | 9 | 10 | @health_router.get("/health") 11 | def get_health(): 12 | """Check the health of the Api""" 13 | 14 | return { 15 | "status": "success", 16 | } 17 | -------------------------------------------------------------------------------- /api/routes/playground.py: -------------------------------------------------------------------------------- 1 | from agno.playground import Playground 2 | 3 | from agents.agno_assist import get_agno_assist 4 | from agents.finance_agent import get_finance_agent 5 | from agents.web_agent import get_web_agent 6 | 7 | ###################################################### 8 | ## Routes for the Playground Interface 9 | ###################################################### 10 | 11 | # Get Agents to serve in the playground 12 | web_agent = get_web_agent(debug_mode=True) 13 | agno_assist = get_agno_assist(debug_mode=True) 14 | finance_agent = get_finance_agent(debug_mode=True) 15 | 16 | # Create a playground instance 17 | playground = Playground(agents=[web_agent, agno_assist, finance_agent]) 18 | 19 | # Get the router for the playground 20 | playground_router = playground.get_async_router() 21 | -------------------------------------------------------------------------------- /api/routes/v1_router.py: -------------------------------------------------------------------------------- 1 | from fastapi import APIRouter 2 | 3 | from api.routes.agents import agents_router 4 | from api.routes.health import health_router 5 | from api.routes.playground import playground_router 6 | 7 | v1_router = APIRouter(prefix="/v1") 8 | v1_router.include_router(health_router) 9 | v1_router.include_router(agents_router) 10 | v1_router.include_router(playground_router) 11 | -------------------------------------------------------------------------------- /api/settings.py: -------------------------------------------------------------------------------- 1 | from typing import List, Optional 2 | 3 | from pydantic import Field, field_validator 4 | from pydantic_core.core_schema import FieldValidationInfo 5 | from pydantic_settings import BaseSettings 6 | 7 | 8 | class ApiSettings(BaseSettings): 9 | """Api settings that are set using environment variables.""" 10 | 11 | title: str = "agent-api" 12 | version: str = "1.0" 13 | 14 | # Set to False to disable docs at /docs and /redoc 15 | docs_enabled: bool = True 16 | 17 | # Cors origin list to allow requests from. 18 | # This list is set using the set_cors_origin_list validator 19 | # which uses the runtime_env variable to set the 20 | # default cors origin list. 21 | cors_origin_list: Optional[List[str]] = Field(None, validate_default=True) 22 | 23 | @field_validator("cors_origin_list", mode="before") 24 | def set_cors_origin_list(cls, cors_origin_list, info: FieldValidationInfo): 25 | valid_cors = cors_origin_list or [] 26 | 27 | # Add app.agno.com to cors to allow requests from the Agno playground. 28 | valid_cors.append("https://app.agno.com") 29 | # Add localhost to cors to allow requests from the local environment. 30 | valid_cors.append("http://localhost") 31 | # Add localhost:3000 to cors to allow requests from local Agent UI. 32 | valid_cors.append("http://localhost:3000") 33 | 34 | return valid_cors 35 | 36 | 37 | # Create ApiSettings object 38 | api_settings = ApiSettings() 39 | -------------------------------------------------------------------------------- /compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | pgvector: 3 | image: agnohq/pgvector:16 4 | restart: unless-stopped 5 | ports: 6 | - "5432:5432" 7 | volumes: 8 | - pgdata:/var/lib/postgresql/data 9 | environment: 10 | POSTGRES_USER: ${DB_USER:-ai} 11 | POSTGRES_PASSWORD: ${DB_PASSWORD:-ai} 12 | POSTGRES_DB: ${DB_NAME:-ai} 13 | networks: 14 | - agent-api 15 | 16 | api: 17 | build: 18 | context: . 19 | dockerfile: Dockerfile 20 | image: ${IMAGE_NAME:-agent-api}:${IMAGE_TAG:-latest} 21 | command: uvicorn api.main:app --host 0.0.0.0 --port 8000 --reload 22 | restart: unless-stopped 23 | ports: 24 | - "8000:8000" 25 | volumes: 26 | - .:/app 27 | environment: 28 | OPENAI_API_KEY: ${OPENAI_API_KEY} 29 | # AGNO_MONITOR: "True" 30 | # AGNO_API_KEY: ${AGNO_API_KEY} 31 | DB_HOST: pgvector 32 | DB_PORT: 5432 33 | DB_USER: ${DB_USER:-ai} 34 | DB_PASS: ${DB_PASSWORD:-ai} 35 | DB_DATABASE: ${DB_NAME:-ai} 36 | WAIT_FOR_DB: "True" 37 | PRINT_ENV_ON_LOAD: "True" 38 | networks: 39 | - agent-api 40 | depends_on: 41 | - pgvector 42 | extra_hosts: 43 | - "host.docker.internal:host-gateway" 44 | 45 | networks: 46 | agent-api: 47 | 48 | volumes: 49 | pgdata: -------------------------------------------------------------------------------- /db/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/agno-agi/agent-api/e1b3bea3e1f212c90e9f16ca22f80c4333e44777/db/__init__.py -------------------------------------------------------------------------------- /db/session.py: -------------------------------------------------------------------------------- 1 | from typing import Generator 2 | 3 | from sqlalchemy.engine import Engine, create_engine 4 | from sqlalchemy.orm import Session, sessionmaker 5 | 6 | from db.url import get_db_url 7 | 8 | # Create SQLAlchemy Engine using a database URL 9 | db_url: str = get_db_url() 10 | db_engine: Engine = create_engine(db_url, pool_pre_ping=True) 11 | 12 | # Create a SessionLocal class 13 | SessionLocal: sessionmaker[Session] = sessionmaker(autocommit=False, autoflush=False, bind=db_engine) 14 | 15 | 16 | def get_db() -> Generator[Session, None, None]: 17 | """ 18 | Dependency to get a database session. 19 | 20 | Yields: 21 | Session: An SQLAlchemy database session. 22 | """ 23 | db: Session = SessionLocal() 24 | try: 25 | yield db 26 | finally: 27 | db.close() 28 | -------------------------------------------------------------------------------- /db/url.py: -------------------------------------------------------------------------------- 1 | from os import getenv 2 | 3 | 4 | def get_db_url() -> str: 5 | db_driver = getenv("DB_DRIVER", "postgresql+psycopg") 6 | db_user = getenv("DB_USER") 7 | db_pass = getenv("DB_PASS") 8 | db_host = getenv("DB_HOST") 9 | db_port = getenv("DB_PORT") 10 | db_database = getenv("DB_DATABASE") 11 | return "{}://{}{}@{}:{}/{}".format( 12 | db_driver, 13 | db_user, 14 | f":{db_pass}" if db_pass else "", 15 | db_host, 16 | db_port, 17 | db_database, 18 | ) 19 | -------------------------------------------------------------------------------- /example.env: -------------------------------------------------------------------------------- 1 | # Database Configuration 2 | # DB_USER=ai 3 | # DB_PASSWORD=ai 4 | # DB_NAME=ai 5 | 6 | # API Keys 7 | # OPENAI_API_KEY="your_openai_api_key_here" 8 | # ANTHROPIC_API_KEY="your_anthropic_api_key_here" 9 | # AGNO_API_KEY="your_agno_api_key_here" 10 | 11 | # Docker Image Configuration 12 | # IMAGE_NAME=agent-api 13 | # IMAGE_TAG=latest -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "agent-api" 3 | version = "0.1.0" 4 | requires-python = ">=3.11" 5 | readme = "README.md" 6 | authors = [{ name = "Agno", email = "hello@agno.com" }] 7 | 8 | dependencies = [ 9 | "agno==1.4.6", 10 | "duckduckgo-search", 11 | "fastapi[standard]", 12 | "openai", 13 | "pgvector", 14 | "psycopg[binary]", 15 | "sqlalchemy", 16 | "yfinance", 17 | ] 18 | 19 | [project.optional-dependencies] 20 | dev = ["mypy", "ruff"] 21 | 22 | [build-system] 23 | requires = ["setuptools"] 24 | build-backend = "setuptools.build_meta" 25 | 26 | [tool.setuptools.packages.find] 27 | 28 | [tool.ruff] 29 | line-length = 120 30 | exclude = [".venv*"] 31 | [tool.ruff.lint.per-file-ignores] 32 | # Ignore `F401` (import violations) in all `__init__.py` files 33 | "__init__.py" = ["F401", "F403"] 34 | 35 | [tool.mypy] 36 | check_untyped_defs = true 37 | no_implicit_optional = true 38 | warn_unused_configs = true 39 | plugins = ["pydantic.mypy"] 40 | exclude = [".venv*"] 41 | 42 | [[tool.mypy.overrides]] 43 | module = ["pgvector.*", "setuptools.*", "nest_asyncio.*", "agno.*"] 44 | ignore_missing_imports = true 45 | 46 | [tool.uv.pip] 47 | no-annotate = true 48 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # This file was autogenerated by uv via the following command: 2 | # ./scripts/generate_requirements.sh upgrade 3 | agno==1.4.6 4 | annotated-types==0.7.0 5 | anyio==4.9.0 6 | beautifulsoup4==4.13.4 7 | certifi==2025.4.26 8 | cffi==1.17.1 9 | charset-normalizer==3.4.2 10 | click==8.2.0 11 | curl-cffi==0.10.0 12 | distro==1.9.0 13 | dnspython==2.7.0 14 | docstring-parser==0.16 15 | duckduckgo-search==8.0.1 16 | email-validator==2.2.0 17 | fastapi==0.115.12 18 | fastapi-cli==0.0.7 19 | frozendict==2.4.6 20 | gitdb==4.0.12 21 | gitpython==3.1.44 22 | h11==0.16.0 23 | httpcore==1.0.9 24 | httptools==0.6.4 25 | httpx==0.28.1 26 | idna==3.10 27 | jinja2==3.1.6 28 | jiter==0.9.0 29 | lxml==5.4.0 30 | markdown-it-py==3.0.0 31 | markupsafe==3.0.2 32 | mdurl==0.1.2 33 | multitasking==0.0.11 34 | numpy==2.2.5 35 | openai==1.78.0 36 | pandas==2.2.3 37 | peewee==3.18.1 38 | pgvector==0.4.1 39 | platformdirs==4.3.8 40 | primp==0.15.0 41 | protobuf==5.29.4 42 | psycopg==3.2.7 43 | psycopg-binary==3.2.7 44 | pycparser==2.22 45 | pydantic==2.11.4 46 | pydantic-core==2.33.2 47 | pydantic-settings==2.9.1 48 | pygments==2.19.1 49 | python-dateutil==2.9.0.post0 50 | python-dotenv==1.1.0 51 | python-multipart==0.0.20 52 | pytz==2025.2 53 | pyyaml==6.0.2 54 | requests==2.32.3 55 | rich==14.0.0 56 | rich-toolkit==0.14.5 57 | shellingham==1.5.4 58 | six==1.17.0 59 | smmap==5.0.2 60 | sniffio==1.3.1 61 | soupsieve==2.7 62 | sqlalchemy==2.0.40 63 | starlette==0.46.2 64 | tomli==2.2.1 65 | tqdm==4.67.1 66 | typer==0.15.3 67 | typing-extensions==4.13.2 68 | typing-inspection==0.4.0 69 | tzdata==2025.2 70 | urllib3==2.4.0 71 | uvicorn==0.34.2 72 | uvloop==0.21.0 73 | watchfiles==1.0.5 74 | websockets==15.0.1 75 | yfinance==0.2.59 76 | -------------------------------------------------------------------------------- /scripts/_utils.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ############################################################################ 4 | # Helper functions to import in other scripts 5 | ############################################################################ 6 | 7 | print_horizontal_line() { 8 | echo "------------------------------------------------------------" 9 | } 10 | 11 | print_heading() { 12 | print_horizontal_line 13 | echo "-*- $1" 14 | print_horizontal_line 15 | } 16 | 17 | print_info() { 18 | echo "-*- $1" 19 | } 20 | -------------------------------------------------------------------------------- /scripts/build_image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ############################################################################ 4 | # Script to build the Docker image using Docker Buildx. 5 | # 6 | # Instructions: 7 | # 1. Set the IMAGE_NAME and IMAGE_TAG variables to the desired values. 8 | # 2. Ensure Docker Buildx is installed and configured. 9 | # 3. Run 'docker buildx create --use' before executing this script. 10 | # 11 | # This script builds a multi-platform Docker image for linux/amd64 and linux/arm64. 12 | # The image is tagged and pushed to the specified repository. 13 | ############################################################################ 14 | 15 | # Exit immediately if a command exits with a non-zero status. 16 | set -e 17 | 18 | CURR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 19 | WS_ROOT="$(dirname ${CURR_DIR})" 20 | DOCKER_FILE="Dockerfile" 21 | IMAGE_NAME="agent-api" 22 | IMAGE_TAG="latest" 23 | 24 | echo "Running: docker buildx build --platform=linux/amd64,linux/arm64 -t $IMAGE_NAME:$IMAGE_TAG -f $DOCKER_FILE $WS_ROOT --push" 25 | docker buildx build --platform=linux/amd64,linux/arm64 -t $IMAGE_NAME:$IMAGE_TAG -f $DOCKER_FILE $WS_ROOT --push 26 | -------------------------------------------------------------------------------- /scripts/dev_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ############################################################################ 4 | # Development Setup 5 | # - This script creates a virtual environment and installs libraries in editable mode. 6 | # - Please install uv before running this script. 7 | # - Please deactivate the existing virtual environment before running. 8 | # Usage: ./scripts/dev_setup.sh 9 | ############################################################################ 10 | 11 | CURR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 12 | REPO_ROOT="$(dirname $CURR_DIR)" 13 | VENV_DIR="${REPO_ROOT}/.venv" 14 | source ${CURR_DIR}/_utils.sh 15 | 16 | print_heading "Development setup..." 17 | 18 | print_heading "Removing virtual env" 19 | print_info "rm -rf ${VENV_DIR}" 20 | rm -rf ${VENV_DIR} 21 | 22 | print_heading "Creating virtual env" 23 | print_info "VIRTUAL_ENV=${VENV_DIR} uv venv --python 3.12" 24 | VIRTUAL_ENV=${VENV_DIR} uv venv --python 3.12 25 | 26 | print_heading "Installing requirements" 27 | print_info "VIRTUAL_ENV=${VENV_DIR} uv pip install -r ${REPO_ROOT}/requirements.txt" 28 | VIRTUAL_ENV=${VENV_DIR} uv pip install -r ${REPO_ROOT}/requirements.txt 29 | 30 | print_heading "Installing workspace in editable mode with dev dependencies" 31 | print_info "VIRTUAL_ENV=${VENV_DIR} uv pip install -e ${REPO_ROOT}[dev]" 32 | VIRTUAL_ENV=${VENV_DIR} uv pip install -e ${REPO_ROOT}[dev] 33 | 34 | print_heading "Development setup complete" 35 | print_heading "Activate venv using: source .venv/bin/activate" 36 | -------------------------------------------------------------------------------- /scripts/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ############################################################################ 4 | # Container Entrypoint script 5 | ############################################################################ 6 | 7 | if [[ "$PRINT_ENV_ON_LOAD" = true || "$PRINT_ENV_ON_LOAD" = True ]]; then 8 | echo "==================================================" 9 | printenv 10 | echo "==================================================" 11 | fi 12 | 13 | if [[ "$WAIT_FOR_DB" = true || "$WAIT_FOR_DB" = True ]]; then 14 | dockerize \ 15 | -wait tcp://$DB_HOST:$DB_PORT \ 16 | -timeout 300s 17 | fi 18 | 19 | ############################################################################ 20 | # Start App 21 | ############################################################################ 22 | 23 | case "$1" in 24 | chill) 25 | ;; 26 | *) 27 | echo "Running: $@" 28 | exec "$@" 29 | ;; 30 | esac 31 | 32 | echo ">>> Hello World!" 33 | while true; do sleep 18000; done 34 | -------------------------------------------------------------------------------- /scripts/format.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ############################################################################ 4 | # Format the workspace using ruff 5 | # Usage: ./scripts/format.sh 6 | ############################################################################ 7 | 8 | CURR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 9 | REPO_ROOT="$(dirname $CURR_DIR)" 10 | source ${CURR_DIR}/_utils.sh 11 | 12 | print_heading "Formatting workspace..." 13 | 14 | print_heading "Running: ruff format ${REPO_ROOT}" 15 | ruff format ${REPO_ROOT} 16 | 17 | print_heading "Running: ruff check --select I --fix ${REPO_ROOT}" 18 | ruff check --select I --fix ${REPO_ROOT} 19 | -------------------------------------------------------------------------------- /scripts/generate_requirements.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ############################################################################ 4 | # Generate requirements.txt from pyproject.toml 5 | # Usage: 6 | # ./scripts/generate_requirements.sh: Generate requirements.txt 7 | # ./scripts/generate_requirements.sh upgrade: Upgrade requirements.txt 8 | ############################################################################ 9 | 10 | CURR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 11 | REPO_ROOT="$(dirname $CURR_DIR)" 12 | source ${CURR_DIR}/_utils.sh 13 | 14 | print_heading "Generating requirements.txt..." 15 | 16 | if [[ "$#" -eq 1 ]] && [[ "$1" = "upgrade" ]]; 17 | then 18 | print_heading "Generating requirements.txt with upgrade" 19 | UV_CUSTOM_COMPILE_COMMAND="./scripts/generate_requirements.sh upgrade" \ 20 | uv pip compile ${REPO_ROOT}/pyproject.toml --no-cache --upgrade -o ${REPO_ROOT}/requirements.txt 21 | else 22 | print_heading "Generating requirements.txt" 23 | UV_CUSTOM_COMPILE_COMMAND="./scripts/generate_requirements.sh" \ 24 | uv pip compile ${REPO_ROOT}/pyproject.toml --no-cache -o ${REPO_ROOT}/requirements.txt 25 | fi 26 | -------------------------------------------------------------------------------- /scripts/validate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ############################################################################ 4 | # Validate workspace using ruff and mypy 5 | # 1. Lint using ruff 6 | # 2. Type check using mypy 7 | # Usage: ./scripts/validate.sh 8 | ############################################################################ 9 | 10 | CURR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 11 | REPO_ROOT="$(dirname $CURR_DIR)" 12 | source ${CURR_DIR}/_utils.sh 13 | 14 | print_heading "Validating workspace..." 15 | 16 | print_heading "Running: ruff check ${REPO_ROOT}" 17 | ruff check ${REPO_ROOT} 18 | 19 | print_heading "Running: mypy ${REPO_ROOT} --config-file ${REPO_ROOT}/pyproject.toml" 20 | mypy ${REPO_ROOT} --config-file ${REPO_ROOT}/pyproject.toml 21 | --------------------------------------------------------------------------------