├── docs ├── static │ ├── .nojekyll │ └── img │ │ ├── favicon.ico │ │ ├── docusaurus.png │ │ ├── tyler-soap.png │ │ ├── wandb_logo.png │ │ ├── weave_logo.png │ │ ├── tyler_soap_filled.png │ │ ├── docusaurus-social-card.jpg │ │ ├── chat-with-tyler-interface.png │ │ ├── tyler_chat_UI_demo_short.gif │ │ ├── tyler_soap_filled_inverse.png │ │ └── logo.svg ├── blog │ ├── 2021-08-26-welcome │ │ ├── docusaurus-plushie-banner.jpeg │ │ └── index.md │ ├── 2019-05-28-first-blog-post.md │ ├── tags.yml │ ├── 2021-08-01-mdx-blog-post.mdx │ ├── authors.yml │ └── 2019-05-29-long-blog-post.md ├── src │ ├── pages │ │ └── markdown-page.md │ └── components │ │ └── HomepageFeatures │ │ ├── styles.module.css │ │ └── index.tsx ├── docs │ ├── examples │ │ ├── _category_.json │ │ ├── index.md │ │ ├── streaming.md │ │ └── file-storage.md │ ├── api-reference │ │ ├── _category_.json │ │ ├── index.md │ │ └── mcp.md │ ├── category │ │ ├── api-reference.md │ │ └── examples.md │ ├── troubleshooting.md │ ├── installation.md │ ├── chat-with-tyler.md │ ├── intro.md │ ├── tools │ │ ├── web.md │ │ ├── notion.md │ │ ├── audio.md │ │ └── image.md │ └── how-it-works.md ├── tsconfig.json ├── .gitignore ├── README.md ├── package.json ├── database-migrations.md ├── sidebars.ts └── docusaurus.config.js ├── tests ├── mcp │ └── __init__.py ├── __init__.py ├── models │ └── __init__.py ├── tools │ ├── __init__.py │ ├── test_command_line.py │ └── test_web.py ├── utils │ ├── __init__.py │ └── test_agent_runner.py ├── database │ └── __init__.py ├── test_imports.py ├── conftest.py └── test_examples.py ├── tyler ├── cli │ ├── __init__.py │ └── main.py ├── models │ └── __init__.py ├── utils │ ├── __init__.py │ ├── logging.py │ ├── files.py │ └── registry.py ├── mcp │ ├── __init__.py │ ├── utils.py │ └── server_manager.py ├── storage │ └── __init__.py ├── database │ ├── __init__.py │ ├── migrations │ │ ├── script.py.mako │ │ ├── versions │ │ │ ├── 20250206_0506_197750e12030_add_reactions.py │ │ │ ├── 20250207_0101_197750e12031_add_platforms.py │ │ │ ├── 20250206_0505_197750e12029_initial.py │ │ │ └── 20250208_0000_197750e12032_migrate_json_to_jsonb.py │ │ ├── alembic.ini │ │ └── env.py │ ├── config.py │ └── models.py ├── __init__.py └── tools │ └── __init__.py ├── package.json ├── examples ├── assets │ └── sample_pdf.pdf ├── basic.py ├── streaming.py ├── selective_tools.py ├── explicit_stores.py ├── reactions_example.py ├── agent_delegation.py ├── mcp_basic.py ├── tools_basic.py ├── tools_streaming.py └── attachments.py ├── requirements-dev.txt ├── MANIFEST.in ├── .coveragerc ├── requirements-dev.in ├── pytest.ini ├── docker-compose.yml ├── tyler-chat-config.yaml ├── LICENSE ├── .env.example ├── .github └── workflows │ ├── pytest.yml │ ├── publish.yml │ └── deploy-docs.yml ├── .gitignore ├── init_tyler_project.sh ├── scripts ├── release.sh └── bump_version.py ├── requirements.txt └── pyproject.toml /docs/static/.nojekyll: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/mcp/__init__.py: -------------------------------------------------------------------------------- 1 | """MCP service tests.""" -------------------------------------------------------------------------------- /tyler/cli/__init__.py: -------------------------------------------------------------------------------- 1 | """CLI package for Tyler""" -------------------------------------------------------------------------------- /tests/__init__.py: 
-------------------------------------------------------------------------------- 1 | # Empty file to mark directory as Python package -------------------------------------------------------------------------------- /tyler/models/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Models package initialization. 3 | """ -------------------------------------------------------------------------------- /tyler/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Empty file to mark directory as Python package -------------------------------------------------------------------------------- /tests/models/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test models package initialization. 3 | """ -------------------------------------------------------------------------------- /tests/tools/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test tools package initialization. 3 | """ -------------------------------------------------------------------------------- /tests/utils/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test utils package initialization. 3 | """ -------------------------------------------------------------------------------- /tests/database/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test database package initialization. 3 | """ -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "dependencies": { 3 | "@docsearch/react": "^3.8.3" 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /docs/static/img/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adamwdraper/tyler/HEAD/docs/static/img/favicon.ico -------------------------------------------------------------------------------- /docs/static/img/docusaurus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adamwdraper/tyler/HEAD/docs/static/img/docusaurus.png -------------------------------------------------------------------------------- /docs/static/img/tyler-soap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adamwdraper/tyler/HEAD/docs/static/img/tyler-soap.png -------------------------------------------------------------------------------- /docs/static/img/wandb_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adamwdraper/tyler/HEAD/docs/static/img/wandb_logo.png -------------------------------------------------------------------------------- /docs/static/img/weave_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adamwdraper/tyler/HEAD/docs/static/img/weave_logo.png -------------------------------------------------------------------------------- /examples/assets/sample_pdf.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adamwdraper/tyler/HEAD/examples/assets/sample_pdf.pdf 
-------------------------------------------------------------------------------- /docs/static/img/tyler_soap_filled.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adamwdraper/tyler/HEAD/docs/static/img/tyler_soap_filled.png -------------------------------------------------------------------------------- /docs/static/img/docusaurus-social-card.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adamwdraper/tyler/HEAD/docs/static/img/docusaurus-social-card.jpg -------------------------------------------------------------------------------- /docs/static/img/chat-with-tyler-interface.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adamwdraper/tyler/HEAD/docs/static/img/chat-with-tyler-interface.png -------------------------------------------------------------------------------- /docs/static/img/tyler_chat_UI_demo_short.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adamwdraper/tyler/HEAD/docs/static/img/tyler_chat_UI_demo_short.gif -------------------------------------------------------------------------------- /docs/static/img/tyler_soap_filled_inverse.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adamwdraper/tyler/HEAD/docs/static/img/tyler_soap_filled_inverse.png -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | pip-tools>=7.4.1 2 | pipdeptree>=2.25.0 3 | pytest>=8.3.4 4 | pytest-asyncio>=0.25.2 5 | pytest-cov>=6.0.0 6 | coverage>=7.6.10 -------------------------------------------------------------------------------- /docs/blog/2021-08-26-welcome/docusaurus-plushie-banner.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adamwdraper/tyler/HEAD/docs/blog/2021-08-26-welcome/docusaurus-plushie-banner.jpeg -------------------------------------------------------------------------------- /docs/src/pages/markdown-page.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Markdown page example 3 | --- 4 | 5 | # Markdown page example 6 | 7 | You don't need React to write simple standalone pages. 
8 | -------------------------------------------------------------------------------- /tyler/mcp/__init__.py: -------------------------------------------------------------------------------- 1 | """MCP (Model Context Protocol) integration for Tyler.""" 2 | 3 | from .service import MCPService 4 | from .server_manager import MCPServerManager 5 | 6 | __all__ = ["MCPService", "MCPServerManager"] -------------------------------------------------------------------------------- /docs/docs/examples/_category_.json: -------------------------------------------------------------------------------- 1 | { 2 | "label": "Examples", 3 | "position": 5, 4 | "link": { 5 | "type": "generated-index", 6 | "description": "Practical examples demonstrating Tyler's features and capabilities" 7 | } 8 | } -------------------------------------------------------------------------------- /docs/docs/api-reference/_category_.json: -------------------------------------------------------------------------------- 1 | { 2 | "label": "API Reference", 3 | "position": 6, 4 | "link": { 5 | "type": "generated-index", 6 | "description": "Detailed API documentation for Tyler's core components and features" 7 | } 8 | } -------------------------------------------------------------------------------- /docs/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | // This file is not used in compilation. It is here just for a nice editor experience. 3 | "extends": "@docusaurus/tsconfig", 4 | "compilerOptions": { 5 | "baseUrl": "." 6 | }, 7 | "exclude": [".docusaurus", "build"] 8 | } 9 | -------------------------------------------------------------------------------- /tyler/storage/__init__.py: -------------------------------------------------------------------------------- 1 | """File storage module for Tyler""" 2 | import os 3 | from typing import Optional, Set 4 | from .file_store import FileStore 5 | import logging 6 | 7 | # Get logger 8 | logger = logging.getLogger(__name__) 9 | 10 | # Export FileStore 11 | __all__ = ['FileStore'] -------------------------------------------------------------------------------- /tyler/database/__init__.py: -------------------------------------------------------------------------------- 1 | """Database module for Tyler.""" 2 | 3 | # Import only what's needed at package level 4 | from . import models 5 | from . import config 6 | 7 | # These are imported when needed 8 | # from . import cli 9 | # from . import thread_store 10 | # from . import memory_store -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | include README.md 3 | include requirements.txt 4 | include requirements-dev.txt 5 | include pytest.ini 6 | include .coveragerc 7 | 8 | recursive-include tyler *.py 9 | recursive-include tyler/database/migrations * 10 | recursive-include tyler/tools * 11 | recursive-include tests *.py -------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | source = . 
3 | omit = 4 | tests/* 5 | */__init__.py 6 | setup.py 7 | venv/* 8 | env/* 9 | .env/* 10 | 11 | [report] 12 | exclude_lines = 13 | pragma: no cover 14 | def __repr__ 15 | raise NotImplementedError 16 | if __name__ == .__main__.: 17 | pass 18 | raise ImportError -------------------------------------------------------------------------------- /docs/.gitignore: -------------------------------------------------------------------------------- 1 | # Dependencies 2 | /node_modules 3 | 4 | # Production 5 | /build 6 | 7 | # Generated files 8 | .docusaurus 9 | .cache-loader 10 | 11 | # Misc 12 | .DS_Store 13 | .env.local 14 | .env.development.local 15 | .env.test.local 16 | .env.production.local 17 | 18 | npm-debug.log* 19 | yarn-debug.log* 20 | yarn-error.log* 21 | -------------------------------------------------------------------------------- /docs/blog/2019-05-28-first-blog-post.md: -------------------------------------------------------------------------------- 1 | --- 2 | slug: first-blog-post 3 | title: First Blog Post 4 | authors: [slorber, yangshun] 5 | tags: [hola, docusaurus] 6 | --- 7 | 8 | Lorem ipsum dolor sit amet... 9 | 10 | 11 | 12 | ...consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 13 | -------------------------------------------------------------------------------- /requirements-dev.in: -------------------------------------------------------------------------------- 1 | # Install the package in editable mode 2 | -e . 3 | 4 | # Testing 5 | pytest>=8.3.4 6 | pytest-asyncio>=0.25.2 7 | pytest-cov>=6.0.0 8 | coverage>=7.6.10 9 | 10 | # Development tools 11 | pip-tools>=7.4.1 12 | pipdeptree>=2.25.0 13 | pre-commit>=3.6.2 14 | black>=24.3.0 15 | ruff>=0.3.3 16 | mypy>=1.9.0 17 | 18 | # Documentation 19 | mkdocs>=1.5.3 20 | mkdocs-material>=9.5.13 -------------------------------------------------------------------------------- /docs/blog/tags.yml: -------------------------------------------------------------------------------- 1 | facebook: 2 | label: Facebook 3 | permalink: /facebook 4 | description: Facebook tag description 5 | 6 | hello: 7 | label: Hello 8 | permalink: /hello 9 | description: Hello tag description 10 | 11 | docusaurus: 12 | label: Docusaurus 13 | permalink: /docusaurus 14 | description: Docusaurus tag description 15 | 16 | hola: 17 | label: Hola 18 | permalink: /hola 19 | description: Hola tag description 20 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts = 3 | --cov=. 4 | --cov-report=term-missing 5 | --cov-branch 6 | --cov-report=term 7 | --no-cov-on-fail 8 | testpaths = tests 9 | python_files = test_*.py 10 | python_classes = Test 11 | python_functions = test_* 12 | asyncio_mode = auto 13 | markers = 14 | asyncio: mark a test as an async test 15 | examples: mark a test as an example integration test 16 | integration: mark a test as an integration test -------------------------------------------------------------------------------- /docs/docs/category/api-reference.md: -------------------------------------------------------------------------------- 1 | # API Reference 2 | 3 | This section provides detailed documentation for Tyler's core APIs. Here you'll find comprehensive information about the main classes and interfaces that make up the development kit. 
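To get oriented before the class-level pages, here is a minimal sketch of how these core classes fit together, based on `examples/basic.py` in this repository (the model name and purpose shown are illustrative):

```python
import asyncio
from tyler import Agent, Thread, Message

# Create an agent and a conversation thread (values are illustrative)
agent = Agent(model_name="gpt-4.1", purpose="To be a helpful assistant.")
thread = Thread()
thread.add_message(Message(role="user", content="Hello! Can you help me with some tasks?"))

async def main():
    # Process the thread; go() returns the updated thread and any new messages
    processed_thread, new_messages = await agent.go(thread)
    for message in new_messages:
        if message.role == "assistant":
            print(message.content)

asyncio.run(main())
```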
4 | 5 | ## Core APIs 6 | 7 | - [Agent](../api-reference/agent.md) - The main Agent class documentation 8 | - [Thread](../api-reference/thread.md) - Documentation for the Thread class 9 | - [Message](../api-reference/message.md) - Documentation for the Message class -------------------------------------------------------------------------------- /tyler/__init__.py: -------------------------------------------------------------------------------- 1 | """Tyler - A development kit for AI agents with a complete lack of conventional limitations""" 2 | 3 | __version__ = "1.1.0" 4 | 5 | from tyler.utils.logging import get_logger 6 | from tyler.models.agent import Agent, StreamUpdate 7 | from tyler.models.thread import Thread 8 | from tyler.models.message import Message 9 | from tyler.database.thread_store import ThreadStore 10 | from tyler.storage.file_store import FileStore 11 | from tyler.utils.registry import Registry 12 | from tyler.models.attachment import Attachment 13 | 14 | # Configure logging when package is imported 15 | logger = get_logger(__name__) -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | 3 | services: 4 | postgres: 5 | image: postgres:16 6 | container_name: tyler-postgres 7 | environment: 8 | POSTGRES_DB: ${TYLER_DB_NAME:-tyler} 9 | POSTGRES_USER: ${TYLER_DB_USER:-tyler} 10 | POSTGRES_PASSWORD: ${TYLER_DB_PASSWORD:-tyler_dev} 11 | ports: 12 | - "${TYLER_DB_PORT:-5433}:5432" 13 | volumes: 14 | - postgres_data:/var/lib/postgresql/data 15 | healthcheck: 16 | test: ["CMD-SHELL", "pg_isready -U ${TYLER_DB_USER:-tyler}"] 17 | interval: 5s 18 | timeout: 5s 19 | retries: 5 20 | 21 | volumes: 22 | postgres_data: -------------------------------------------------------------------------------- /tyler/database/migrations/script.py.mako: -------------------------------------------------------------------------------- 1 | """${message} 2 | 3 | Revision ID: ${up_revision} 4 | Revises: ${down_revision | comma,n} 5 | Create Date: ${create_date} 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | ${imports if imports else ""} 11 | 12 | # revision identifiers, used by Alembic 13 | revision = ${repr(up_revision)} 14 | down_revision = ${repr(down_revision)} 15 | branch_labels = ${repr(branch_labels)} 16 | depends_on = ${repr(depends_on)} 17 | 18 | 19 | def upgrade() -> None: 20 | ${upgrades if upgrades else "pass"} 21 | 22 | 23 | def downgrade() -> None: 24 | ${downgrades if downgrades else "pass"} -------------------------------------------------------------------------------- /docs/blog/2021-08-01-mdx-blog-post.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | slug: mdx-blog-post 3 | title: MDX Blog Post 4 | authors: [slorber] 5 | tags: [docusaurus] 6 | --- 7 | 8 | Blog posts support [Docusaurus Markdown features](https://docusaurus.io/docs/markdown-features), such as [MDX](https://mdxjs.com/). 9 | 10 | :::tip 11 | 12 | Use the power of React to create interactive blog posts. 13 | 14 | ::: 15 | 16 | {/* truncate */} 17 | 18 | For example, use JSX to create an interactive button: 19 | 20 | ```js 21 | alert('button clicked!')}>Click me! 22 | ``` 23 | 24 | alert('button clicked!')}>Click me! 
25 | -------------------------------------------------------------------------------- /tyler-chat-config.yaml: -------------------------------------------------------------------------------- 1 | # Tyler Agent Configuration 2 | 3 | # Agent Identity 4 | name: "Tyler" 5 | purpose: "To be a helpful AI assistant with access to various tools and capabilities." 6 | notes: | 7 | - Prefer clear, concise communication 8 | - Use tools when appropriate to enhance responses 9 | - Maintain context across conversations 10 | 11 | # Model Configuration 12 | model_name: "gpt-4.1" 13 | temperature: 0.7 14 | max_tool_iterations: 10 15 | 16 | # Tool Configuration 17 | tools: 18 | # Built-in tool modules (as strings) 19 | - "web" 20 | - "slack" 21 | - "notion" 22 | - "command_line" 23 | # Custom tools (as paths to Python files) 24 | - "./examples/custom_tools.py" -------------------------------------------------------------------------------- /tyler/cli/main.py: -------------------------------------------------------------------------------- 1 | """Main CLI for Tyler""" 2 | import click 3 | from tyler.database.cli import cli as db_cli 4 | 5 | @click.group() 6 | def cli(): 7 | """Tyler CLI - Main command-line interface for Tyler.""" 8 | pass 9 | 10 | # Add database commands as a subcommand group 11 | cli.add_command(db_cli, name="db") 12 | 13 | # Import other CLI modules and add their commands 14 | try: 15 | from tyler.cli.chat import cli as chat_cli 16 | cli.add_command(chat_cli, name="chat") 17 | except ImportError: 18 | # Chat CLI might not be available, continue without it 19 | pass 20 | 21 | def main(): 22 | """Entry point for the CLI""" 23 | cli() 24 | 25 | if __name__ == "__main__": 26 | main() -------------------------------------------------------------------------------- /docs/blog/authors.yml: -------------------------------------------------------------------------------- 1 | yangshun: 2 | name: Yangshun Tay 3 | title: Front End Engineer @ Facebook 4 | url: https://github.com/yangshun 5 | image_url: https://github.com/yangshun.png 6 | page: true 7 | socials: 8 | x: yangshunz 9 | github: yangshun 10 | 11 | slorber: 12 | name: Sébastien Lorber 13 | title: Docusaurus maintainer 14 | url: https://sebastienlorber.com 15 | image_url: https://github.com/slorber.png 16 | page: 17 | # customize the url of the author page at /blog/authors/ 18 | permalink: '/all-sebastien-lorber-articles' 19 | socials: 20 | x: sebastienlorber 21 | linkedin: sebastienlorber 22 | github: slorber 23 | newsletter: https://thisweekinreact.com 24 | -------------------------------------------------------------------------------- /docs/docs/category/examples.md: -------------------------------------------------------------------------------- 1 | # Examples 2 | 3 | This section contains various examples of how to use Tyler in different scenarios. Each example demonstrates specific features and use cases of the development kit. 
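As a quick taste of what the examples below cover, here is a minimal sketch of an agent configured with built-in tool modules; the tool names mirror those listed in `tyler-chat-config.yaml` and are illustrative, not required:

```python
from tyler import Agent

# Minimal sketch: enable built-in tool modules by name
agent = Agent(
    model_name="gpt-4.1",
    purpose="To be a helpful AI assistant with access to various tools.",
    tools=["web", "notion", "command_line"],
)
```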
4 | 5 | ## Available Examples 6 | 7 | - [Using Tools](../examples/using-tools.md) - Learn how to use tools with your Tyler agent 8 | - [Full Configuration](../examples/full-configuration.md) - See a complete configuration example 9 | - [Database Storage](../examples/database-storage.md) - Learn how to use database storage for your agent 10 | - [Interrupt Tools](../examples/interrupt-tools.md) - Understand how to handle tool interruptions 11 | - [Message Attachments](../examples/message-attachments.md) - Learn how to work with message attachments -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0) 2 | 3 | Copyright (c) 2024 Adam Draper 4 | 5 | This work is licensed under the Creative Commons Attribution-NonCommercial 4.0 International License. 6 | To view a copy of this license, visit: http://creativecommons.org/licenses/by-nc/4.0/ 7 | 8 | You are free to: 9 | * Share — copy and redistribute the material in any medium or format 10 | * Adapt — remix, transform, and build upon the material 11 | 12 | Under the following terms: 13 | * Attribution — You must give appropriate credit, provide a link to the license, and indicate if changes were made. 14 | * NonCommercial — You may not use the material for commercial purposes. 15 | 16 | For commercial use, please contact the author. -------------------------------------------------------------------------------- /docs/src/components/HomepageFeatures/styles.module.css: -------------------------------------------------------------------------------- 1 | .features { 2 | display: flex; 3 | align-items: center; 4 | justify-content: center; 5 | padding: 2rem 0; 6 | width: 100%; 7 | } 8 | 9 | .featuresContainer { 10 | max-width: 1200px; 11 | margin: 0 auto; 12 | display: flex; 13 | justify-content: center; 14 | } 15 | 16 | .featureItem { 17 | margin-top: 2rem; 18 | text-align: center; 19 | max-width: 360px; 20 | margin-left: auto; 21 | margin-right: auto; 22 | } 23 | 24 | .demoSection { 25 | background-color: var(--ifm-color-emphasis-100); 26 | } 27 | 28 | .demoContainer { 29 | max-width: 800px; 30 | margin: 2rem auto; 31 | border-radius: 8px; 32 | overflow: hidden; 33 | box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); 34 | } 35 | 36 | .demoGif { 37 | width: 100%; 38 | height: auto; 39 | display: block; 40 | } 41 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Website 2 | 3 | This website is built using [Docusaurus](https://docusaurus.io/), a modern static website generator. 4 | 5 | ### Installation 6 | 7 | ``` 8 | $ yarn 9 | ``` 10 | 11 | ### Local Development 12 | 13 | ``` 14 | $ yarn start 15 | ``` 16 | 17 | This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server. 18 | 19 | ### Build 20 | 21 | ``` 22 | $ yarn build 23 | ``` 24 | 25 | This command generates static content into the `build` directory and can be served using any static contents hosting service. 
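To preview the production build locally, you can use the `serve` script defined in `docs/package.json`:

```
$ yarn build
$ yarn serve
```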
26 | 27 | ### Deployment 28 | 29 | Using SSH: 30 | 31 | ``` 32 | $ USE_SSH=true yarn deploy 33 | ``` 34 | 35 | Not using SSH: 36 | 37 | ``` 38 | $ GIT_USER= yarn deploy 39 | ``` 40 | 41 | If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch. 42 | -------------------------------------------------------------------------------- /tests/test_imports.py: -------------------------------------------------------------------------------- 1 | """Tests for top-level package imports.""" 2 | 3 | import pytest 4 | 5 | def test_top_level_imports(): 6 | """Verify that core classes can be imported directly from the tyler package.""" 7 | try: 8 | from tyler import ( 9 | Agent, 10 | StreamUpdate, 11 | Thread, 12 | Message, 13 | ThreadStore, 14 | FileStore, 15 | Registry, 16 | Attachment 17 | ) 18 | # If imports succeed, the test passes implicitly 19 | assert True 20 | except ImportError as e: 21 | pytest.fail(f"Failed to import one or more top-level classes: {e}") 22 | 23 | # Example of testing a utility function if needed (optional) 24 | # def test_top_level_utils(): 25 | # try: 26 | # from tyler import get_logger 27 | # assert callable(get_logger) 28 | # except ImportError as e: 29 | # pytest.fail(f"Failed to import get_logger: {e}") -------------------------------------------------------------------------------- /docs/blog/2021-08-26-welcome/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | slug: welcome 3 | title: Welcome 4 | authors: [slorber, yangshun] 5 | tags: [facebook, hello, docusaurus] 6 | --- 7 | 8 | [Docusaurus blogging features](https://docusaurus.io/docs/blog) are powered by the [blog plugin](https://docusaurus.io/docs/api/plugins/@docusaurus/plugin-content-blog). 9 | 10 | Here are a few tips you might find useful. 11 | 12 | 13 | 14 | Simply add Markdown files (or folders) to the `blog` directory. 15 | 16 | Regular blog authors can be added to `authors.yml`. 17 | 18 | The blog post date can be extracted from filenames, such as: 19 | 20 | - `2019-05-30-welcome.md` 21 | - `2019-05-30-welcome/index.md` 22 | 23 | A blog post folder can be convenient to co-locate blog post images: 24 | 25 |  26 | 27 | The blog supports tags as well! 28 | 29 | **And if you don't want a blog**: just delete this directory, and use `blog: false` in your Docusaurus config. 
30 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | # Database Configuration 2 | TYLER_DB_TYPE=postgresql 3 | TYLER_DB_HOST=localhost 4 | TYLER_DB_PORT=5432 5 | TYLER_DB_NAME=tyler 6 | TYLER_DB_USER=tyler 7 | TYLER_DB_PASSWORD=tyler_dev 8 | 9 | # Optional Database Settings 10 | TYLER_DB_ECHO=false 11 | TYLER_DB_POOL_SIZE=5 12 | TYLER_DB_MAX_OVERFLOW=10 13 | TYLER_DB_POOL_TIMEOUT=30 14 | TYLER_DB_POOL_RECYCLE=1800 15 | 16 | # OpenAI Configuration 17 | OPENAI_API_KEY=your_openai_api_key 18 | 19 | # Logging Configuration 20 | WANDB_API_KEY=your_wandb_api_key 21 | 22 | # Optional Integrations 23 | NOTION_TOKEN=your_notion_token 24 | SLACK_BOT_TOKEN=your_slack_bot_token 25 | SLACK_SIGNING_SECRET=your_slack_signing_secret 26 | 27 | # File storage configuration 28 | TYLER_FILE_STORAGE_TYPE=local 29 | TYLER_FILE_STORAGE_PATH=~/.tyler/files # Optional, defaults to ~/.tyler/files 30 | TYLER_MAX_FILE_SIZE=52428800 # Optional, 50MB default 31 | TYLER_MAX_STORAGE_SIZE=5368709120 # Optional, 5GB limit 32 | TYLER_ALLOWED_MIME_TYPES=application/pdf,image/jpeg,image/png # Optional, comma-separated list 33 | 34 | # Other settings 35 | # Valid log levels: DEBUG, INFO, WARNING, ERROR, CRITICAL 36 | LOG_LEVEL=INFO 37 | -------------------------------------------------------------------------------- /tyler/database/migrations/versions/20250206_0506_197750e12030_add_reactions.py: -------------------------------------------------------------------------------- 1 | """Add reactions to messages 2 | 3 | Revision ID: 197750e12030 4 | Revises: 197750e12029 5 | Create Date: 2025-02-06 05:06:00.000000 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from sqlalchemy.dialects import postgresql 11 | 12 | # revision identifiers, used by Alembic. 
13 | revision = '197750e12030' 14 | down_revision = '197750e12029' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | # Add reactions column to messages table 21 | with op.batch_alter_table('messages', schema=None) as batch_op: 22 | batch_op.add_column(sa.Column('reactions', sa.JSON(), nullable=True)) 23 | 24 | # SQLite doesn't support adding a JSON column with a default value directly, 25 | # so we update the table after adding the column to set empty objects 26 | if op.get_context().dialect.name == 'sqlite': 27 | op.execute("UPDATE messages SET reactions = '{}'") 28 | 29 | 30 | def downgrade(): 31 | # Remove reactions column from messages table 32 | with op.batch_alter_table('messages', schema=None) as batch_op: 33 | batch_op.drop_column('reactions') -------------------------------------------------------------------------------- /.github/workflows/pytest.yml: -------------------------------------------------------------------------------- 1 | name: Python Tests 2 | 3 | on: 4 | pull_request: 5 | branches: [ main ] 6 | push: 7 | branches: [ main ] 8 | 9 | jobs: 10 | test: 11 | runs-on: ubuntu-latest 12 | strategy: 13 | matrix: 14 | python-version: ["3.12"] 15 | 16 | steps: 17 | - uses: actions/checkout@v4 18 | 19 | - name: Set up Python ${{ matrix.python-version }} 20 | uses: actions/setup-python@v5 21 | with: 22 | python-version: ${{ matrix.python-version }} 23 | cache: 'pip' 24 | 25 | - name: Install dependencies 26 | run: | 27 | python -m pip install --upgrade pip 28 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 29 | if [ -f requirements-dev.txt ]; then pip install -r requirements-dev.txt; fi 30 | 31 | - name: Run tests 32 | env: 33 | SLACK_BOT_TOKEN: test-bot-token 34 | SLACK_SIGNING_SECRET: test-signing-secret 35 | OPENAI_API_KEY: test-openai-key 36 | NOTION_TOKEN: test-notion-token 37 | WANDB_API_KEY: test-wandb-key 38 | run: | 39 | PYTHONPATH=. pytest tests/ --cov=. 
--cov-report=term-missing --cov-branch --cov-report=term --no-cov-on-fail -v -p no:warnings -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import pytest 4 | from unittest.mock import patch, MagicMock 5 | 6 | # Add project root to PYTHONPATH 7 | project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 8 | sys.path.insert(0, project_root) 9 | 10 | @pytest.fixture(autouse=True) 11 | def mock_env_vars(): 12 | """Set environment variables for testing""" 13 | with patch.dict(os.environ, { 14 | 'SLACK_BOT_TOKEN': 'test-bot-token', 15 | 'SLACK_SIGNING_SECRET': 'test-signing-secret', 16 | 'OPENAI_API_KEY': 'test-openai-key', 17 | 'NOTION_TOKEN': 'test-notion-token', 18 | 'WANDB_API_KEY': 'test-wandb-key' 19 | }): 20 | yield 21 | 22 | @pytest.fixture(autouse=True) 23 | def mock_openai(): 24 | """Mock OpenAI/litellm calls for testing""" 25 | with patch('litellm.completion') as mock: 26 | mock.return_value = MagicMock( 27 | choices=[MagicMock(message=MagicMock(content="Test response"))] 28 | ) 29 | yield mock 30 | 31 | @pytest.fixture(autouse=True) 32 | def mock_wandb(): 33 | """Mock wandb calls for testing""" 34 | with patch('wandb.init') as mock_init, \ 35 | patch('wandb.log') as mock_log: 36 | mock_init.return_value = MagicMock(__enter__=MagicMock(), __exit__=MagicMock()) 37 | yield mock_init, mock_log -------------------------------------------------------------------------------- /docs/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "docs", 3 | "version": "0.0.0", 4 | "private": true, 5 | "scripts": { 6 | "docusaurus": "docusaurus", 7 | "start": "docusaurus start", 8 | "build": "docusaurus build", 9 | "swizzle": "docusaurus swizzle", 10 | "deploy": "docusaurus deploy", 11 | "clear": "docusaurus clear", 12 | "serve": "docusaurus serve", 13 | "write-translations": "docusaurus write-translations", 14 | "write-heading-ids": "docusaurus write-heading-ids", 15 | "typecheck": "tsc" 16 | }, 17 | "dependencies": { 18 | "@docusaurus/core": "3.7.0", 19 | "@docusaurus/preset-classic": "3.7.0", 20 | "@docusaurus/theme-search-algolia": "^3.7.0", 21 | "@mdx-js/react": "^3.0.0", 22 | "clsx": "^2.0.0", 23 | "prism-react-renderer": "^2.3.0", 24 | "react": "^19.0.0", 25 | "react-dom": "^19.0.0" 26 | }, 27 | "devDependencies": { 28 | "@docusaurus/module-type-aliases": "3.7.0", 29 | "@docusaurus/tsconfig": "3.7.0", 30 | "@docusaurus/types": "3.7.0", 31 | "typescript": "~5.6.2" 32 | }, 33 | "browserslist": { 34 | "production": [ 35 | ">0.5%", 36 | "not dead", 37 | "not op_mini all" 38 | ], 39 | "development": [ 40 | "last 3 chrome version", 41 | "last 3 firefox version", 42 | "last 5 safari version" 43 | ] 44 | }, 45 | "engines": { 46 | "node": ">=18.0" 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /tyler/mcp/utils.py: -------------------------------------------------------------------------------- 1 | """Utility functions for MCP integration.""" 2 | 3 | import logging 4 | from typing import Dict, List, Any, Optional 5 | 6 | from .service import MCPService 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | # Global MCP service instance 11 | _mcp_service: Optional[MCPService] = None 12 | 13 | 14 | def get_mcp_service() -> Optional[MCPService]: 15 | """Get the global MCP service instance. 
16 | 17 | Returns: 18 | Optional[MCPService]: The global MCP service instance, or None if not initialized 19 | """ 20 | return _mcp_service 21 | 22 | 23 | async def initialize_mcp_service(server_configs: List[Dict[str, Any]]) -> MCPService: 24 | """Initialize the global MCP service with the provided server configurations. 25 | 26 | Args: 27 | server_configs: List of server configuration dictionaries 28 | 29 | Returns: 30 | MCPService: The initialized MCP service 31 | """ 32 | global _mcp_service 33 | 34 | if _mcp_service is not None: 35 | logger.warning("MCP service already initialized, reinitializing") 36 | await _mcp_service.cleanup() 37 | 38 | _mcp_service = MCPService() 39 | await _mcp_service.cleanup() 40 | 41 | # Initialize the service 42 | await _mcp_service.initialize(server_configs) 43 | 44 | return _mcp_service 45 | 46 | 47 | async def cleanup_mcp_service() -> None: 48 | """Clean up the global MCP service.""" 49 | global _mcp_service 50 | 51 | if _mcp_service is not None: 52 | await _mcp_service.cleanup() 53 | _mcp_service = None -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Python 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | *.so 6 | .Python 7 | build/ 8 | develop-eggs/ 9 | dist/ 10 | downloads/ 11 | eggs/ 12 | .eggs/ 13 | lib/ 14 | lib64/ 15 | parts/ 16 | sdist/ 17 | var/ 18 | wheels/ 19 | *.egg-info/ 20 | .installed.cfg 21 | *.egg 22 | 23 | # Virtual Environment 24 | venv/ 25 | env/ 26 | ENV/ 27 | .env 28 | .venv 29 | env.bak/ 30 | venv.bak/ 31 | 32 | # IDE specific files 33 | .idea/ 34 | .vscode/ 35 | *.swp 36 | *.swo 37 | .project 38 | .pydevproject 39 | .settings 40 | 41 | # Jupyter Notebook 42 | .ipynb_checkpoints 43 | 44 | # Distribution / packaging 45 | .Python 46 | build/ 47 | develop-eggs/ 48 | dist/ 49 | downloads/ 50 | eggs/ 51 | .eggs/ 52 | lib/ 53 | lib64/ 54 | parts/ 55 | sdist/ 56 | var/ 57 | wheels/ 58 | *.egg-info/ 59 | .installed.cfg 60 | *.egg 61 | 62 | # Unit test / coverage reports 63 | htmlcov/ 64 | .tox/ 65 | .coverage 66 | .coverage.* 67 | .cache 68 | nosetests.xml 69 | coverage.xml 70 | *.cover 71 | .hypothesis/ 72 | .pytest_cache/ 73 | 74 | # Secrets and local configuration 75 | .secrets 76 | *.env 77 | config.local.py 78 | 79 | # Logs and databases 80 | *.log 81 | *.sqlite 82 | *.db 83 | 84 | # OS generated files 85 | .DS_Store 86 | .DS_Store? 87 | ._* 88 | .Spotlight-V100 89 | .Trashes 90 | ehthumbs.db 91 | Thumbs.db 92 | 93 | # Database 94 | database/*.db 95 | 96 | # SQLite databases 97 | database/:memory: 98 | 99 | .env 100 | .python-version 101 | 102 | # Python package specific 103 | *.egg-info/ 104 | dist/ 105 | build/ 106 | .eggs/ 107 | 108 | data/ 109 | ideas.txt -------------------------------------------------------------------------------- /init_tyler_project.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Get the directory where this script is located (Tyler source directory) 4 | TYLER_SOURCE_DIR="$(pwd)" 5 | PARENT_DIR="$(dirname "$TYLER_SOURCE_DIR")" 6 | NEW_PROJECT_DIR="$PARENT_DIR/tyler-examples" 7 | 8 | # Create new project directory 9 | echo "Creating new project directory at $NEW_PROJECT_DIR..." 10 | mkdir -p "$NEW_PROJECT_DIR" 11 | cd "$NEW_PROJECT_DIR" 12 | 13 | # Set up Python environment 14 | echo "Setting up Python environment..." 15 | if [ ! 
-f .python-version ]; then 16 | echo "tyler-examples" > .python-version 17 | pyenv virtualenv 3.12.8 tyler-examples || { echo "Failed to create virtualenv"; exit 1; } 18 | fi 19 | 20 | # Activate the virtual environment 21 | eval "$(pyenv init -)" 22 | eval "$(pyenv virtualenv-init -)" 23 | pyenv activate tyler-examples 24 | 25 | # Install Tyler in development mode 26 | echo "Installing Tyler in development mode..." 27 | pip install -e "$TYLER_SOURCE_DIR" 28 | 29 | # Copy example files directly to root 30 | echo "Copying example files..." 31 | cp "$TYLER_SOURCE_DIR/examples/basic.py" ./ 32 | cp "$TYLER_SOURCE_DIR/examples/database_storage.py" ./ 33 | cp "$TYLER_SOURCE_DIR/examples/memory_storage.py" ./ 34 | 35 | # Create .env file 36 | echo "Creating .env file..." 37 | if [ ! -f .env ]; then 38 | cp "$TYLER_SOURCE_DIR/.env.example" .env 39 | echo "Please edit .env file with your API keys and configuration" 40 | fi 41 | 42 | echo "Project initialization complete!" 43 | echo "Next steps:" 44 | echo "1. Edit .env file with your API keys" 45 | echo "2. Run examples with:" 46 | echo " python basic.py" 47 | echo " python memory_storage.py" 48 | echo " python database_storage.py" -------------------------------------------------------------------------------- /tyler/database/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import Dict, Any 3 | from urllib.parse import quote_plus 4 | 5 | def get_database_url() -> str: 6 | """ 7 | Get the database URL from environment variables or return default PostgreSQL URL. 8 | """ 9 | db_type = os.getenv("TYLER_DB_TYPE", "postgresql") 10 | 11 | if db_type == "postgresql": 12 | host = os.getenv("TYLER_DB_HOST", "localhost") 13 | port = os.getenv("TYLER_DB_PORT", "5432") 14 | database = os.getenv("TYLER_DB_NAME", "tyler") 15 | user = os.getenv("TYLER_DB_USER", "tyler") 16 | password = os.getenv("TYLER_DB_PASSWORD", "tyler_dev") 17 | 18 | return f"postgresql://{user}:{quote_plus(password)}@{host}:{port}/{database}" 19 | 20 | elif db_type == "sqlite": 21 | # Fallback to SQLite for testing or simple deployments 22 | data_dir = os.path.expanduser("~/.tyler/data") 23 | os.makedirs(data_dir, exist_ok=True) 24 | return f"sqlite:///{data_dir}/tyler.db" 25 | 26 | else: 27 | raise ValueError(f"Unsupported database type: {db_type}") 28 | 29 | def get_database_config() -> Dict[str, Any]: 30 | """ 31 | Get the SQLAlchemy configuration dictionary. 
32 | """ 33 | return { 34 | "url": get_database_url(), 35 | "echo": os.getenv("TYLER_DB_ECHO", "false").lower() == "true", 36 | "pool_size": int(os.getenv("TYLER_DB_POOL_SIZE", "5")), 37 | "max_overflow": int(os.getenv("TYLER_DB_MAX_OVERFLOW", "10")), 38 | "pool_timeout": int(os.getenv("TYLER_DB_POOL_TIMEOUT", "30")), 39 | "pool_recycle": int(os.getenv("TYLER_DB_POOL_RECYCLE", "1800")), 40 | } -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish to PyPI 2 | 3 | on: 4 | pull_request: 5 | types: [closed] 6 | branches: 7 | - main 8 | 9 | permissions: 10 | contents: write 11 | pull-requests: write 12 | 13 | jobs: 14 | publish: 15 | # Only run if PR was merged and had the 'release' label 16 | if: github.event.pull_request.merged == true && contains(github.event.pull_request.labels.*.name, 'release') 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: actions/checkout@v4 20 | with: 21 | fetch-depth: 0 22 | token: ${{ secrets.GITHUB_TOKEN }} 23 | 24 | - name: Set up Python 25 | uses: actions/setup-python@v5 26 | with: 27 | python-version: '3.12' 28 | 29 | - name: Install dependencies 30 | run: | 31 | python -m pip install --upgrade pip 32 | pip install hatch 33 | 34 | - name: Extract version from PR branch 35 | run: | 36 | BRANCH_NAME="${{ github.event.pull_request.head.ref }}" 37 | VERSION=${BRANCH_NAME#release/v} 38 | echo "VERSION=$VERSION" >> $GITHUB_ENV 39 | 40 | - name: Create and push tag 41 | run: | 42 | git config --local user.email "github-actions[bot]@users.noreply.github.com" 43 | git config --local user.name "github-actions[bot]" 44 | git tag -a "v$VERSION" -m "Release version $VERSION" 45 | git push origin "v$VERSION" 46 | 47 | - name: Build package 48 | run: hatch build 49 | 50 | - name: Publish to PyPI 51 | env: 52 | HATCH_INDEX_USER: __token__ 53 | HATCH_INDEX_AUTH: ${{ secrets.PYPI_API_TOKEN }} 54 | run: hatch publish -------------------------------------------------------------------------------- /tyler/database/migrations/versions/20250207_0101_197750e12031_add_platforms.py: -------------------------------------------------------------------------------- 1 | """add platforms 2 | 3 | Revision ID: 197750e12031 4 | Revises: 197750e12030 5 | Create Date: 2024-05-07 01:01:00.000000 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '197750e12031' 14 | down_revision = '197750e12030' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | """ 21 | Upgrade database schema to add platforms field to messages and threads. 22 | """ 23 | # Add platforms column to messages table 24 | op.add_column('messages', sa.Column('platforms', sa.JSON(), nullable=True)) 25 | 26 | # Add platforms column to threads table 27 | op.add_column('threads', sa.Column('platforms', sa.JSON(), nullable=True)) 28 | 29 | # Copy data from source to platforms for threads (using SQL for efficiency) 30 | op.execute(""" 31 | UPDATE threads 32 | SET platforms = source 33 | WHERE source IS NOT NULL 34 | """) 35 | 36 | # Drop the source column from threads 37 | op.drop_column('threads', 'source') 38 | 39 | 40 | def downgrade(): 41 | """ 42 | Downgrade database schema to revert changes. 
43 | """ 44 | # Add source column back to threads 45 | op.add_column('threads', sa.Column('source', sa.JSON(), nullable=True)) 46 | 47 | # Copy data from platforms to source (using SQL for efficiency) 48 | op.execute(""" 49 | UPDATE threads 50 | SET source = platforms 51 | WHERE platforms IS NOT NULL 52 | """) 53 | 54 | # Drop the platforms column from threads 55 | op.drop_column('threads', 'platforms') 56 | 57 | # Drop platforms column from messages 58 | op.drop_column('messages', 'platforms') -------------------------------------------------------------------------------- /scripts/release.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Check if gh CLI is installed 5 | if ! command -v gh &> /dev/null; then 6 | echo "GitHub CLI (gh) is not installed. Please install it first:" 7 | echo " brew install gh # on macOS" 8 | echo " gh auth login # to authenticate" 9 | exit 1 10 | fi 11 | 12 | # Check if version type is provided 13 | VERSION_TYPE=${1:-patch} 14 | if [[ ! "$VERSION_TYPE" =~ ^(major|minor|patch)$ ]]; then 15 | echo "Version type must be one of: major, minor, patch" 16 | exit 1 17 | fi 18 | 19 | # Ensure we're starting from an up-to-date main 20 | git checkout main 21 | git pull origin main 22 | 23 | # Get the new version number without making changes yet 24 | NEW_VERSION=$(python scripts/bump_version.py "$VERSION_TYPE" --dry-run) 25 | if [ $? -ne 0 ]; then 26 | echo "Failed to determine new version number" 27 | exit 1 28 | fi 29 | 30 | # Create and checkout a release branch 31 | BRANCH_NAME="release/v$NEW_VERSION" 32 | git checkout -b "$BRANCH_NAME" 33 | 34 | # Now actually bump the version 35 | NEW_VERSION=$(python scripts/bump_version.py "$VERSION_TYPE") 36 | 37 | # Create git commit 38 | git add pyproject.toml tyler/__init__.py 39 | git commit -m "Bump version to $NEW_VERSION" 40 | 41 | # Push the release branch 42 | git push origin "$BRANCH_NAME" 43 | 44 | # Create PR and add release label 45 | PR_URL=$(gh pr create \ 46 | --title "Release v$NEW_VERSION" \ 47 | --body "Automated release PR for version $NEW_VERSION" \ 48 | --label "release" \ 49 | --base main \ 50 | --head "$BRANCH_NAME") 51 | 52 | echo "✨ Release PR prepared! ✨" 53 | echo "" 54 | echo "Pull Request created at: $PR_URL" 55 | echo "" 56 | echo "The GitHub Actions workflow will automatically:" 57 | echo "- Create the git tag" 58 | echo "- Build the package" 59 | echo "- Publish to PyPI" 60 | echo "" 61 | echo "Please review and merge the PR when ready." -------------------------------------------------------------------------------- /docs/docs/troubleshooting.md: -------------------------------------------------------------------------------- 1 | # Troubleshooting 2 | 3 | Common issues and their solutions when working with Tyler. 4 | 5 | ## Installation issues 6 | 7 | ### Package conflicts 8 | If you encounter package conflicts during installation: 9 | 1. Create a new virtual environment 10 | 2. Install Tyler in the clean environment 11 | 3. 
Install additional packages one by one 12 | 13 | ### Version mismatch 14 | Make sure you have compatible versions: 15 | - Python 3.9 or higher 16 | - Latest pip version 17 | - Latest Tyler version 18 | 19 | ## Runtime errors 20 | 21 | ### API key errors 22 | - Check if OPENAI_API_KEY is set 23 | - Verify API key is valid 24 | - Check API key permissions 25 | 26 | ### Tool errors 27 | - Ensure required tool dependencies are installed 28 | - Check tool configuration 29 | - Verify tool permissions 30 | 31 | ### Memory issues 32 | - Reduce max_tokens if hitting context limits 33 | - Use streaming for large responses 34 | - Clear conversation history periodically 35 | 36 | ## Performance issues 37 | 38 | ### Slow responses 39 | - Enable response streaming 40 | - Reduce tool timeout values 41 | - Use async where possible 42 | 43 | ### High memory usage 44 | - Limit conversation history 45 | - Use efficient storage backends 46 | - Clean up temporary files 47 | 48 | ## Common error messages 49 | 50 | ### "API key not found" 51 | Set your OpenAI API key: 52 | ```bash 53 | export OPENAI_API_KEY=your-key-here 54 | ``` 55 | 56 | ### "Tool not found" 57 | Add the tool to your configuration: 58 | ```python 59 | agent = Agent( 60 | tools=["web", "file", "your-tool"] 61 | ) 62 | ``` 63 | 64 | ### "Context length exceeded" 65 | Reduce the input size or clear history: 66 | ```python 67 | agent.clear_history() 68 | ``` 69 | 70 | ## Getting help 71 | 72 | If you're still stuck: 73 | 1. Check the [GitHub issues](https://github.com/adamwdraper/tyler/issues) 74 | 2. Join our [Discord community](https://discord.gg/tyler) 75 | 3. Open a new issue with details about your problem -------------------------------------------------------------------------------- /examples/basic.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Basic example demonstrating a simple conversation with the agent. 4 | """ 5 | # Load environment variables and configure logging first 6 | from dotenv import load_dotenv 7 | load_dotenv() 8 | 9 | from tyler.utils.logging import get_logger 10 | logger = get_logger(__name__) 11 | 12 | # Now import everything else 13 | import os 14 | import asyncio 15 | import weave 16 | import sys 17 | from tyler import Agent, Thread, Message 18 | 19 | try: 20 | if os.getenv("WANDB_API_KEY"): 21 | weave.init("tyler") 22 | logger.debug("Weave tracing initialized successfully") 23 | except Exception as e: 24 | logger.warning(f"Failed to initialize weave tracing: {e}. Continuing without weave.") 25 | 26 | # Initialize the agent 27 | agent = Agent( 28 | model_name="gpt-4.1", 29 | purpose="To be a helpful assistant.", 30 | temperature=0.7 31 | ) 32 | 33 | async def main(): 34 | # Create a thread 35 | thread = Thread() 36 | 37 | # Example conversation 38 | conversations = [ 39 | "Hello! Can you help me with some tasks?", 40 | "What's your purpose?", 41 | "Thank you, that's all for now." 
42 | ] 43 | 44 | for user_input in conversations: 45 | logger.info("User: %s", user_input) 46 | 47 | # Add user message 48 | message = Message( 49 | role="user", 50 | content=user_input 51 | ) 52 | thread.add_message(message) 53 | 54 | # Process the thread 55 | processed_thread, new_messages = await agent.go(thread) 56 | 57 | # Log responses 58 | for message in new_messages: 59 | if message.role == "assistant": 60 | logger.info("Assistant: %s", message.content) 61 | 62 | logger.info("-" * 50) 63 | 64 | if __name__ == "__main__": 65 | try: 66 | asyncio.run(main()) 67 | except KeyboardInterrupt: 68 | logger.warning("Exiting gracefully...") 69 | sys.exit(0) -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | aiohappyeyeballs>=2.4.4 2 | aiohttp>=3.11.11 3 | aiosignal>=1.3.2 4 | aiosqlite>=0.21.0 5 | alembic>=1.14.1 6 | annotated-types>=0.7.0 7 | anyio>=4.7.0 8 | asyncpg>=0.30.0 9 | attrs>=24.3.0 10 | backoff>=2.2.1 11 | beautifulsoup4>=4.12.3 12 | blinker>=1.9.0 13 | cachetools>=5.5.0 14 | click>=8.1.8 15 | distro>=1.9.0 16 | docker-pycreds>=0.4.0 17 | emoji>=2.14.0 18 | filelock>=3.16.1 19 | Flask>=3.1.0 20 | frozenlist>=1.5.0 21 | fsspec>=2024.12.0 22 | gitdb>=4.0.11 23 | GitPython>=3.1.43 24 | gql>=3.5.0 25 | graphql-core>=3.2.5 26 | greenlet>=3.1.1 27 | h11>=0.14.0 28 | httpcore>=1.0.7 29 | httpx>=0.27.2 30 | huggingface-hub>=0.27.0 31 | imap-tools>=1.9.0 32 | importlib_metadata>=8.5.0 33 | iniconfig>=2.0.0 34 | itsdangerous>=2.2.0 35 | Jinja2>=3.1.5 36 | jiter>=0.8.2 37 | jsonschema>=4.23.0 38 | jsonschema-specifications>=2024.10.1 39 | litellm>=1.60.2 40 | Mako>=1.3.9 41 | markdown-it-py>=3.0.0 42 | MarkupSafe>=3.0.2 43 | mcp>=1.3.0 44 | mdurl>=0.1.2 45 | multidict>=6.1.0 46 | narwhals>=1.19.1 47 | numpy>=2.2.1 48 | openai>=1.61.0 49 | packaging>=24.2 50 | pandas==2.2.3 51 | pdf2image>=1.17.0 52 | pillow>=11.0.0 53 | platformdirs>=4.3.6 54 | propcache>=0.2.1 55 | protobuf>=5.29.2 56 | psutil>=6.1.1 57 | pyarrow>=18.1.0 58 | pydantic>=2.10.4 59 | pydantic_core>=2.27.2 60 | Pygments>=2.18.0 61 | pypdf>=5.3.0 62 | python-dateutil>=2.9.0.post0 63 | python-dotenv>=1.0.1 64 | python-magic>=0.4.27 65 | pytz>=2024.2 66 | PyYAML>=6.0.2 67 | referencing>=0.35.1 68 | regex>=2024.11.6 69 | requests>=2.32.3 70 | requests-toolbelt>=1.0.0 71 | rich>=13.9.4 72 | rpds-py>=0.22.3 73 | sentry-sdk>=2.19.2 74 | setproctitle>=1.3.4 75 | six>=1.17.0 76 | slack_sdk>=3.34.0 77 | smmap>=5.0.1 78 | sniffio>=1.3.1 79 | soupsieve>=2.6 80 | SQLAlchemy>=2.0.36 81 | starlette>=0.41.3 82 | tenacity>=9.0.0 83 | tiktoken>=0.8.0 84 | tokenizers>=0.21.0 85 | toml>=0.10.2 86 | tornado>=6.4.2 87 | tqdm>=4.67.1 88 | typing_extensions>=4.12.2 89 | tzdata>=2024.2 90 | urllib3>=2.3.0 91 | uuid_utils>=0.10.0 92 | wandb>=0.19.1 93 | weave>=0.51.32 94 | Werkzeug>=3.1.3 95 | yarl>=1.18.3 96 | zipp>=3.21.0 97 | browser-use>=0.1.40 98 | -------------------------------------------------------------------------------- /.github/workflows/deploy-docs.yml: -------------------------------------------------------------------------------- 1 | name: Docusaurus 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | branches: [main] 8 | # Allows you to run this workflow manually from the Actions tab 9 | workflow_dispatch: 10 | 11 | # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages 12 | permissions: 13 | contents: read 14 | pages: write 15 | id-token: write 16 | 17 | # Allow only 
one concurrent deployment 18 | concurrency: 19 | group: "pages" 20 | cancel-in-progress: true 21 | 22 | jobs: 23 | # Build job 24 | build: 25 | runs-on: ubuntu-latest 26 | steps: 27 | - name: Checkout 28 | uses: actions/checkout@v4 29 | 30 | - name: Set up Node.js 31 | uses: actions/setup-node@v4 32 | with: 33 | node-version: '20' 34 | cache: 'npm' 35 | cache-dependency-path: docs/package-lock.json 36 | 37 | - name: Install dependencies 38 | run: | 39 | cd docs 40 | npm ci 41 | 42 | - name: Build website 43 | run: | 44 | cd docs 45 | npm run build 46 | 47 | - name: Upload build artifacts 48 | uses: actions/upload-artifact@v4 49 | with: 50 | name: docusaurus-build 51 | path: docs/build 52 | retention-days: 1 53 | 54 | # Deploy job (only runs on main branch) 55 | deploy: 56 | needs: build 57 | if: github.ref == 'refs/heads/main' 58 | environment: 59 | name: github-pages 60 | url: ${{ steps.deployment.outputs.page_url }} 61 | runs-on: ubuntu-latest 62 | steps: 63 | - name: Setup Pages 64 | uses: actions/configure-pages@v4 65 | 66 | - name: Download build artifacts 67 | uses: actions/download-artifact@v4 68 | with: 69 | name: docusaurus-build 70 | path: build 71 | 72 | - name: Upload pages artifact 73 | uses: actions/upload-pages-artifact@v3 74 | with: 75 | path: build 76 | 77 | - name: Deploy to GitHub Pages 78 | id: deployment 79 | uses: actions/deploy-pages@v4 -------------------------------------------------------------------------------- /tyler/utils/logging.py: -------------------------------------------------------------------------------- 1 | """Logging configuration for tyler package.""" 2 | import os 3 | import logging 4 | from typing import Optional 5 | 6 | _is_configured = False 7 | 8 | def _ensure_logging_configured(): 9 | """Internal function to configure logging if not already configured.""" 10 | global _is_configured 11 | if _is_configured: 12 | return 13 | 14 | # Get log level from environment and convert to uppercase 15 | log_level_str = os.getenv('LOG_LEVEL', 'INFO').upper() 16 | 17 | # Convert string to logging level constant 18 | try: 19 | log_level = getattr(logging, log_level_str) 20 | except AttributeError: 21 | print(f"Invalid LOG_LEVEL: {log_level_str}. Defaulting to INFO.") 22 | log_level = logging.INFO 23 | 24 | # Configure the root logger with our format 25 | logging.basicConfig( 26 | level=log_level, 27 | format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', 28 | datefmt='%H:%M:%S', 29 | force=True # Ensure we override any existing configuration 30 | ) 31 | 32 | # Get the root logger and set its level 33 | root_logger = logging.getLogger() 34 | root_logger.setLevel(log_level) 35 | 36 | _is_configured = True 37 | 38 | def get_logger(name: Optional[str] = None) -> logging.Logger: 39 | """Get a configured logger. 40 | 41 | This function ensures logging is configured with the appropriate level from 42 | the LOG_LEVEL environment variable before returning a logger. Configuration 43 | happens automatically the first time this function is called. 44 | 45 | Args: 46 | name: The name for the logger. If None, uses the caller's module name. 47 | 48 | Returns: 49 | A configured logger instance. 
50 | 51 | Usage: 52 | # In any file: 53 | from tyler.utils.logging import get_logger 54 | logger = get_logger(__name__) # Automatically configures logging 55 | logger.debug("Debug message") # Will respect LOG_LEVEL from .env 56 | """ 57 | _ensure_logging_configured() 58 | return logging.getLogger(name or '__name__') -------------------------------------------------------------------------------- /examples/streaming.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Example demonstrating streaming updates from the agent. 4 | """ 5 | # Load environment variables and configure logging first 6 | from dotenv import load_dotenv 7 | load_dotenv() 8 | 9 | from tyler.utils.logging import get_logger 10 | logger = get_logger(__name__) 11 | 12 | # Now import everything else 13 | import os 14 | import asyncio 15 | import weave 16 | import sys 17 | from tyler import Agent, Thread, Message, StreamUpdate 18 | 19 | try: 20 | if os.getenv("WANDB_API_KEY"): 21 | weave.init("tyler") 22 | logger.debug("Weave tracing initialized successfully") 23 | except Exception as e: 24 | logger.warning(f"Failed to initialize weave tracing: {e}. Continuing without weave.") 25 | 26 | # Initialize the agent 27 | agent = Agent( 28 | model_name="gpt-4.1", 29 | purpose="To demonstrate streaming updates.", 30 | temperature=0.7 31 | ) 32 | 33 | async def main(): 34 | # Create a thread 35 | thread = Thread() 36 | 37 | # Add a user message 38 | message = Message( 39 | role="user", 40 | content="Write a poem about a brave adventurer." 41 | ) 42 | thread.add_message(message) 43 | 44 | logger.info("User: %s", message.content) 45 | 46 | # Process the thread with streaming 47 | async for update in agent.go_stream(thread): 48 | if update.type == StreamUpdate.Type.CONTENT_CHUNK: 49 | logger.info("Content chunk: %s", update.data) 50 | elif update.type == StreamUpdate.Type.ASSISTANT_MESSAGE: 51 | logger.info("Complete assistant message: %s", update.data.content) 52 | elif update.type == StreamUpdate.Type.TOOL_MESSAGE: 53 | logger.info("Tool message: %s", update.data.content) 54 | elif update.type == StreamUpdate.Type.ERROR: 55 | logger.error("Error: %s", update.data) 56 | elif update.type == StreamUpdate.Type.COMPLETE: 57 | logger.info("Processing complete") 58 | 59 | if __name__ == "__main__": 60 | try: 61 | asyncio.run(main()) 62 | except KeyboardInterrupt: 63 | logger.warning("Exiting gracefully...") 64 | sys.exit(0) -------------------------------------------------------------------------------- /docs/database-migrations.md: -------------------------------------------------------------------------------- 1 | # Database Migrations in Tyler 2 | 3 | This document describes how to manage database migrations in Tyler. 4 | 5 | ## Running Migrations 6 | 7 | When Tyler is updated, you may need to run database migrations to update your database schema. There are several ways to do this depending on how you installed Tyler. 
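Whichever method you choose, the overall flow after updating Tyler is the same: update the package, check which schema revision the database is on, then apply any pending migrations. A minimal sketch of that flow for a pip-based install is shown below — each command is described in the sections that follow, and the exact package-manager invocation may differ in your environment:

```bash
# Update Tyler itself
pip install --upgrade tyler-agent

# See which migration revision the database is currently on
tyler db current

# Apply any pending migrations to reach the latest schema
tyler db upgrade
```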
8 | 9 | ### Using the Tyler CLI (Recommended) 10 | 11 | If you installed Tyler through pip, pypi, or uv, you can use the built-in CLI command: 12 | 13 | ```bash 14 | # Upgrade to latest version 15 | tyler db upgrade 16 | 17 | # Check current migration version 18 | tyler db current 19 | 20 | # View migration history 21 | tyler db history 22 | ``` 23 | 24 | ### Using the Direct Database CLI 25 | 26 | Tyler also provides a dedicated database CLI: 27 | 28 | ```bash 29 | tyler-db upgrade 30 | ``` 31 | 32 | ### Using Programmatic API 33 | 34 | You can also run migrations from your Python code: 35 | 36 | ```python 37 | import asyncio 38 | from alembic import command 39 | from tyler.database.cli import get_alembic_config 40 | 41 | async def run_migrations(): 42 | alembic_cfg = get_alembic_config() 43 | command.upgrade(alembic_cfg, "head") 44 | 45 | # Run migrations 46 | asyncio.run(run_migrations()) 47 | ``` 48 | 49 | ## When to Run Migrations 50 | 51 | You should run migrations: 52 | 53 | 1. After updating Tyler to a new version 54 | 2. When instructed to in the release notes 55 | 3. Before using new features that require database schema changes 56 | 57 | ## Common Migration Commands 58 | 59 | | Command | Description | 60 | |---------|-------------| 61 | | `tyler db upgrade` | Update database to latest schema version | 62 | | `tyler db downgrade` | Downgrade database by one version | 63 | | `tyler db current` | Show current database version | 64 | | `tyler db history` | Show migration history | 65 | | `tyler db migrate` | Generate a new migration based on model changes (developer use) | 66 | 67 | ## Troubleshooting 68 | 69 | If you encounter issues running migrations: 70 | 71 | 1. Check your database connection settings 72 | 2. Ensure you're using the latest version of Tyler 73 | 3. Make sure your database user has sufficient permissions 74 | 4. Check the logs for detailed error messages -------------------------------------------------------------------------------- /tyler/database/migrations/alembic.ini: -------------------------------------------------------------------------------- 1 | [alembic] 2 | # path to migration scripts 3 | script_location = tyler/database/migrations 4 | 5 | # template used to generate migration files 6 | file_template = %%(year)d%%(month).2d%%(day).2d_%%(hour).2d%%(minute).2d_%%(rev)s_%%(slug)s 7 | 8 | # timezone to use when rendering the date 9 | # within the migration file as well as the filename. 10 | # string value is passed to dateutil.tz.gettz() 11 | timezone = UTC 12 | 13 | # max length of characters to apply to the 14 | # "slug" field 15 | truncate_slug_length = 40 16 | 17 | # set to 'true' to run the environment during 18 | # the 'revision' command, regardless of autogenerate 19 | revision_environment = false 20 | 21 | # set to 'true' to allow .pyc and .pyo files without 22 | # a source .py file to be detected as revisions in the 23 | # versions/ directory 24 | sourceless = false 25 | 26 | # version location specification; this defaults 27 | # to migrations/versions. When using multiple version 28 | # directories, initial revisions must be specified with --version-path 29 | version_locations = %(here)s/versions 30 | 31 | # the output encoding used when revision files 32 | # are written from script.py.mako 33 | output_encoding = utf-8 34 | 35 | [post_write_hooks] 36 | # post_write_hooks defines scripts or Python functions that are run 37 | # on newly generated revision scripts. 
See the documentation for further 38 | # detail and examples 39 | 40 | # format using "black" - use the console_scripts runner 41 | hooks = black 42 | black.type = console_scripts 43 | black.entrypoint = black 44 | black.options = -l 79 REVISION_SCRIPT_FILENAME 45 | 46 | # Logging configuration 47 | [loggers] 48 | keys = root,sqlalchemy,alembic 49 | 50 | [handlers] 51 | keys = console 52 | 53 | [formatters] 54 | keys = generic 55 | 56 | [logger_root] 57 | level = WARN 58 | handlers = console 59 | qualname = 60 | 61 | [logger_sqlalchemy] 62 | level = WARN 63 | handlers = 64 | qualname = sqlalchemy.engine 65 | 66 | [logger_alembic] 67 | level = INFO 68 | handlers = 69 | qualname = alembic 70 | 71 | [handler_console] 72 | class = StreamHandler 73 | args = (sys.stderr,) 74 | level = NOTSET 75 | formatter = generic 76 | 77 | [formatter_generic] 78 | format = %(levelname)-5.5s [%(name)s] %(message)s 79 | datefmt = %H:%M:%S -------------------------------------------------------------------------------- /examples/selective_tools.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Example demonstrating the use of selective tools loading from built-in modules. 4 | """ 5 | # Load environment variables and configure logging first 6 | from dotenv import load_dotenv 7 | load_dotenv() 8 | 9 | from tyler.utils.logging import get_logger 10 | logger = get_logger(__name__) 11 | 12 | # Now import everything else 13 | import os 14 | import asyncio 15 | import weave 16 | import sys 17 | from tyler import Agent, Thread, Message 18 | 19 | # Initialize weave for tracing if configured 20 | try: 21 | if os.getenv("WANDB_API_KEY"): 22 | weave.init("tyler") 23 | logger.debug("Weave tracing initialized successfully") 24 | except Exception as e: 25 | logger.warning(f"Failed to initialize weave tracing: {e}. Continuing without weave.") 26 | 27 | # Initialize an agent with selective tools from the notion module 28 | # This agent can only search Notion but can't create/edit pages 29 | agent = Agent( 30 | model_name="gpt-4.1", 31 | purpose="To help with searching Notion without being able to modify anything", 32 | tools=[ 33 | "notion:notion-search,notion-get_page", # Only include search and get_page tools 34 | "web", # Include all web tools 35 | ] 36 | ) 37 | 38 | # For demonstration, let's display what tools were actually loaded 39 | logger.info("Available tools:") 40 | for tool_name in agent._processed_tools: 41 | logger.info(f"- {tool_name['function']['name']}") 42 | 43 | async def main(): 44 | # Create a thread 45 | thread = Thread() 46 | 47 | # Example conversation 48 | user_input = "What tools do you have available to interact with Notion? Can you create a new Notion page?" 
49 | logger.info("User: %s", user_input) 50 | 51 | # Add user message 52 | message = Message( 53 | role="user", 54 | content=user_input 55 | ) 56 | thread.add_message(message) 57 | 58 | # Process the thread 59 | processed_thread, new_messages = await agent.go(thread) 60 | 61 | # Log responses 62 | for message in new_messages: 63 | if message.role == "assistant": 64 | logger.info("Assistant: %s", message.content) 65 | 66 | logger.info("-" * 50) 67 | 68 | if __name__ == "__main__": 69 | try: 70 | asyncio.run(main()) 71 | except KeyboardInterrupt: 72 | logger.warning("Exiting gracefully...") 73 | sys.exit(0) -------------------------------------------------------------------------------- /docs/src/components/HomepageFeatures/index.tsx: -------------------------------------------------------------------------------- 1 | import type {ReactNode} from 'react'; 2 | import clsx from 'clsx'; 3 | import Heading from '@theme/Heading'; 4 | import styles from './styles.module.css'; 5 | 6 | type FeatureItem = { 7 | title: string; 8 | description: ReactNode; 9 | }; 10 | 11 | const FeatureList: FeatureItem[] = [ 12 | { 13 | title: 'Multimodal Support', 14 | description: ( 15 | <> 16 | Process and understand images, audio, PDFs, and more out of the box. Built-in support for handling various file types with automatic content extraction. 17 | > 18 | ), 19 | }, 20 | { 21 | title: 'Ready-to-Use Tools', 22 | description: ( 23 | <> 24 | Comprehensive set of built-in tools for common tasks, with easy integration of custom capabilities. Connect with services like Slack and Notion seamlessly. 25 | > 26 | ), 27 | }, 28 | { 29 | title: 'Structured Data Model', 30 | description: ( 31 | <> 32 | Built-in support for threads, messages, and attachments to maintain conversation context. Choose between in-memory, SQLite, or PostgreSQL for persistence. 33 | > 34 | ), 35 | }, 36 | ]; 37 | 38 | function Feature({title, description}: FeatureItem) { 39 | return ( 40 | 41 | 42 | {title} 43 | {description} 44 | 45 | 46 | ); 47 | } 48 | 49 | export default function HomepageFeatures(): ReactNode { 50 | return ( 51 | <> 52 | 53 | 54 | 55 | 56 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | {FeatureList.map((props, idx) => ( 70 | 71 | ))} 72 | 73 | 74 | 75 | > 76 | ); 77 | } 78 | -------------------------------------------------------------------------------- /tests/test_examples.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Test file to run all examples as integration tests. 4 | This ensures that all examples are working correctly. 
5 | """ 6 | import os 7 | import sys 8 | import pytest 9 | import importlib.util 10 | import asyncio 11 | from pathlib import Path 12 | 13 | # Get the examples directory path 14 | EXAMPLES_DIR = Path(__file__).parent.parent / "examples" 15 | 16 | # Get all Python files in the examples directory 17 | example_files = [f for f in EXAMPLES_DIR.glob("*.py") if f.is_file() and not f.name.startswith("__")] 18 | 19 | # Skip these examples in automated tests (if any are problematic or require user interaction) 20 | SKIP_EXAMPLES = [] 21 | 22 | 23 | def import_module_from_path(path): 24 | """Import a module from a file path.""" 25 | module_name = path.stem 26 | spec = importlib.util.spec_from_file_location(module_name, path) 27 | module = importlib.util.module_from_spec(spec) 28 | sys.modules[module_name] = module 29 | spec.loader.exec_module(module) 30 | return module 31 | 32 | 33 | def run_example_main(module): 34 | """Run the main function of an example module.""" 35 | if hasattr(module, "main"): 36 | if asyncio.iscoroutinefunction(module.main): 37 | return asyncio.run(module.main()) 38 | else: 39 | return module.main() 40 | return None 41 | 42 | 43 | @pytest.mark.examples # Mark all example tests with 'examples' marker 44 | @pytest.mark.integration # Also mark as integration tests 45 | @pytest.mark.parametrize("example_path", example_files) 46 | def test_example(example_path, monkeypatch): 47 | """Test that an example runs without errors.""" 48 | example_name = example_path.name 49 | 50 | # Skip examples that are in the skip list 51 | if example_name in SKIP_EXAMPLES: 52 | pytest.skip(f"Skipping {example_name} as it's in the skip list") 53 | 54 | # Set up environment for examples 55 | monkeypatch.setattr("sys.argv", [str(example_path)]) 56 | 57 | # Some examples might use input() - mock it to return empty string 58 | monkeypatch.setattr("builtins.input", lambda _: "") 59 | 60 | # Import the example module 61 | try: 62 | module = import_module_from_path(example_path) 63 | 64 | # If the module has a main function, run it 65 | # Otherwise, the import itself is the test 66 | if hasattr(module, "main"): 67 | run_example_main(module) 68 | 69 | # If we got here, the example ran without errors 70 | assert True 71 | except Exception as e: 72 | pytest.fail(f"Example {example_name} failed with error: {str(e)}") -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["hatchling"] 3 | build-backend = "hatchling.build" 4 | 5 | [project] 6 | name = "tyler-agent" 7 | version = "1.1.0" 8 | description = "Tyler: A development kit for manifesting AI agents with a complete lack of conventional limitations" 9 | readme = "README.md" 10 | requires-python = ">=3.12" 11 | license = {text = "CC BY-NC 4.0"} 12 | authors = [ 13 | {name = "adamwdraper"} 14 | ] 15 | classifiers = [ 16 | "Development Status :: 3 - Alpha", 17 | "Intended Audience :: Developers", 18 | "Programming Language :: Python :: 3", 19 | "Programming Language :: Python :: 3.12", 20 | "Operating System :: OS Independent", 21 | "License :: Free for non-commercial use", 22 | ] 23 | dependencies = [ 24 | "litellm>=1.60.2", 25 | "openai>=1.61.0", 26 | "tiktoken>=0.8.0", 27 | "pdf2image>=1.17.0", 28 | "pandas>=2.2.3", 29 | "pypdf>=5.3.0", 30 | "python-magic>=0.4.0", 31 | "pillow>=11.0.0", 32 | "SQLAlchemy>=2.0.36", 33 | "greenlet>=3.1.1", 34 | "alembic>=1.14.1", 35 | "asyncpg>=0.30.0", 36 | 
"aiosqlite>=0.21.0", 37 | "psycopg2-binary>=2.9.9", 38 | "aiohttp>=3.11.11", 39 | "httpx>=0.27.2", 40 | "requests>=2.32.3", 41 | "beautifulsoup4>=4.12.0", 42 | "python-dotenv>=1.0.1", 43 | "click>=8.1.8", 44 | "pydantic>=2.10.4", 45 | "backoff>=2.2.1", 46 | "uuid_utils>=0.10.0", 47 | "weave>=0.51.32", 48 | "wandb>=0.19.1", 49 | "slack_sdk>=3.34.0", 50 | "huggingface-hub>=0.27.0", 51 | ] 52 | 53 | [project.optional-dependencies] 54 | dev = [ 55 | "pytest>=8.3.4", 56 | "pytest-asyncio>=0.25.2", 57 | "pytest-cov>=6.0.0", 58 | "coverage>=7.6.10", 59 | "pip-tools>=7.4.1", 60 | "pipdeptree>=2.25.0", 61 | ] 62 | 63 | [project.urls] 64 | Homepage = "https://github.com/adamwdraper/tyler" 65 | Documentation = "https://github.com/adamwdraper/tyler#readme" 66 | Repository = "https://github.com/adamwdraper/tyler" 67 | "Bug Tracker" = "https://github.com/adamwdraper/tyler/issues" 68 | 69 | [project.scripts] 70 | tyler = "tyler.cli.main:main" 71 | tyler-db = "tyler.database.cli:main" 72 | tyler-chat = "tyler.cli.chat:main" 73 | 74 | [tool.hatch.build.targets.wheel] 75 | packages = ["tyler"] 76 | 77 | [tool.hatch.build] 78 | include = [ 79 | "tyler/**/*.py", 80 | "tyler/database/migrations/alembic.ini", 81 | "tyler/database/migrations/script.py.mako", 82 | "tyler/database/migrations/env.py", 83 | "tyler/database/migrations/versions/*.py", 84 | ] 85 | exclude = [ 86 | "**/.env", 87 | "**/*.pyc", 88 | "**/__pycache__", 89 | "**/*.db", 90 | "**/*.sqlite", 91 | "**/data", 92 | "**/.coverage", 93 | "**/htmlcov", 94 | "**/dist", 95 | "**/build", 96 | "**/*.egg-info", 97 | ] -------------------------------------------------------------------------------- /docs/docs/api-reference/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 1 3 | --- 4 | 5 | # API Reference 6 | 7 | This section provides detailed documentation for Tyler's core components and APIs. Each page covers a specific component's interface, methods, and usage patterns. 8 | 9 | ## Core Components 10 | 11 | 1. [Agent](./agent.md) - The central component for managing conversations and executing tasks 12 | 2. [Thread](./thread.md) - Manages conversation history and context 13 | 3. [Message](./message.md) - Handles individual interactions and content 14 | 4. [Attachment](./attachment.md) - Manages file attachments and content storage 15 | 16 | ## Installation 17 | 18 | To use these components, install Tyler: 19 | 20 | ```bash 21 | pip install tyler-agent 22 | ``` 23 | 24 | ## Basic Usage 25 | 26 | Here's a quick example of using the core components: 27 | 28 | ```python 29 | from tyler import Agent, Thread, Message 30 | 31 | # Create an agent 32 | agent = Agent( 33 | model_name="gpt-4.1", 34 | purpose="To help with tasks" 35 | ) 36 | 37 | # Create a thread 38 | thread = Thread() 39 | 40 | # Add a message 41 | message = Message( 42 | role="user", 43 | content="Hello!" 44 | ) 45 | thread.add_message(message) 46 | 47 | # Process the thread 48 | processed_thread, new_messages = await agent.go(thread) 49 | ``` 50 | 51 | ## Component Relationships 52 | 53 | The components work together in the following way: 54 | - `Agent` processes threads and manages tools 55 | - `Thread` contains messages and maintains context 56 | - `Message` holds content and metadata 57 | 58 | ## Type Definitions 59 | 60 | All components use TypeScript-style type definitions: 61 | 62 | ```python 63 | class Agent: 64 | """The main agent class.""" 65 | model_name: str 66 | purpose: str 67 | tools: List[Tool] 68 | ... 
69 | 70 | class Thread: 71 | """Conversation thread class.""" 72 | messages: List[Message] 73 | system_prompt: Optional[str] 74 | ... 75 | 76 | class Message: 77 | """Individual message class.""" 78 | role: str 79 | content: str 80 | name: Optional[str] 81 | ... 82 | ``` 83 | 84 | ## Error Handling 85 | 86 | All components use standard Python exceptions: 87 | 88 | ```python 89 | try: 90 | await agent.go(thread) 91 | except AgentError as e: 92 | print(f"Agent error: {e}") 93 | except ThreadError as e: 94 | print(f"Thread error: {e}") 95 | except MessageError as e: 96 | print(f"Message error: {e}") 97 | ``` 98 | 99 | ## See Also 100 | 101 | - [Examples](../examples/index.md) - Practical usage examples 102 | - [Configuration](../configuration.md) - Configuration options 103 | - [Core Concepts](../core-concepts.md) - Architecture overview -------------------------------------------------------------------------------- /tyler/database/models.py: -------------------------------------------------------------------------------- 1 | import json 2 | from sqlalchemy.types import TypeDecorator, TEXT, JSON 3 | 4 | """Database models for SQLAlchemy""" 5 | from sqlalchemy import Column, String, DateTime, Text, ForeignKey, Integer 6 | from sqlalchemy.dialects.postgresql import JSONB 7 | from sqlalchemy.orm import declarative_base 8 | from sqlalchemy.orm import relationship 9 | from datetime import datetime, UTC 10 | 11 | class JSONBCompat(TypeDecorator): 12 | impl = TEXT 13 | cache_ok = True 14 | 15 | def load_dialect_impl(self, dialect): 16 | if dialect.name == 'postgresql': 17 | return dialect.type_descriptor(JSONB()) 18 | else: 19 | return dialect.type_descriptor(JSON()) 20 | 21 | def process_bind_param(self, value, dialect): 22 | if dialect.name == 'postgresql': 23 | return value 24 | if value is not None: 25 | return value 26 | return value 27 | 28 | def process_result_value(self, value, dialect): 29 | if dialect.name == 'postgresql': 30 | return value 31 | if value is not None: 32 | return value 33 | return value 34 | 35 | Base = declarative_base() 36 | 37 | class ThreadRecord(Base): 38 | __tablename__ = 'threads' 39 | 40 | id = Column(String, primary_key=True) 41 | title = Column(String, nullable=True) 42 | attributes = Column(JSONBCompat, nullable=False, default={}) 43 | platforms = Column(JSONBCompat, nullable=True) 44 | created_at = Column(DateTime(timezone=True), default=lambda: datetime.now(UTC)) 45 | updated_at = Column(DateTime(timezone=True), default=lambda: datetime.now(UTC), onupdate=lambda: datetime.now(UTC)) 46 | 47 | messages = relationship("MessageRecord", back_populates="thread", cascade="all, delete-orphan") 48 | 49 | class MessageRecord(Base): 50 | __tablename__ = 'messages' 51 | 52 | id = Column(String, primary_key=True) 53 | thread_id = Column(String, ForeignKey('threads.id', ondelete='CASCADE'), nullable=False) 54 | sequence = Column(Integer, nullable=False) 55 | role = Column(String, nullable=False) 56 | content = Column(Text, nullable=True) 57 | name = Column(String, nullable=True) 58 | tool_call_id = Column(String, nullable=True) 59 | tool_calls = Column(JSONBCompat, nullable=True) 60 | attributes = Column(JSONBCompat, nullable=False, default={}) 61 | timestamp = Column(DateTime(timezone=True), default=lambda: datetime.now(UTC)) 62 | source = Column(JSONBCompat, nullable=True) 63 | platforms = Column(JSONBCompat, nullable=True) 64 | attachments = Column(JSONBCompat, nullable=True) 65 | metrics = Column(JSONBCompat, nullable=False, default={}) 66 | reactions = 
Column(JSONBCompat, nullable=True) 67 | 68 | thread = relationship("ThreadRecord", back_populates="messages") -------------------------------------------------------------------------------- /scripts/bump_version.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import re 3 | import sys 4 | from pathlib import Path 5 | 6 | def update_version_in_file(file_path, current_version, new_version, pattern=None, dry_run=False): 7 | content = file_path.read_text() 8 | if pattern is None: 9 | pattern = f'"{current_version}"' 10 | replacement = f'"{new_version}"' 11 | else: 12 | replacement = pattern.replace(current_version, new_version) 13 | 14 | new_content = content.replace(pattern, replacement) 15 | if new_content == content: 16 | print(f"Warning: No version update made in {file_path}") 17 | return False 18 | 19 | if not dry_run: 20 | file_path.write_text(new_content) 21 | print(f"Updated version in {file_path}") 22 | return True 23 | 24 | def bump_version(version_type='patch', dry_run=False): 25 | pyproject_path = Path('pyproject.toml') 26 | init_path = Path('tyler/__init__.py') 27 | 28 | # Find current version in pyproject.toml 29 | content = pyproject_path.read_text() 30 | version_match = re.search(r'version\s*=\s*"(\d+\.\d+\.\d+)"', content) 31 | if not version_match: 32 | print("Could not find version in pyproject.toml") 33 | sys.exit(1) 34 | 35 | current_version = version_match.group(1) 36 | major, minor, patch = map(int, current_version.split('.')) 37 | 38 | # Bump version according to type 39 | if version_type == 'major': 40 | major += 1 41 | minor = 0 42 | patch = 0 43 | elif version_type == 'minor': 44 | minor += 1 45 | patch = 0 46 | else: # patch 47 | patch += 1 48 | 49 | new_version = f"{major}.{minor}.{patch}" 50 | 51 | if not dry_run: 52 | # Update version in pyproject.toml 53 | pyproject_pattern = f'version = "{current_version}"' 54 | update_version_in_file(pyproject_path, current_version, new_version, pyproject_pattern, dry_run) 55 | 56 | # Update version in __init__.py 57 | init_pattern = f'__version__ = "{current_version}"' 58 | update_version_in_file(init_path, current_version, new_version, init_pattern, dry_run) 59 | 60 | print(f"Version bumped from {current_version} to {new_version}") 61 | 62 | return new_version 63 | 64 | if __name__ == '__main__': 65 | version_type = 'patch' 66 | dry_run = False 67 | 68 | # Parse arguments 69 | args = sys.argv[1:] 70 | if '--dry-run' in args: 71 | dry_run = True 72 | args.remove('--dry-run') 73 | 74 | if args: 75 | version_type = args[0] 76 | 77 | if version_type not in ('major', 'minor', 'patch'): 78 | print("Version type must be one of: major, minor, patch") 79 | sys.exit(1) 80 | 81 | new_version = bump_version(version_type, dry_run) 82 | if dry_run: 83 | print(new_version) # Only print version number for dry run -------------------------------------------------------------------------------- /tyler/database/migrations/env.py: -------------------------------------------------------------------------------- 1 | from logging.config import fileConfig 2 | from sqlalchemy import engine_from_config 3 | from sqlalchemy import pool 4 | from alembic import context 5 | from tyler.database.models import Base 6 | import os 7 | 8 | # Load our database configuration 9 | def get_url(): 10 | """Get database URL from environment or use default SQLite.""" 11 | # If a URL is already set in the config, use that (for testing) 12 | if context.config.get_main_option("sqlalchemy.url"): 13 | return 
context.config.get_main_option("sqlalchemy.url") 14 | 15 | # Otherwise use environment configuration 16 | db_type = os.getenv("TYLER_DB_TYPE", "sqlite") 17 | 18 | if db_type == "postgresql": 19 | host = os.getenv("TYLER_DB_HOST", "localhost") 20 | port = os.getenv("TYLER_DB_PORT", "5432") 21 | database = os.getenv("TYLER_DB_NAME", "tyler") 22 | user = os.getenv("TYLER_DB_USER", "tyler") 23 | password = os.getenv("TYLER_DB_PASSWORD", "tyler_dev") 24 | return f"postgresql://{user}:{password}@{host}:{port}/{database}" 25 | else: 26 | data_dir = os.path.expanduser("~/.tyler/data") 27 | os.makedirs(data_dir, exist_ok=True) 28 | return f"sqlite:///{data_dir}/tyler.db" 29 | 30 | config = context.config 31 | 32 | # Set the database URL in the config if not already set 33 | if not config.get_main_option("sqlalchemy.url"): 34 | config.set_main_option("sqlalchemy.url", get_url()) 35 | 36 | # Interpret the config file for Python logging 37 | if config.config_file_name is not None: 38 | fileConfig(config.config_file_name) 39 | 40 | # Add your model's MetaData object here for 'autogenerate' support 41 | target_metadata = Base.metadata 42 | 43 | def run_migrations_offline() -> None: 44 | """Run migrations in 'offline' mode.""" 45 | url = config.get_main_option("sqlalchemy.url") 46 | context.configure( 47 | url=url, 48 | target_metadata=target_metadata, 49 | literal_binds=True, 50 | dialect_opts={"paramstyle": "named"}, 51 | ) 52 | 53 | with context.begin_transaction(): 54 | context.run_migrations() 55 | 56 | def run_migrations_online() -> None: 57 | """Run migrations in 'online' mode.""" 58 | # For testing, we want to use NullPool to ensure we get a fresh connection 59 | # each time and don't reuse connections between tests 60 | connectable = engine_from_config( 61 | config.get_section(config.config_ini_section, {}), 62 | prefix="sqlalchemy.", 63 | poolclass=pool.NullPool, 64 | ) 65 | 66 | with connectable.connect() as connection: 67 | context.configure( 68 | connection=connection, 69 | target_metadata=target_metadata 70 | ) 71 | 72 | with context.begin_transaction(): 73 | context.run_migrations() 74 | 75 | if context.is_offline_mode(): 76 | run_migrations_offline() 77 | else: 78 | run_migrations_online() -------------------------------------------------------------------------------- /docs/docs/installation.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 2 3 | --- 4 | 5 | # Installation Guide 6 | 7 | This guide will walk you through the process of installing Tyler and setting up your development environment on macOS. 8 | 9 | ## System Requirements 10 | 11 | ### Python Version 12 | Tyler requires Python 3.12.8 or later. You can check your Python version by running: 13 | 14 | ```bash 15 | python --version 16 | ``` 17 | 18 | If you need to install or update Python, visit the [official Python website](https://www.python.org/downloads/). 19 | 20 | ### System Dependencies 21 | Tyler requires some system libraries for processing PDFs and images. 
Install them using Homebrew: 22 | 23 | ```bash 24 | brew install libmagic poppler 25 | ``` 26 | 27 | ## Installation Methods 28 | 29 | ### From PyPI (Recommended) 30 | Install the latest version from PyPI: 31 | 32 | ```bash 33 | pip install tyler-agent 34 | ``` 35 | 36 | ### Development Installation 37 | For contributing or development: 38 | 39 | ```bash 40 | # Clone the repository 41 | git clone https://github.com/adamwdraper/tyler.git 42 | cd tyler 43 | 44 | # Create and activate a virtual environment (recommended) 45 | python -m venv venv 46 | source venv/bin/activate 47 | 48 | # Install in development mode with all dependencies 49 | pip install -e ".[dev]" 50 | ``` 51 | 52 | ## Configuration 53 | 54 | ### Environment Variables 55 | Create a `.env` file in your project directory. You can start by copying the example: 56 | 57 | ```bash 58 | curl -O https://raw.githubusercontent.com/adamwdraper/tyler/main/.env.example 59 | cp .env.example .env 60 | ``` 61 | 62 | Edit the `.env` file with your settings. The minimal required configuration is: 63 | 64 | ```bash 65 | # OpenAI API Key (or other LLM provider) 66 | OPENAI_API_KEY=your-api-key-here 67 | ``` 68 | 69 | For a complete list of available configuration options, see the [Configuration Guide](./configuration.md). 70 | 71 | ## Verifying Installation 72 | 73 | You can verify your installation by running a simple test: 74 | 75 | ```python 76 | from tyler import Agent, Thread, Message 77 | import asyncio 78 | 79 | async def test_installation(): 80 | agent = Agent( 81 | model_name="gpt-4.1", 82 | purpose="To test the installation" 83 | ) 84 | thread = Thread() 85 | message = Message( 86 | role="user", 87 | content="Hello! Are you working correctly?" 88 | ) 89 | thread.add_message(message) 90 | 91 | processed_thread, new_messages = await agent.go(thread) 92 | 93 | for message in new_messages: 94 | if message.role == "assistant": 95 | print(f"Assistant: {message.content}") 96 | 97 | if __name__ == "__main__": 98 | asyncio.run(test_installation()) 99 | ``` 100 | 101 | If everything is set up correctly, you should see a response from the AI assistant. 102 | 103 | ## Next Steps 104 | 105 | Now that you have Tyler installed, head over to the [Quickstart guide](./quickstart.md) to learn how to create your first AI agent. -------------------------------------------------------------------------------- /docs/sidebars.ts: -------------------------------------------------------------------------------- 1 | import type {SidebarsConfig} from '@docusaurus/plugin-content-docs'; 2 | 3 | // This runs in Node.js - Don't use client-side code here (browser APIs, JSX...) 4 | 5 | /** 6 | * Creating a sidebar enables you to: 7 | - create an ordered group of docs 8 | - render a sidebar for each doc of that group 9 | - provide next/previous navigation 10 | 11 | The sidebars can be generated from the filesystem, or explicitly defined here. 12 | 13 | Create as many sidebars as you want. 
14 | */ 15 | const sidebars: SidebarsConfig = { 16 | // Documentation sidebar 17 | documentationSidebar: [ 18 | { 19 | type: 'category', 20 | label: 'Get started', 21 | className: 'category-no-arrow', 22 | collapsed: false, 23 | collapsible: false, 24 | items: [ 25 | 'intro', 26 | 'quickstart', 27 | 'how-it-works', 28 | 'chat-with-tyler', 29 | ], 30 | }, 31 | { 32 | type: 'category', 33 | label: 'Components', 34 | className: 'category-no-arrow', 35 | collapsed: false, 36 | collapsible: false, 37 | items: [ 38 | 'core-concepts', 39 | ], 40 | }, 41 | { 42 | type: 'category', 43 | label: 'Tools', 44 | className: 'category-no-arrow', 45 | collapsed: false, 46 | collapsible: false, 47 | items: [ 48 | 'tools/overview', 49 | 'tools/web', 50 | 'tools/slack', 51 | 'tools/notion', 52 | 'tools/image', 53 | 'tools/command-line', 54 | 'tools/audio', 55 | 'tools/files', 56 | 'tools/mcp', 57 | ], 58 | }, 59 | { 60 | type: 'category', 61 | label: 'Guides', 62 | className: 'category-no-arrow', 63 | collapsed: false, 64 | collapsible: false, 65 | items: [ 66 | 'examples/using-tools', 67 | 'examples/tools-streaming', 68 | 'examples/streaming', 69 | 'examples/database-storage', 70 | 'examples/file-storage', 71 | 'examples/message-attachments', 72 | 'examples/interrupt-tools', 73 | 'examples/full-configuration', 74 | ], 75 | }, 76 | ], 77 | 78 | // Reference sidebar 79 | referenceSidebar: [ 80 | { 81 | type: 'category', 82 | label: 'Guides', 83 | className: 'category-no-arrow', 84 | collapsed: false, 85 | collapsible: false, 86 | items: [ 87 | 'configuration', 88 | 'troubleshooting', 89 | ], 90 | }, 91 | { 92 | type: 'category', 93 | label: 'API Reference', 94 | className: 'category-no-arrow', 95 | collapsed: false, 96 | collapsible: false, 97 | items: [ 98 | 'api-reference/agent', 99 | 'api-reference/thread', 100 | 'api-reference/message', 101 | 'api-reference/attachment', 102 | 'api-reference/file-store', 103 | 'api-reference/thread-store', 104 | 'api-reference/mcp' 105 | ], 106 | }, 107 | ], 108 | }; 109 | 110 | export default sidebars; 111 | -------------------------------------------------------------------------------- /docs/docs/examples/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 1 3 | --- 4 | 5 | # Examples 6 | 7 | This section contains examples demonstrating various features and use cases of Tyler. 8 | 9 | ## Basic Examples 10 | 11 | - [Using Tools](./using-tools.md) - Learn how to use built-in and custom tools 12 | - [Basic Streaming](./streaming.md) - Build interactive applications with real-time streaming 13 | - [Tools with Streaming](./tools-streaming.md) - Combine tools with streaming responses 14 | - [Message Attachments](./message-attachments.md) - Work with file attachments in messages 15 | - [Full Configuration](./full-configuration.md) - See all configuration options in action 16 | 17 | ## Advanced Examples 18 | 19 | - [Database Storage](./database-storage.md) - Store threads in SQLite or PostgreSQL 20 | - [File Storage](./file-storage.md) - Store and process files with Tyler 21 | - [Interrupt Tools](./interrupt-tools.md) - Use tools that can interrupt the agent's processing 22 | 23 | Each example includes complete code and explanations to help you understand and implement Tyler's features in your applications. 24 | 25 | ## Quick Links 26 | 27 | 1. [Using Tools](./using-tools.md) - Learn how to use and create custom tools 28 | 2. [Basic Streaming](./streaming.md) - Build real-time interactive applications 29 | 3. 
[Tools with Streaming](./tools-streaming.md) - Combine tools with streaming responses 30 | 4. [Full Configuration](./full-configuration.md) - Explore all configuration options 31 | 5. [Database Storage](./database-storage.md) - Set up persistent storage 32 | 6. [Interrupt Tools](./interrupt-tools.md) - Implement content moderation and control flow 33 | 7. [Message Attachments](./message-attachments.md) - Handle file attachments and processing 34 | 35 | ## Running the Examples 36 | 37 | All examples are available in the [examples directory](https://github.com/adamwdraper/tyler/tree/main/examples) of the Tyler repository. 38 | 39 | To run any example: 40 | 41 | 1. Clone the repository: 42 | ```bash 43 | git clone https://github.com/adamwdraper/tyler.git 44 | cd tyler 45 | ``` 46 | 47 | 2. Install dependencies: 48 | ```bash 49 | pip install tyler-agent[dev] 50 | ``` 51 | 52 | 3. Set up environment variables: 53 | ```bash 54 | cp .env.example .env 55 | # Edit .env with your API keys and configuration 56 | ``` 57 | 58 | 4. Run an example: 59 | ```bash 60 | python examples/2-using_tools.py 61 | ``` 62 | 63 | ## Example Structure 64 | 65 | Each example in this section includes: 66 | - Complete source code 67 | - Step-by-step explanation 68 | - Key concepts covered 69 | - Configuration requirements 70 | - Expected output 71 | 72 | ## Prerequisites 73 | 74 | Before running the examples, ensure you have: 75 | - Python 3.12.8 or later 76 | - Required system libraries (libmagic, poppler) 77 | - API keys for services used (OpenAI, etc.) 78 | - Database setup (if using PostgreSQL examples) 79 | 80 | ## Getting Help 81 | 82 | If you encounter issues while running the examples: 83 | 1. Check the [Configuration Guide](../configuration.md) 84 | 2. Search [GitHub Issues](https://github.com/adamwdraper/tyler/issues) 85 | 3. Ask in [GitHub Discussions](https://github.com/adamwdraper/tyler/discussions) -------------------------------------------------------------------------------- /docs/docs/chat-with-tyler.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 4 3 | --- 4 | 5 | # Chat with Tyler 6 | 7 | Tyler provides two interactive interfaces for chatting with your agent: 8 | 1. A web-based chat interface 9 | 2. A command-line interface (CLI) 10 | 11 | ## Web interface 12 | 13 | The web interface is available as a separate repository at [tyler-chat](https://github.com/adamwdraper/tyler-chat). 14 | 15 | ### Features 16 | 17 | - Modern, responsive design 18 | - Real-time streaming responses 19 | - File attachment support 20 | - Message history 21 | - Tool execution visualization 22 | - Weave monitoring integration 23 | 24 | ### Installation 25 | 26 | ```bash 27 | # Clone the repository 28 | git clone https://github.com/adamwdraper/tyler-chat.git 29 | cd tyler-chat 30 | 31 | # Install dependencies 32 | npm install 33 | 34 | # Start the development server 35 | npm run dev 36 | ``` 37 | 38 | ### Configuration 39 | 40 | Create a `.env` file in the project root: 41 | 42 | ```bash 43 | # Tyler API configuration 44 | TYLER_API_URL=http://localhost:8000 45 | TYLER_API_KEY=your-api-key 46 | 47 | # Optional Weave configuration 48 | WANDB_API_KEY=your-wandb-api-key 49 | ``` 50 | 51 | ### Usage 52 | 53 | 1. Start the development server: 54 | ```bash 55 | npm run dev 56 | ``` 57 | 58 | 2. Open your browser to `http://localhost:3000` 59 | 60 | 3. Start chatting with your agent! 
61 | 62 | ## Command line interface 63 | 64 | The CLI is included with the Tyler package and provides a simple way to interact with your agent from the terminal. 65 | 66 | ### Installation 67 | 68 | The CLI is installed automatically when you install Tyler: 69 | 70 | ```bash 71 | pip install tyler-agent 72 | ``` 73 | 74 | ### Basic usage 75 | 76 | Start a chat session: 77 | 78 | ```bash 79 | tyler-chat 80 | ``` 81 | 82 | With custom configuration: 83 | 84 | ```bash 85 | tyler-chat --model gpt-4.1 --purpose "Technical support" 86 | ``` 87 | 88 | ### Available commands 89 | 90 | During a chat session: 91 | - `/help` - Show available commands 92 | - `/clear` - Clear the conversation 93 | - `/exit` - End the chat session 94 | - `/save` - Save the conversation 95 | - `/load` - Load a saved conversation 96 | - `/tools` - List available tools 97 | - `/purpose` - Show/set agent purpose 98 | - `/model` - Show/change model 99 | - `/system` - Show/set system prompt 100 | 101 | ### Configuration 102 | 103 | The CLI uses the same configuration as the main Tyler package. Set your environment variables in a `.env` file: 104 | 105 | ```bash 106 | # Required 107 | OPENAI_API_KEY=your-openai-api-key 108 | 109 | # Optional 110 | WANDB_API_KEY=your-wandb-api-key 111 | TYLER_DB_TYPE=postgresql 112 | TYLER_DB_URL=postgresql://user:pass@localhost/db 113 | ``` 114 | 115 | ## Next steps 116 | 117 | - Learn about [Configuration](./configuration.md) 118 | - Explore [Examples](./category/examples) 119 | - Read the [API reference](./category/api-reference) 120 | 121 | ## License 122 | 123 | This project is licensed under the Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0). 124 | 125 | For commercial use, please contact the author. -------------------------------------------------------------------------------- /docs/blog/2019-05-29-long-blog-post.md: -------------------------------------------------------------------------------- 1 | --- 2 | slug: long-blog-post 3 | title: Long Blog Post 4 | authors: yangshun 5 | tags: [hello, docusaurus] 6 | --- 7 | 8 | This is the summary of a very long blog post, 9 | 10 | Use a `` comment to limit blog post size in the list view. 11 | 12 | 13 | 14 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 15 | 16 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 17 | 18 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 19 | 20 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 21 | 22 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 23 | 24 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 25 | 26 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. 
Lorem ipsum dolor sit amet 27 | 28 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 29 | 30 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 31 | 32 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 33 | 34 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 35 | 36 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 37 | 38 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 39 | 40 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 41 | 42 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 43 | 44 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 45 | -------------------------------------------------------------------------------- /docs/docs/intro.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 1 3 | slug: / 4 | --- 5 | 6 | # Meet Tyler 7 | 8 | ### A development kit for manifesting AI agents with a complete lack of conventional limitations. 9 | 10 | 11 | Tyler makes it easy to start building effective AI agents in just a few lines of code. Tyler provides all the essential components needed to build production-ready AI agents that can understand context, manage conversations, and effectively use tools. 12 | 13 | ### Key Features 14 | 15 | - **Multimodal support**: Process and understand images, audio, PDFs, and more out of the box 16 | - **Ready-to-use tools**: Comprehensive set of built-in tools with easy integration of custom built tools 17 | - **MCP compatibility**: Seamless integration with [Model Context Protocol (MCP)](https://modelcontextprotocol.io/introduction) compatible servers and tools 18 | - **Real-time streaming**: Build interactive applications with streaming responses from both the assistant and tools 19 | - **Structured data model**: Built-in support for threads, messages, and attachments to maintain conversation context 20 | - **Persistent storage**: Choose between in-memory, SQLite, or PostgreSQL to store conversation history and files 21 | - **Advanced debugging**: Integration with [W&B Weave](https://weave-docs.wandb.ai/) for powerful tracing and debugging capabilities 22 | - **Flexible model support**: Use any LLM provider supported by LiteLLM (100+ providers including OpenAI, Anthropic, etc.) 
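To give a sense of how little code is involved, here is a minimal sketch of the core loop — creating an agent, adding a user message to a thread, and letting the agent process it. It follows the same pattern as the quickstart and API reference examples; it assumes `tyler-agent` is installed and an LLM API key (e.g. `OPENAI_API_KEY`) is configured, and the model name and purpose are placeholders you would adapt:

```python
import asyncio
from tyler import Agent, Thread, Message

async def main():
    # An agent is defined by the model it uses and its purpose
    agent = Agent(
        model_name="gpt-4.1",
        purpose="To help with tasks"
    )

    # Threads hold conversation context; messages are appended to them
    thread = Thread()
    thread.add_message(Message(role="user", content="Hello!"))

    # The agent processes the thread and returns any new messages
    processed_thread, new_messages = await agent.go(thread)
    for message in new_messages:
        if message.role == "assistant":
            print(f"Assistant: {message.content}")

if __name__ == "__main__":
    asyncio.run(main())
```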
23 | 24 | Get started quickly by installing Tyler via pip: `pip install tyler-agent` and check out our [quickstart guide](./quickstart.md) to build your first agent in minutes. 25 | 26 | ## Overview 27 | 28 | 29 | 34 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | # Chat with Tyler 55 | 56 | While Tyler can be used as a library, it also has a web-based chat interface that allows you to interact with your agent. The interface is available as a separate repository at [tyler-chat](https://github.com/adamwdraper/tyler-chat). 57 | 58 |  59 | 60 | ### Key features of Chat with Tyler 61 | - Modern, responsive web interface 62 | - Real-time interaction with Tyler agents 63 | - Support for file attachments 64 | - Message history and context preservation 65 | - Easy deployment and customization 66 | 67 | To get started with the chat interface, visit the [Chat with Tyler documentation](./chat-with-tyler.md). 68 | 69 | ## Next Steps 70 | 71 | - [Installation Guide](./installation.md) - Detailed installation instructions 72 | - [Configuration](./configuration.md) - Learn about configuration options 73 | - [Core Concepts](./core-concepts.md) - Understand Tyler's architecture 74 | - [API Reference](./category/api-reference) - Explore the API documentation 75 | - [Examples](./category/examples) - See more usage examples 76 | -------------------------------------------------------------------------------- /examples/explicit_stores.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from tyler import Agent, Thread, Message, ThreadStore, FileStore 3 | # Import the individual store classes and registration functions 4 | from tyler.utils.registry import register_thread_store, register_file_store 5 | 6 | async def main(): 7 | """ 8 | Demonstrates explicitly creating and setting stores for an Agent. 9 | """ 10 | # 1. Create default in-memory stores individually. 11 | # Calling .create() without arguments defaults to in-memory. 12 | thread_store = await ThreadStore.create() 13 | file_store = await FileStore.create() 14 | print(f"Created in-memory stores:") 15 | print(f" Thread Store: {type(thread_store)}") 16 | print(f" File Store: {type(file_store)}") 17 | 18 | # 2. Register the stores in the global registry under the name "default". 19 | # This is optional - you can pass stores directly to the Agent constructor. 20 | register_thread_store("default", thread_store) 21 | register_file_store("default", file_store) 22 | print(f"Registered stores with name 'default'") 23 | 24 | # 3. Initialize the agent with explicit stores. 25 | # You can either pass the store instances directly or use store names from the registry. 26 | agent = Agent( 27 | name="StoreAwareAssistant", 28 | purpose="Answer questions concisely using explicitly set stores.", 29 | model_name="gpt-4.1", # Using preferred model 30 | thread_store=thread_store, # Pass store instance directly 31 | file_store=file_store # Pass store instance directly 32 | ) 33 | print(f"Initialized agent: {agent.name}") 34 | print("Agent configured with explicit stores.") 35 | 36 | # 4. Create a new thread. Because the agent is configured with stores, 37 | # operations like saving the thread will use the provided stores. 38 | thread = Thread() 39 | print(f"Created new thread with ID: {thread.id}") 40 | 41 | # 5. Add a user message 42 | user_message = Message(role="user", content="What is the capital of Spain?") 43 | thread.add_message(user_message) 44 | print(f"Added user message: '{user_message.content}'") 45 | 46 | # 6. 
Run the agent. It will use the configured stores internally. 47 | print("Running agent...") 48 | final_thread, new_messages = await agent.go(thread) 49 | print("Agent finished processing.") 50 | 51 | # 7. Print the assistant's response 52 | if new_messages: 53 | # Filter for the actual assistant response (last non-tool message) 54 | assistant_message = next((msg for msg in reversed(new_messages) if msg.role == 'assistant'), None) 55 | if assistant_message: 56 | print(f"Assistant Response: {assistant_message.content}") 57 | else: 58 | print("No assistant message found in new messages.") 59 | print("All new messages:", new_messages) 60 | else: 61 | print("No new messages were generated.") 62 | 63 | # Note: Since the stores are in-memory, the thread data still only exists 64 | # for the duration of this script run. To persist data, you would provide 65 | # a database URL to ThreadStore.create and a directory path to FileStore.create 66 | 67 | if __name__ == "__main__": 68 | # Added basic error handling for asyncio run 69 | try: 70 | asyncio.run(main()) 71 | except Exception as e: 72 | print(f"An error occurred: {e}") -------------------------------------------------------------------------------- /tyler/utils/files.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Dict 2 | from pathlib import Path 3 | from platformdirs import user_downloads_dir 4 | from urllib.parse import urlparse, unquote 5 | 6 | def get_unique_filepath(base_path: Path) -> Path: 7 | """ 8 | Get a unique file path by adding a number suffix if the file already exists. 9 | 10 | Args: 11 | base_path (Path): The initial file path to check 12 | 13 | Returns: 14 | Path: A unique file path that doesn't exist 15 | """ 16 | if not base_path.exists(): 17 | return base_path 18 | 19 | directory = base_path.parent 20 | stem = base_path.stem 21 | suffix = base_path.suffix 22 | counter = 1 23 | 24 | while True: 25 | new_path = directory / f"{stem} ({counter}){suffix}" 26 | if not new_path.exists(): 27 | return new_path 28 | counter += 1 29 | 30 | def save_to_downloads(*, content: bytes, filename: str = "", content_disposition: Optional[str] = None, url: Optional[str] = None) -> Dict: 31 | """ 32 | Save content to the user's Downloads directory with proper filename handling. 33 | 34 | Args: 35 | content (bytes): The content to save 36 | filename (str): Optional filename to save as 37 | content_disposition (str, optional): Content-Disposition header value for filename extraction 38 | url (str, optional): Original URL to extract filename from if not provided 39 | 40 | Returns: 41 | Dict: Contains file path and filename information 42 | """ 43 | try: 44 | # Use standard Downloads directory 45 | downloads_dir = Path(user_downloads_dir()) 46 | 47 | # Get filename from different sources in order of priority: 48 | # 1. Explicitly provided filename 49 | # 2. Content-Disposition header 50 | # 3. URL path 51 | # 4. Default fallback 52 | if not filename: 53 | if content_disposition: 54 | import re 55 | if match := re.search(r'filename="?([^"]+)"?', content_disposition): 56 | filename = match.group(1) 57 | elif url: 58 | parsed_url = urlparse(url) 59 | path = unquote(parsed_url.path) 60 | if path and '/' in path: 61 | filename = path.split('/')[-1] 62 | # Remove query parameters if they got included 63 | if '?' 
in filename: 64 | filename = filename.split('?')[0] 65 | 66 | # Fall back to default if still no filename 67 | if not filename: 68 | filename = 'downloaded_file' 69 | 70 | # Create full file path and ensure it's unique 71 | initial_path = downloads_dir / filename 72 | file_path = get_unique_filepath(initial_path) 73 | 74 | # Write the file 75 | with open(file_path, 'wb') as f: 76 | f.write(content) 77 | 78 | return { 79 | 'success': True, 80 | 'file_path': str(file_path), 81 | 'filename': file_path.name, # Use the potentially modified filename 82 | 'error': None 83 | } 84 | except Exception as e: 85 | return { 86 | 'success': False, 87 | 'file_path': None, 88 | 'filename': None, 89 | 'error': str(e) 90 | } -------------------------------------------------------------------------------- /tyler/tools/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tools package initialization. 3 | """ 4 | import importlib 5 | import sys 6 | import os 7 | import glob 8 | from typing import Dict, List 9 | from tyler.utils.logging import get_logger 10 | 11 | # Get configured logger 12 | logger = get_logger(__name__) 13 | 14 | # Initialize empty tool lists for each module 15 | WEB_TOOLS = [] 16 | SLACK_TOOLS = [] 17 | COMMAND_LINE_TOOLS = [] 18 | NOTION_TOOLS = [] 19 | IMAGE_TOOLS = [] 20 | AUDIO_TOOLS = [] 21 | FILES_TOOLS = [] 22 | BROWSER_TOOLS = [] 23 | 24 | # Combined tools list 25 | TOOLS = [] 26 | 27 | # Try to import each tool module 28 | try: 29 | from . import web as web_module 30 | from . import slack as slack_module 31 | from . import command_line as command_line_module 32 | from . import notion as notion_module 33 | from . import image as image_module 34 | from . import audio as audio_module 35 | from . import files as files_module 36 | from . 
import browser as browser_module 37 | except ImportError as e: 38 | print(f"Warning: Some tool modules could not be imported: {e}") 39 | 40 | # Get tool lists from each module and maintain both individual and combined lists 41 | try: 42 | module_tools = getattr(web_module, "TOOLS", []) 43 | WEB_TOOLS.extend(module_tools) 44 | TOOLS.extend(module_tools) 45 | except Exception as e: 46 | print(f"Warning: Could not load web tools: {e}") 47 | 48 | try: 49 | module_tools = getattr(slack_module, "TOOLS", []) 50 | SLACK_TOOLS.extend(module_tools) 51 | TOOLS.extend(module_tools) 52 | except Exception as e: 53 | print(f"Warning: Could not load slack tools: {e}") 54 | 55 | try: 56 | module_tools = getattr(command_line_module, "TOOLS", []) 57 | COMMAND_LINE_TOOLS.extend(module_tools) 58 | TOOLS.extend(module_tools) 59 | except Exception as e: 60 | print(f"Warning: Could not load command line tools: {e}") 61 | 62 | try: 63 | module_tools = getattr(notion_module, "TOOLS", []) 64 | NOTION_TOOLS.extend(module_tools) 65 | TOOLS.extend(module_tools) 66 | except Exception as e: 67 | print(f"Warning: Could not load notion tools: {e}") 68 | 69 | try: 70 | module_tools = getattr(image_module, "TOOLS", []) 71 | IMAGE_TOOLS.extend(module_tools) 72 | TOOLS.extend(module_tools) 73 | except Exception as e: 74 | print(f"Warning: Could not load image tools: {e}") 75 | 76 | try: 77 | module_tools = getattr(audio_module, "TOOLS", []) 78 | AUDIO_TOOLS.extend(module_tools) 79 | TOOLS.extend(module_tools) 80 | except Exception as e: 81 | print(f"Warning: Could not load audio tools: {e}") 82 | 83 | try: 84 | module_tools = getattr(files_module, "TOOLS", []) 85 | FILES_TOOLS.extend(module_tools) 86 | TOOLS.extend(module_tools) 87 | except Exception as e: 88 | print(f"Warning: Could not load files tools: {e}") 89 | 90 | try: 91 | module_tools = getattr(browser_module, "TOOLS", []) 92 | BROWSER_TOOLS.extend(module_tools) 93 | TOOLS.extend(module_tools) 94 | except Exception as e: 95 | print(f"Warning: Could not load browser tools: {e}") 96 | 97 | __all__ = [ 98 | 'TOOLS', 99 | 'WEB_TOOLS', 100 | 'SLACK_TOOLS', 101 | 'COMMAND_LINE_TOOLS', 102 | 'NOTION_TOOLS', 103 | 'IMAGE_TOOLS', 104 | 'AUDIO_TOOLS', 105 | 'FILES_TOOLS', 106 | 'BROWSER_TOOLS' 107 | ] 108 | 109 | # Map of module names to their tools for dynamic loading 110 | TOOL_MODULES: Dict[str, List] = { 111 | 'web': WEB_TOOLS, 112 | 'slack': SLACK_TOOLS, 113 | 'command_line': COMMAND_LINE_TOOLS, 114 | 'notion': NOTION_TOOLS, 115 | 'image': IMAGE_TOOLS, 116 | 'audio': AUDIO_TOOLS, 117 | 'files': FILES_TOOLS, 118 | 'browser': BROWSER_TOOLS 119 | } -------------------------------------------------------------------------------- /examples/reactions_example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Example demonstrating how to use message reactions in Tyler. 4 | 5 | This example shows how to: 6 | 1. Add reactions to messages 7 | 2. Remove reactions from messages 8 | 3. Get reactions for a message 9 | 4. Save/load threads with reactions to/from the database 10 | """ 11 | 12 | import asyncio 13 | from typing import Dict, List 14 | from tyler import Thread, Message, ThreadStore 15 | 16 | async def main(): 17 | # Create an in-memory ThreadStore for this example 18 | thread_store = await ThreadStore.create() 19 | 20 | # Create a new thread with some messages 21 | thread = Thread(title="Reactions Example") 22 | 23 | # Add some messages to the thread 24 | user_msg = Message(role="user", content="Hello! 
I have a question about reactions.") 25 | thread.add_message(user_msg) 26 | 27 | assistant_msg = Message(role="assistant", content="Sure, I'd be happy to help with reactions!") 28 | thread.add_message(assistant_msg) 29 | 30 | user_msg2 = Message(role="user", content="How do I add a thumbs up?") 31 | thread.add_message(user_msg2) 32 | 33 | assistant_msg2 = Message(role="assistant", content="It's easy! Just click the emoji button.") 34 | thread.add_message(assistant_msg2) 35 | 36 | # Print the thread messages 37 | print(f"Thread '{thread.title}' has {len(thread.messages)} messages:") 38 | for msg in thread.messages: 39 | print(f" - {msg.role}: {msg.content}") 40 | print() 41 | 42 | # Add reactions to messages 43 | print("Adding reactions...") 44 | 45 | # User 1 adds thumbs up to assistant's first message 46 | thread.add_reaction(assistant_msg.id, ":thumbsup:", "user1") 47 | 48 | # User 2 also adds thumbs up to the same message 49 | thread.add_reaction(assistant_msg.id, ":thumbsup:", "user2") 50 | 51 | # User 1 adds heart to assistant's second message 52 | thread.add_reaction(assistant_msg2.id, ":heart:", "user1") 53 | 54 | # User 3 adds rocket to assistant's second message 55 | thread.add_reaction(assistant_msg2.id, ":rocket:", "user3") 56 | 57 | # Display reactions 58 | print("\nReactions after adding:") 59 | for msg in thread.messages: 60 | if msg.reactions: 61 | print(f"Message '{msg.content}' has reactions:") 62 | for emoji, users in msg.reactions.items(): 63 | print(f" - {emoji}: {', '.join(users)}") 64 | 65 | # Remove a reaction 66 | print("\nRemoving User 1's heart reaction from second message...") 67 | thread.remove_reaction(assistant_msg2.id, ":heart:", "user1") 68 | 69 | # Display reactions after removal 70 | print("\nReactions after removal:") 71 | for msg in thread.messages: 72 | if msg.reactions: 73 | print(f"Message '{msg.content}' has reactions:") 74 | for emoji, users in msg.reactions.items(): 75 | print(f" - {emoji}: {', '.join(users)}") 76 | 77 | # Save thread to database 78 | print("\nSaving thread to database...") 79 | await thread_store.save(thread) 80 | 81 | # Retrieve thread from database 82 | print("Retrieving thread from database...") 83 | retrieved_thread = await thread_store.get(thread.id) 84 | 85 | # Display reactions from retrieved thread 86 | print("\nReactions in retrieved thread:") 87 | for msg in retrieved_thread.messages: 88 | if msg.reactions: 89 | print(f"Message '{msg.content}' has reactions:") 90 | for emoji, users in msg.reactions.items(): 91 | print(f" - {emoji}: {', '.join(users)}") 92 | 93 | print("\nExample completed successfully!") 94 | 95 | 96 | if __name__ == "__main__": 97 | asyncio.run(main()) -------------------------------------------------------------------------------- /examples/agent_delegation.py: -------------------------------------------------------------------------------- 1 | """ 2 | Example script demonstrating agent-to-agent delegation using the parent-child approach. 3 | 4 | This script creates multiple specialized agents and attaches them to 5 | a main coordinator agent which can delegate tasks to them. 
6 | """ 7 | import asyncio 8 | import os 9 | from tyler import Agent, Thread, Message 10 | from tyler.utils.agent_runner import agent_runner 11 | from tyler.utils.logging import get_logger 12 | import weave 13 | 14 | # Load environment variables and configure logging first 15 | from dotenv import load_dotenv 16 | load_dotenv() 17 | 18 | logger = get_logger(__name__) 19 | 20 | try: 21 | if os.getenv("WANDB_API_KEY"): 22 | weave.init("tyler") 23 | logger.debug("Weave tracing initialized successfully") 24 | except Exception as e: 25 | logger.warning(f"Failed to initialize weave tracing: {e}. Continuing without weave.") 26 | 27 | async def main(): 28 | # Create specialized agents 29 | research_agent = Agent( 30 | name="Research", # Using simple, unique names 31 | model_name="gpt-4.1", 32 | purpose="To conduct in-depth research on topics and provide comprehensive information.", 33 | tools=["web"] # Give research agent web search tools 34 | ) 35 | 36 | code_agent = Agent( 37 | name="Code", # Using simple, unique names 38 | model_name="gpt-4.1", 39 | purpose="To write, review, and explain code in various programming languages.", 40 | tools=[] # No additional tools needed for coding 41 | ) 42 | 43 | creative_agent = Agent( 44 | name="Creative", # Using simple, unique names 45 | model_name="gpt-4.1", 46 | purpose="To generate creative content such as stories, poems, and marketing copy.", 47 | tools=[] # No additional tools needed for creative writing 48 | ) 49 | 50 | # Create main agent with specialized agents as a list 51 | main_agent = Agent( 52 | name="Coordinator", 53 | model_name="gpt-4.1", 54 | purpose="To coordinate work by delegating tasks to specialized agents when appropriate.", 55 | tools=[], # No additional tools needed since agents will be added as tools 56 | agents=[research_agent, code_agent, creative_agent] # Simple list instead of dictionary 57 | ) 58 | 59 | # Initialize a thread with a complex query that requires delegation 60 | thread = Thread() 61 | 62 | # Add a message that will likely require delegation 63 | thread.add_message(Message( 64 | role="user", 65 | content="""I need help with a few things: 66 | 67 | 1. I need research on the latest advancements in quantum computing 68 | 2. I need a short Python script that can convert CSV to JSON 69 | 3. I need a creative tagline for my tech startup called "QuantumLeap" 70 | 71 | Please help me with these tasks. 
72 | """
73 |     ))
74 | 
75 |     # Print available agents from agent_runner
76 |     logger.info(f"Available agents: {agent_runner.list_agents()}")
77 | 
78 |     # Process with the main agent
79 |     result_thread, messages = await main_agent.go(thread)
80 | 
81 |     # Print the results
82 |     print("\n=== FINAL CONVERSATION ===\n")
83 |     for message in result_thread.messages:
84 |         if message.role == "user":
85 |             print(f"\nUser: {message.content}\n")
86 |         elif message.role == "assistant":
87 |             print(f"\nAssistant: {message.content}\n")
88 |             if message.tool_calls:
89 |                 print(f"[Tool calls: {', '.join([tc['function']['name'] for tc in message.tool_calls])}]")
90 |         elif message.role == "tool":
91 |             print(f"\nTool ({message.name}): {message.content}\n")
92 | 
93 | if __name__ == "__main__":
94 |     asyncio.run(main())
--------------------------------------------------------------------------------
/docs/docusaurus.config.js:
--------------------------------------------------------------------------------
1 | // @ts-check
2 | // Note: type annotations allow type checking and IDEs autocompletion
3 | 
4 | const {themes} = require('prism-react-renderer');
5 | 
6 | /** @type {import('@docusaurus/types').Config} */
7 | const config = {
8 |   title: 'Tyler Documentation',
9 |   tagline: 'A powerful AI Agent powered by LLMs',
10 |   favicon: 'img/favicon.ico',
11 | 
12 |   // Set the production url of your site here
13 |   url: 'https://adamwdraper.github.io',
14 |   // Set the // pathname under which your site is served
15 |   // For GitHub pages deployment, it is often '//'
16 |   baseUrl: '/tyler/',
17 | 
18 |   // GitHub pages deployment config.
19 |   organizationName: 'adamwdraper',
20 |   projectName: 'tyler',
21 | 
22 |   onBrokenLinks: 'throw',
23 |   onBrokenMarkdownLinks: 'warn',
24 | 
25 |   // Even if you don't use internationalization, you can use this field to set useful
26 |   // metadata like html lang. For example, if your site is Chinese, you may want
27 |   // to replace "en" with "zh-Hans".
28 | i18n: { 29 | defaultLocale: 'en', 30 | locales: ['en'], 31 | }, 32 | 33 | presets: [ 34 | [ 35 | 'classic', 36 | /** @type {import('@docusaurus/preset-classic').Options} */ 37 | ({ 38 | docs: { 39 | sidebarPath: require.resolve('./sidebars.js'), 40 | editUrl: 41 | 'https://github.com/adamwdraper/tyler/tree/main/docs/', 42 | }, 43 | blog: false, 44 | theme: { 45 | customCss: require.resolve('./src/css/custom.css'), 46 | }, 47 | }), 48 | ], 49 | ], 50 | 51 | themeConfig: 52 | /** @type {import('@docusaurus/preset-classic').ThemeConfig} */ 53 | ({ 54 | // Replace with your project's social card 55 | image: 'img/tyler-social-card.jpg', 56 | navbar: { 57 | title: 'Tyler', 58 | logo: { 59 | alt: 'Tyler Logo', 60 | src: 'img/logo.svg', 61 | }, 62 | items: [ 63 | { 64 | type: 'docSidebar', 65 | sidebarId: 'tutorialSidebar', 66 | position: 'left', 67 | label: 'Documentation', 68 | }, 69 | { 70 | href: 'https://github.com/adamwdraper/tyler', 71 | label: 'GitHub', 72 | position: 'right', 73 | }, 74 | ], 75 | }, 76 | footer: { 77 | style: 'dark', 78 | links: [ 79 | { 80 | title: 'Docs', 81 | items: [ 82 | { 83 | label: 'Getting Started', 84 | to: '/docs/intro', 85 | }, 86 | { 87 | label: 'API Reference', 88 | to: '/docs/api', 89 | }, 90 | ], 91 | }, 92 | { 93 | title: 'Community', 94 | items: [ 95 | { 96 | label: 'GitHub Issues', 97 | href: 'https://github.com/adamwdraper/tyler/issues', 98 | }, 99 | { 100 | label: 'GitHub Discussions', 101 | href: 'https://github.com/adamwdraper/tyler/discussions', 102 | }, 103 | ], 104 | }, 105 | { 106 | title: 'More', 107 | items: [ 108 | { 109 | label: 'GitHub', 110 | href: 'https://github.com/adamwdraper/tyler', 111 | }, 112 | ], 113 | }, 114 | ], 115 | copyright: `Copyright © ${new Date().getFullYear()} Tyler. Built with Docusaurus.`, 116 | }, 117 | prism: { 118 | theme: themes.github, 119 | darkTheme: themes.dracula, 120 | }, 121 | }), 122 | }; 123 | 124 | module.exports = config; -------------------------------------------------------------------------------- /examples/mcp_basic.py: -------------------------------------------------------------------------------- 1 | """Example of using Tyler with the Brave Search MCP server. 2 | 3 | This example demonstrates how to use Tyler with the Brave Search MCP server. 4 | """ 5 | # Load environment variables and configure logging first 6 | from dotenv import load_dotenv 7 | load_dotenv() 8 | 9 | from tyler.utils.logging import get_logger 10 | logger = get_logger(__name__) 11 | 12 | # Now import everything else 13 | import asyncio 14 | import os 15 | import sys 16 | import weave 17 | from typing import List, Dict, Any 18 | 19 | from tyler import Agent, Thread, Message 20 | from tyler.mcp.utils import initialize_mcp_service, cleanup_mcp_service 21 | 22 | # Add the parent directory to the path so we can import the example utils 23 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 24 | 25 | # Initialize weave tracing if WANDB_API_KEY is set 26 | try: 27 | if os.getenv("WANDB_API_KEY"): 28 | weave.init("tyler") 29 | logger.debug("Weave tracing initialized successfully") 30 | except Exception as e: 31 | logger.warning(f"Failed to initialize weave tracing: {e}. Continuing without weave.") 32 | 33 | async def main(): 34 | """Run the example.""" 35 | # Check for Brave API key 36 | brave_api_key = os.environ.get("BRAVE_API_KEY") 37 | if not brave_api_key: 38 | logger.warning("BRAVE_API_KEY environment variable not set. 
" 39 | "Please set it to use the Brave Search API.") 40 | return 41 | 42 | logger.info("Initializing MCP service with Brave Search server...") 43 | 44 | # Configure the Brave Search MCP server 45 | server_configs = [ 46 | { 47 | "name": "brave", 48 | "transport": "stdio", 49 | "command": "npx", 50 | "args": ["-y", "@modelcontextprotocol/server-brave-search"], 51 | "startup_timeout": 5, 52 | "required": True, 53 | "env": { 54 | "BRAVE_API_KEY": brave_api_key 55 | } 56 | } 57 | ] 58 | 59 | # Initialize the MCP service 60 | mcp_service = await initialize_mcp_service(server_configs) 61 | 62 | try: 63 | # Get the MCP tools for the agent 64 | mcp_tools = mcp_service.get_tools_for_agent(["brave"]) 65 | 66 | if not mcp_tools: 67 | logger.error("No tools discovered from the Brave Search MCP server.") 68 | return 69 | 70 | logger.info(f"Discovered {len(mcp_tools)} tools from the Brave Search MCP server.") 71 | 72 | # Create an agent with the MCP tools 73 | agent = Agent( 74 | name="Tyler", 75 | model_name="gpt-4.1", 76 | tools=mcp_tools 77 | ) 78 | 79 | # Create a thread 80 | thread = Thread() 81 | 82 | # Add a user message 83 | thread.add_message(Message( 84 | role="user", 85 | content="What can you tell me about quantum computing?" 86 | )) 87 | 88 | # Process the thread with streaming 89 | logger.info("Processing thread with streaming...") 90 | async for update in agent.go_stream(thread): 91 | if update.type.name == "CONTENT_CHUNK": 92 | print(update.data, end="", flush=True) 93 | elif update.type.name == "TOOL_MESSAGE": 94 | print(f"\n[Tool execution: {update.data['name']}]\n") 95 | elif update.type.name == "COMPLETE": 96 | print("\n\nProcessing complete!") 97 | 98 | finally: 99 | # Clean up the MCP service 100 | logger.info("Cleaning up MCP service...") 101 | await cleanup_mcp_service() 102 | 103 | 104 | if __name__ == "__main__": 105 | asyncio.run(main()) -------------------------------------------------------------------------------- /docs/docs/tools/web.md: -------------------------------------------------------------------------------- 1 | # Web 2 | 3 | The web module provides tools for interacting with web content. These tools allow you to fetch and process web pages in various formats. 4 | 5 | ## Available tools 6 | 7 | ### Web-fetch page 8 | 9 | Fetches content from a web page and returns it in a clean, readable format with preserved structure. 10 | 11 | #### Parameters 12 | 13 | - `url` (string, required) 14 | - The URL to fetch 15 | - Must be a valid HTTP or HTTPS URL 16 | 17 | - `format` (string, optional) 18 | - Output format for the page content 19 | - Options: 20 | - `text` (default): Returns clean, readable text with preserved structure 21 | - `html`: Returns raw HTML content 22 | - Use `text` for most cases when you want to read or analyze the content 23 | - Only use `html` when you need to process the raw HTML structure 24 | 25 | - `headers` (object, optional) 26 | - Custom headers to send with the request 27 | - Useful for: 28 | - Authentication 29 | - Setting user agent 30 | - Custom request headers 31 | 32 | #### Example Usage 33 | 34 | ```python 35 | from tyler.models import Agent, Thread, Message 36 | 37 | # Create an agent with web tools 38 | agent = Agent( 39 | model_name="gpt-4.1", 40 | purpose="To help with web content", 41 | tools=["web"] 42 | ) 43 | 44 | # Create a thread with a request to fetch a web page 45 | thread = Thread() 46 | message = Message( 47 | role="user", 48 | content="Can you fetch and summarize the content from https://example.com?" 
49 | ) 50 | thread.add_message(message) 51 | 52 | # Process the thread - agent will use web-fetch_page tool 53 | processed_thread, new_messages = await agent.go(thread) 54 | ``` 55 | 56 | #### Response Format 57 | 58 | The tool returns a dictionary with: 59 | - `content`: The fetched content in the requested format 60 | - `metadata`: Additional information about the page 61 | - `title`: Page title if available 62 | - `url`: Final URL (after any redirects) 63 | - `status`: HTTP status code 64 | - `headers`: Response headers 65 | 66 | #### Error Handling 67 | 68 | The tool handles common errors: 69 | - Invalid URLs 70 | - Network timeouts 71 | - HTTP errors (4xx, 5xx) 72 | - Invalid content types 73 | 74 | Error responses include: 75 | - Error message 76 | - Error type 77 | - HTTP status code (if applicable) 78 | 79 | ## Best practices 80 | 81 | 1. **Use Text Format by Default** 82 | - The `text` format is optimized for readability 83 | - Preserves important structure while removing clutter 84 | - Better for content analysis and summarization 85 | 86 | 2. **Handle Large Pages** 87 | - Consider using pagination for large content 88 | - Process content in chunks if needed 89 | - Be aware of rate limiting and robots.txt 90 | 91 | 3. **Respect Website Policies** 92 | - Check robots.txt 93 | - Use appropriate delays between requests 94 | - Include proper user agent headers 95 | 96 | 4. **Security Considerations** 97 | - Only fetch from trusted sources 98 | - Be cautious with user-provided URLs 99 | - Validate and sanitize content 100 | 101 | ## Configuration 102 | 103 | No special configuration or environment variables are required for web tools. 104 | 105 | ## Common use cases 106 | 107 | 1. **Content Extraction** 108 | - Fetch articles for summarization 109 | - Extract specific information from web pages 110 | - Gather data for analysis 111 | 112 | 2. **Web Scraping** 113 | - Collect structured data from websites 114 | - Monitor page changes 115 | - Archive content 116 | 117 | 3. **Site Analysis** 118 | - Check page availability 119 | - Analyze page structure 120 | - Validate links 121 | 122 | 4. **Content Integration** 123 | - Import content from external sources 124 | - Aggregate information 125 | - Cross-reference data -------------------------------------------------------------------------------- /tyler/database/migrations/versions/20250206_0505_197750e12029_initial.py: -------------------------------------------------------------------------------- 1 | """initial 2 | 3 | Revision ID: 197750e12029 4 | Revises: 5 | Create Date: 2024-02-06 05:05:00.000000 6 | 7 | """ 8 | from typing import Sequence, Union 9 | from alembic import op 10 | import sqlalchemy as sa 11 | from sqlalchemy.dialects import postgresql 12 | from sqlalchemy import text 13 | 14 | # revision identifiers, used by Alembic. 
15 | revision: str = '197750e12029' 16 | down_revision: Union[str, None] = None 17 | branch_labels: Union[str, Sequence[str], None] = None 18 | depends_on: Union[str, Sequence[str], None] = None 19 | 20 | def upgrade() -> None: 21 | # Create files table 22 | op.create_table('files', 23 | sa.Column('id', sa.String(), nullable=False), 24 | sa.Column('filename', sa.String(), nullable=False), 25 | sa.Column('mime_type', sa.String(), nullable=True), 26 | sa.Column('storage_path', sa.String(), nullable=False), 27 | sa.Column('storage_backend', sa.String(), nullable=False), 28 | sa.Column('created_at', sa.DateTime(timezone=True), server_default=text("CURRENT_TIMESTAMP"), nullable=False), 29 | sa.Column('metadata', sa.JSON(), nullable=True), 30 | sa.PrimaryKeyConstraint('id') 31 | ) 32 | 33 | # Create threads table 34 | op.create_table('threads', 35 | sa.Column('id', sa.String(), nullable=False), 36 | sa.Column('title', sa.String(), nullable=True), 37 | sa.Column('attributes', sa.JSON(), nullable=False, server_default='{}'), 38 | sa.Column('source', sa.JSON(), nullable=True), 39 | sa.Column('metrics', sa.JSON(), nullable=False, server_default='{}', comment='Thread-level metrics'), 40 | sa.Column('created_at', sa.DateTime(timezone=True), server_default=text("CURRENT_TIMESTAMP"), nullable=False), 41 | sa.Column('updated_at', sa.DateTime(timezone=True), server_default=text("CURRENT_TIMESTAMP"), nullable=False), 42 | sa.PrimaryKeyConstraint('id') 43 | ) 44 | 45 | # Create messages table with sequence column 46 | op.create_table('messages', 47 | sa.Column('id', sa.String(), nullable=False), 48 | sa.Column('thread_id', sa.String(), nullable=False), 49 | sa.Column('sequence', sa.Integer(), nullable=False), # Message order in thread 50 | sa.Column('role', sa.String(), nullable=False), 51 | sa.Column('content', sa.Text(), nullable=True), 52 | sa.Column('name', sa.String(), nullable=True), 53 | sa.Column('tool_call_id', sa.String(), nullable=True), 54 | sa.Column('tool_calls', sa.JSON(), nullable=True), 55 | sa.Column('attributes', sa.JSON(), nullable=False, server_default='{}'), 56 | sa.Column('timestamp', sa.DateTime(timezone=True), server_default=text("CURRENT_TIMESTAMP"), nullable=False), 57 | sa.Column('source', sa.JSON(), nullable=True), 58 | sa.Column('attachments', sa.JSON(), nullable=True), 59 | sa.Column('metrics', sa.JSON(), nullable=False, server_default='{}', comment='Message-level metrics'), 60 | sa.PrimaryKeyConstraint('id'), 61 | sa.ForeignKeyConstraint(['thread_id'], ['threads.id'], ondelete='CASCADE'), 62 | sa.UniqueConstraint('thread_id', 'sequence', name='uq_message_thread_sequence') # Ensure unique sequences per thread 63 | ) 64 | 65 | # Add indexes 66 | op.create_index(op.f('ix_files_filename'), 'files', ['filename'], unique=False) 67 | op.create_index(op.f('ix_threads_updated_at'), 'threads', ['updated_at'], unique=False) 68 | op.create_index(op.f('ix_messages_thread_id'), 'messages', ['thread_id'], unique=False) 69 | op.create_index(op.f('ix_messages_timestamp'), 'messages', ['timestamp'], unique=False) 70 | op.create_index(op.f('ix_messages_sequence'), 'messages', ['sequence'], unique=False) 71 | 72 | def downgrade() -> None: 73 | op.drop_index(op.f('ix_messages_sequence'), table_name='messages') 74 | op.drop_index(op.f('ix_messages_timestamp'), table_name='messages') 75 | op.drop_index(op.f('ix_messages_thread_id'), table_name='messages') 76 | op.drop_index(op.f('ix_threads_updated_at'), table_name='threads') 77 | op.drop_index(op.f('ix_files_filename'), table_name='files') 78 | 
op.drop_table('messages') 79 | op.drop_table('threads') 80 | op.drop_table('files') -------------------------------------------------------------------------------- /examples/tools_basic.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Example demonstrating the use of built-in and custom tools. 4 | """ 5 | # Load environment variables and configure logging first 6 | from dotenv import load_dotenv 7 | load_dotenv() 8 | 9 | from tyler.utils.logging import get_logger 10 | logger = get_logger(__name__) 11 | 12 | # Now import everything else 13 | import os 14 | import asyncio 15 | import weave 16 | import sys 17 | from tyler import Agent, Thread, Message 18 | 19 | def custom_calculator_implementation(operation: str, x: float, y: float) -> str: 20 | """ 21 | Implementation of a simple calculator tool. 22 | """ 23 | try: 24 | if operation == "add": 25 | result = x + y 26 | elif operation == "subtract": 27 | result = x - y 28 | elif operation == "multiply": 29 | result = x * y 30 | elif operation == "divide": 31 | if y == 0: 32 | return "Error: Division by zero" 33 | result = x / y 34 | else: 35 | return f"Error: Unknown operation {operation}" 36 | 37 | return f"Result of {operation}({x}, {y}) = {result}" 38 | except Exception as e: 39 | return f"Error performing calculation: {str(e)}" 40 | 41 | # Define custom calculator tool 42 | custom_calculator_tool = { 43 | "definition": { 44 | "type": "function", 45 | "function": { 46 | "name": "calculate", 47 | "description": "Perform basic mathematical operations", 48 | "parameters": { 49 | "type": "object", 50 | "properties": { 51 | "operation": { 52 | "type": "string", 53 | "description": "The mathematical operation to perform (add, subtract, multiply, divide)", 54 | "enum": ["add", "subtract", "multiply", "divide"] 55 | }, 56 | "x": { 57 | "type": "number", 58 | "description": "First number" 59 | }, 60 | "y": { 61 | "type": "number", 62 | "description": "Second number" 63 | } 64 | }, 65 | "required": ["operation", "x", "y"] 66 | } 67 | } 68 | }, 69 | "implementation": custom_calculator_implementation 70 | } 71 | 72 | try: 73 | if os.getenv("WANDB_API_KEY"): 74 | weave.init("tyler") 75 | logger.debug("Weave tracing initialized successfully") 76 | except Exception as e: 77 | logger.warning(f"Failed to initialize weave tracing: {e}. Continuing without weave.") 78 | 79 | # Initialize the agent with both built-in and custom tools 80 | agent = Agent( 81 | model_name="gpt-4.1", 82 | purpose="To help with calculations and web searches", 83 | tools=[ 84 | "web", # Load the web tools module 85 | custom_calculator_tool, # Add our calculator tool 86 | ] 87 | ) 88 | 89 | async def main(): 90 | # Create a thread 91 | thread = Thread() 92 | 93 | # Example conversation with web page fetch followed by calculations 94 | conversations = [ 95 | "Can you fetch the content from https://adamwdraper.github.io/tyler/?", 96 | "Let's do a calculation: what is 537 divided by 3?" 
97 | ] 98 | 99 | for user_input in conversations: 100 | logger.info("User: %s", user_input) 101 | 102 | # Add user message 103 | message = Message( 104 | role="user", 105 | content=user_input 106 | ) 107 | thread.add_message(message) 108 | 109 | # Process the thread 110 | processed_thread, new_messages = await agent.go(thread) 111 | 112 | # Log responses 113 | for message in new_messages: 114 | if message.role == "assistant": 115 | logger.info("Assistant: %s", message.content) 116 | elif message.role == "tool": 117 | logger.info("Tool (%s): %s", message.name, message.content) 118 | 119 | logger.info("-" * 50) 120 | 121 | if __name__ == "__main__": 122 | try: 123 | asyncio.run(main()) 124 | except KeyboardInterrupt: 125 | logger.warning("Exiting gracefully...") 126 | sys.exit(0) -------------------------------------------------------------------------------- /docs/docs/how-it-works.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 3 3 | --- 4 | 5 | # How Tyler works 6 | 7 | Tyler's architecture is designed to make building AI agents simple while providing all the components needed for production use. Let's dive into how Tyler processes requests and manages conversations. 8 | 9 | ## Core architecture 10 | 11 | At its heart, Tyler uses an iterative approach to process messages and execute tools. Here's a high-level overview of how it works: 12 | 13 | ``` 14 | Thread -> Agent.go -> Agent.step -> LLM Call -> Has Tool Calls? - No -> Complete Response 15 | ^ | 16 | | | Yes 17 | | | 18 | +------ Execute Tools ------+ 19 | ``` 20 | 21 | ## The processing loop 22 | 23 | When you call `agent.go()` or `go_stream()`, Tyler follows these steps: 24 | 25 | 1. **Message processing** 26 | - Loads the conversation thread 27 | - Processes any attached files (images, PDFs, etc.) 28 | - Ensures the system prompt is set 29 | 30 | 2. **Step execution** 31 | - Makes an LLM call with the current context 32 | - Processes the response for content and tool calls 33 | - Streams responses in real-time (if using `go_stream`) 34 | 35 | 3. **Tool execution** 36 | - If tool calls are present, executes them in sequence 37 | - Adds tool results back to the conversation 38 | - Returns to step execution if more tools are needed 39 | 40 | 4. **Completion** 41 | - Saves the final thread state 42 | - Returns the processed thread and new messages 43 | 44 | ## Attachment handling 45 | 46 | When files are attached to messages, Tyler automatically handles their processing and storage: 47 | 48 | 1. **Attachment creation** 49 | - Files can be attached to messages using `message.add_attachment()` 50 | - Attachments can contain binary data, base64 strings, or data URLs 51 | 52 | 2. **Automatic processing** 53 | - When a thread is saved via `thread_store.save(thread)`, all pending attachments are automatically processed 54 | - Processing includes MIME type detection, content analysis, and metadata extraction 55 | - Different file types receive specialized processing (text extraction for PDFs, previews for images, etc.) 56 | 57 | 3. **Storage management** 58 | - Processed attachments are stored in the configured file storage backend 59 | - Storage paths and URLs are automatically generated and tracked 60 | - Attachments are accessible via their processed content after storage 61 | 62 | This automatic processing means you don't need to manually handle attachments - simply add them to messages, add messages to threads, and save the thread. 
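
To make this concrete, here is a minimal sketch of the flow described above. `Thread`, `Message`, `ThreadStore.create()`, and `thread_store.save()` are used the same way as in the other examples in these docs; the arguments passed to `add_attachment()` (raw bytes plus a `filename` keyword) are assumptions for illustration, so check the API reference for the exact signature in your version.

```python
from tyler import Thread, Message, ThreadStore

async def summarize_report(pdf_bytes: bytes) -> Thread:
    # In-memory thread store for this sketch; swap in your configured store
    thread_store = await ThreadStore.create()

    thread = Thread()
    message = Message(role="user", content="Please summarize the attached report.")

    # Attach raw bytes (base64 strings and data URLs are also accepted)
    message.add_attachment(pdf_bytes, filename="report.pdf")  # assumed signature

    thread.add_message(message)

    # Saving the thread processes and stores all pending attachments automatically
    await thread_store.save(thread)
    return thread
```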
63 | 64 | ## Example flow 65 | 66 | Here's a typical interaction flow: 67 | 68 | ```mermaid 69 | User->>Agent: Send message 70 | Agent->>LLM: Make completion call 71 | LLM-->>Agent: Response with tool calls 72 | Agent->>Tools: Execute tool 73 | Tools-->>Agent: Tool result 74 | Agent->>LLM: Continue with tool result 75 | LLM-->>Agent: Final response 76 | Agent->>User: Return complete response 77 | ``` 78 | 79 | ## Tool runner 80 | 81 | Tools in Tyler are managed by the `ToolRunner`, which: 82 | 83 | - Maintains a registry of available tools 84 | - Handles both synchronous and asynchronous tools 85 | - Processes tool calls from the LLM 86 | - Returns results in a standardized format 87 | 88 | Each tool has: 89 | - A name and description 90 | - Parameter definitions 91 | - Implementation function 92 | - Optional attributes (e.g., for special handling) 93 | 94 | ## Streaming support 95 | 96 | When using `go_stream()`, Tyler provides real-time updates including: 97 | 98 | - Content chunks as they arrive from the LLM 99 | - Tool execution status and results 100 | - Final thread state and messages 101 | 102 | This enables building interactive interfaces where users can see the agent's thought process and tool usage in real-time. 103 | 104 | ## Error handling and limits 105 | 106 | Tyler includes built-in safeguards: 107 | 108 | - Maximum tool iteration limit (default: 10) 109 | - Automatic error recovery 110 | - Structured error responses 111 | - Tool execution timeout handling 112 | 113 | ## Next steps 114 | 115 | - Learn about [Configuration](./configuration.md) 116 | - Explore available [Tools](./tools/overview.md) 117 | - See [Examples](./category/examples) of Tyler in action -------------------------------------------------------------------------------- /tyler/database/migrations/versions/20250208_0000_197750e12032_migrate_json_to_jsonb.py: -------------------------------------------------------------------------------- 1 | """migrate json to jsonb columns 2 | 3 | Revision ID: 197750e12032 4 | Revises: 197750e12031 5 | Create Date: 2025-02-08 00:00:00.000000 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from sqlalchemy.dialects import postgresql 11 | 12 | # revision identifiers, used by Alembic. 
13 | revision = '197750e12032' 14 | down_revision = '197750e12031' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | bind = op.get_bind() 21 | if bind.dialect.name == 'postgresql': 22 | # Table: threads 23 | op.alter_column('threads', 'attributes', 24 | existing_type=sa.JSON(), 25 | type_=postgresql.JSONB(astext_type=sa.Text()), 26 | existing_nullable=False, 27 | postgresql_using='attributes::jsonb') 28 | op.alter_column('threads', 'platforms', 29 | existing_type=sa.JSON(), 30 | type_=postgresql.JSONB(astext_type=sa.Text()), 31 | existing_nullable=True, 32 | postgresql_using='platforms::jsonb') 33 | 34 | # Table: messages 35 | op.alter_column('messages', 'tool_calls', 36 | existing_type=sa.JSON(), 37 | type_=postgresql.JSONB(astext_type=sa.Text()), 38 | existing_nullable=True, 39 | postgresql_using='tool_calls::jsonb') 40 | op.alter_column('messages', 'attributes', 41 | existing_type=sa.JSON(), 42 | type_=postgresql.JSONB(astext_type=sa.Text()), 43 | existing_nullable=False, 44 | postgresql_using='attributes::jsonb') 45 | op.alter_column('messages', 'attachments', 46 | existing_type=sa.JSON(), 47 | type_=postgresql.JSONB(astext_type=sa.Text()), 48 | existing_nullable=True, 49 | postgresql_using='attachments::jsonb') 50 | op.alter_column('messages', 'metrics', 51 | existing_type=sa.JSON(), 52 | type_=postgresql.JSONB(astext_type=sa.Text()), 53 | existing_nullable=False, 54 | postgresql_using='metrics::jsonb') 55 | op.alter_column('messages', 'platforms', 56 | existing_type=sa.JSON(), 57 | type_=postgresql.JSONB(astext_type=sa.Text()), 58 | existing_nullable=True, 59 | postgresql_using='platforms::jsonb') 60 | 61 | 62 | def downgrade(): 63 | bind = op.get_bind() 64 | if bind.dialect.name == 'postgresql': 65 | # Table: messages 66 | op.alter_column('messages', 'platforms', 67 | existing_type=postgresql.JSONB(astext_type=sa.Text()), 68 | type_=sa.JSON(), 69 | existing_nullable=True, 70 | postgresql_using='platforms::text::json') 71 | op.alter_column('messages', 'metrics', 72 | existing_type=postgresql.JSONB(astext_type=sa.Text()), 73 | type_=sa.JSON(), 74 | existing_nullable=False, 75 | postgresql_using='metrics::text::json') 76 | op.alter_column('messages', 'attachments', 77 | existing_type=postgresql.JSONB(astext_type=sa.Text()), 78 | type_=sa.JSON(), 79 | existing_nullable=True, 80 | postgresql_using='attachments::text::json') 81 | op.alter_column('messages', 'attributes', 82 | existing_type=postgresql.JSONB(astext_type=sa.Text()), 83 | type_=sa.JSON(), 84 | existing_nullable=False, 85 | postgresql_using='attributes::text::json') 86 | op.alter_column('messages', 'tool_calls', 87 | existing_type=postgresql.JSONB(astext_type=sa.Text()), 88 | type_=sa.JSON(), 89 | existing_nullable=True, 90 | postgresql_using='tool_calls::text::json') 91 | 92 | # Table: threads 93 | op.alter_column('threads', 'platforms', 94 | existing_type=postgresql.JSONB(astext_type=sa.Text()), 95 | type_=sa.JSON(), 96 | existing_nullable=True, 97 | postgresql_using='platforms::text::json') 98 | op.alter_column('threads', 'attributes', 99 | existing_type=postgresql.JSONB(astext_type=sa.Text()), 100 | type_=sa.JSON(), 101 | existing_nullable=False, 102 | postgresql_using='attributes::text::json') -------------------------------------------------------------------------------- /tyler/mcp/server_manager.py: -------------------------------------------------------------------------------- 1 | """MCP server manager implementation for Tyler.""" 2 | 3 | import asyncio 4 | import logging 5 | import 
os 6 | import signal 7 | import subprocess 8 | from typing import Any, Dict, Optional 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | class MCPServerManager: 13 | """Manager for MCP server processes.""" 14 | 15 | def __init__(self): 16 | self.processes = {} # server_name -> subprocess.Popen 17 | self.server_configs = {} # server_name -> config 18 | 19 | async def start_server(self, name: str, config: Dict[str, Any]) -> bool: 20 | """Start an MCP server process. 21 | 22 | Args: 23 | name: The name of the server 24 | config: Server configuration dictionary 25 | 26 | Returns: 27 | bool: True if server started successfully, False otherwise 28 | """ 29 | # Check if server is already running 30 | if name in self.processes and self.processes[name].poll() is None: 31 | logger.info(f"MCP server {name} is already running") 32 | return True 33 | 34 | # Get command and args 35 | command = config.get("command") 36 | args = config.get("args", []) 37 | 38 | if not command: 39 | logger.error(f"command is required for MCP server {name}") 40 | return False 41 | 42 | if "args" not in config: 43 | logger.error(f"args is required for MCP server {name}") 44 | return False 45 | 46 | # Get environment variables 47 | env_vars = config.get("env", {}) 48 | 49 | # Merge with current environment 50 | process_env = os.environ.copy() 51 | process_env.update(env_vars) 52 | 53 | # Start the process 54 | try: 55 | logger.info(f"Starting MCP server {name}: {command} {' '.join(args)}") 56 | process = subprocess.Popen( 57 | [command] + args, 58 | env=process_env, 59 | stdout=subprocess.PIPE, 60 | stderr=subprocess.PIPE, 61 | stdin=subprocess.PIPE, 62 | bufsize=0, # Unbuffered 63 | universal_newlines=False # Binary mode 64 | ) 65 | 66 | # Wait a bit to see if the process starts successfully 67 | await asyncio.sleep(0.5) 68 | 69 | # Check if process is still running 70 | if process.poll() is not None: 71 | logger.error(f"MCP server {name} failed to start") 72 | return False 73 | 74 | # Store the process and config 75 | self.processes[name] = process 76 | self.server_configs[name] = config 77 | 78 | logger.info(f"MCP server {name} started successfully") 79 | return True 80 | 81 | except Exception as e: 82 | logger.error(f"Error starting MCP server {name}: {e}") 83 | return False 84 | 85 | async def stop_server(self, name: str) -> bool: 86 | """Stop an MCP server process. 
87 | 88 | Args: 89 | name: The name of the server 90 | 91 | Returns: 92 | bool: True if server stopped successfully, False otherwise 93 | """ 94 | if name not in self.processes: 95 | logger.error(f"MCP server {name} not found") 96 | return False 97 | 98 | process = self.processes[name] 99 | 100 | # Check if process is still running 101 | if process.poll() is None: 102 | try: 103 | logger.info(f"Stopping MCP server {name}") 104 | 105 | # Send SIGTERM to the process 106 | process.terminate() 107 | 108 | # Wait for the process to terminate 109 | await asyncio.to_thread(process.wait) 110 | 111 | except Exception as e: 112 | logger.error(f"Error stopping MCP server {name}: {e}") 113 | return False 114 | 115 | # Remove the process and config 116 | del self.processes[name] 117 | del self.server_configs[name] 118 | 119 | logger.info(f"MCP server {name} stopped successfully") 120 | return True 121 | 122 | async def stop_all_servers(self) -> None: 123 | """Stop all MCP server processes.""" 124 | server_names = list(self.processes.keys()) 125 | for name in server_names: 126 | await self.stop_server(name) -------------------------------------------------------------------------------- /examples/tools_streaming.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Example demonstrating streaming updates with tool usage. 4 | """ 5 | # Load environment variables and configure logging first 6 | from dotenv import load_dotenv 7 | load_dotenv() 8 | 9 | from tyler.utils.logging import get_logger 10 | logger = get_logger(__name__) 11 | 12 | # Now import everything else 13 | import os 14 | import asyncio 15 | import weave 16 | import sys 17 | from tyler import Agent, Thread, Message, StreamUpdate 18 | 19 | def custom_calculator_implementation(operation: str, x: float, y: float) -> str: 20 | """ 21 | Implementation of a simple calculator tool. 22 | """ 23 | try: 24 | if operation == "add": 25 | result = x + y 26 | elif operation == "subtract": 27 | result = x - y 28 | elif operation == "multiply": 29 | result = x * y 30 | elif operation == "divide": 31 | if y == 0: 32 | return "Error: Division by zero" 33 | result = x / y 34 | else: 35 | return f"Error: Unknown operation {operation}" 36 | 37 | return f"Result of {operation}({x}, {y}) = {result}" 38 | except Exception as e: 39 | return f"Error performing calculation: {str(e)}" 40 | 41 | # Define custom calculator tool 42 | custom_calculator_tool = { 43 | "definition": { 44 | "type": "function", 45 | "function": { 46 | "name": "calculate", 47 | "description": "Perform basic mathematical operations", 48 | "parameters": { 49 | "type": "object", 50 | "properties": { 51 | "operation": { 52 | "type": "string", 53 | "description": "The mathematical operation to perform (add, subtract, multiply, divide)", 54 | "enum": ["add", "subtract", "multiply", "divide"] 55 | }, 56 | "x": { 57 | "type": "number", 58 | "description": "First number" 59 | }, 60 | "y": { 61 | "type": "number", 62 | "description": "Second number" 63 | } 64 | }, 65 | "required": ["operation", "x", "y"] 66 | } 67 | } 68 | }, 69 | "implementation": custom_calculator_implementation 70 | } 71 | 72 | try: 73 | if os.getenv("WANDB_API_KEY"): 74 | weave.init("tyler") 75 | logger.debug("Weave tracing initialized successfully") 76 | except Exception as e: 77 | logger.warning(f"Failed to initialize weave tracing: {e}. 
Continuing without weave.") 78 | 79 | # Initialize the agent with both built-in and custom tools 80 | agent = Agent( 81 | model_name="gpt-4.1", 82 | purpose="To help with calculations and web searches", 83 | tools=[ 84 | "web", # Load the web tools module 85 | custom_calculator_tool, # Add our calculator tool 86 | ] 87 | ) 88 | 89 | async def main(): 90 | # Create a thread 91 | thread = Thread() 92 | 93 | # Example conversation with web page fetch followed by calculations 94 | conversations = [ 95 | "Can you fetch the content from https://adamwdraper.github.io/tyler/?", 96 | "Let's do a calculation: what is 537 divided by 3?" 97 | ] 98 | 99 | for user_input in conversations: 100 | logger.info("User: %s", user_input) 101 | 102 | # Add user message 103 | message = Message( 104 | role="user", 105 | content=user_input 106 | ) 107 | thread.add_message(message) 108 | 109 | # Process the thread with streaming 110 | async for update in agent.go_stream(thread): 111 | if update.type == StreamUpdate.Type.CONTENT_CHUNK: 112 | logger.info("Content chunk: %s", update.data) 113 | elif update.type == StreamUpdate.Type.ASSISTANT_MESSAGE: 114 | logger.info("Complete assistant message: %s", update.data.content) 115 | elif update.type == StreamUpdate.Type.TOOL_MESSAGE: 116 | logger.info("Tool message: %s", update.data.content) 117 | elif update.type == StreamUpdate.Type.ERROR: 118 | logger.error("Error: %s", update.data) 119 | elif update.type == StreamUpdate.Type.COMPLETE: 120 | logger.info("Processing complete") 121 | 122 | logger.info("-" * 50) 123 | 124 | if __name__ == "__main__": 125 | try: 126 | asyncio.run(main()) 127 | except KeyboardInterrupt: 128 | logger.warning("Exiting gracefully...") 129 | sys.exit(0) -------------------------------------------------------------------------------- /tyler/utils/registry.py: -------------------------------------------------------------------------------- 1 | """Component registry for managing shared resources.""" 2 | from typing import Any, Dict, Tuple, Optional, TYPE_CHECKING 3 | import logging 4 | 5 | # Type hints with conditional imports to avoid circular dependencies 6 | if TYPE_CHECKING: 7 | from tyler import ThreadStore, FileStore 8 | 9 | logger = logging.getLogger(__name__) 10 | 11 | class Registry: 12 | """Registry for managing shared components like thread stores and file stores. 13 | 14 | This registry ensures components have stable identity across multiple 15 | agent initializations. 16 | """ 17 | 18 | # Singleton instance of the registry 19 | _instance = None 20 | 21 | # Dictionary to store registered components 22 | _components: Dict[Tuple[str, str], Any] = {} 23 | 24 | @classmethod 25 | def get_instance(cls) -> 'Registry': 26 | """Get the singleton instance of the registry.""" 27 | if cls._instance is None: 28 | cls._instance = Registry() 29 | return cls._instance 30 | 31 | def register(self, component_type: str, name: str, instance: Any) -> Any: 32 | """Register a component in the registry. 33 | 34 | Args: 35 | component_type: Type of component (e.g., "thread_store", "file_store") 36 | name: Name identifier for the component 37 | instance: The component instance to register 38 | 39 | Returns: 40 | The registered instance 41 | """ 42 | key = (component_type, name) 43 | self._components[key] = instance 44 | logger.debug(f"Registered {component_type} with name '{name}'") 45 | return instance 46 | 47 | def get(self, component_type: str, name: str) -> Optional[Any]: 48 | """Get a component from the registry. 
49 | 50 | Args: 51 | component_type: Type of component to retrieve 52 | name: Name identifier of the component 53 | 54 | Returns: 55 | The component instance or None if not found 56 | """ 57 | key = (component_type, name) 58 | component = self._components.get(key) 59 | if component is None: 60 | logger.debug(f"Component {component_type} with name '{name}' not found") 61 | return component 62 | 63 | def list(self, component_type: Optional[str] = None) -> Dict[Tuple[str, str], Any]: 64 | """List all registered components, optionally filtered by type. 65 | 66 | Args: 67 | component_type: Optional type to filter components by 68 | 69 | Returns: 70 | Dictionary of components 71 | """ 72 | if component_type is None: 73 | return self._components.copy() 74 | 75 | return {k: v for k, v in self._components.items() if k[0] == component_type} 76 | 77 | # Basic registry functions 78 | def register(component_type: str, name: str, instance: Any) -> Any: 79 | """Register a component in the global registry.""" 80 | return Registry.get_instance().register(component_type, name, instance) 81 | 82 | def get(component_type: str, name: str) -> Optional[Any]: 83 | """Get a component from the global registry.""" 84 | return Registry.get_instance().get(component_type, name) 85 | 86 | def list(component_type: Optional[str] = None) -> Dict[Tuple[str, str], Any]: 87 | """List components in the global registry.""" 88 | return Registry.get_instance().list(component_type) 89 | 90 | # Thread store specific functions 91 | def register_thread_store(name: str, thread_store: "ThreadStore") -> "ThreadStore": 92 | """Register a thread store with the given name. 93 | 94 | Args: 95 | name: Name identifier for the thread store 96 | thread_store: The thread store instance 97 | 98 | Returns: 99 | The registered thread store 100 | """ 101 | return register("thread_store", name, thread_store) 102 | 103 | def get_thread_store(name: str) -> Optional["ThreadStore"]: 104 | """Get a thread store by name. 105 | 106 | Args: 107 | name: Name of the thread store 108 | 109 | Returns: 110 | The thread store instance or None if not found 111 | """ 112 | return get("thread_store", name) 113 | 114 | # File store specific functions 115 | def register_file_store(name: str, file_store: "FileStore") -> "FileStore": 116 | """Register a file store with the given name. 117 | 118 | Args: 119 | name: Name identifier for the file store 120 | file_store: The file store instance 121 | 122 | Returns: 123 | The registered file store 124 | """ 125 | return register("file_store", name, file_store) 126 | 127 | def get_file_store(name: str) -> Optional["FileStore"]: 128 | """Get a file store by name. 129 | 130 | Args: 131 | name: Name of the file store 132 | 133 | Returns: 134 | The file store instance or None if not found 135 | """ 136 | return get("file_store", name) -------------------------------------------------------------------------------- /docs/docs/tools/notion.md: -------------------------------------------------------------------------------- 1 | # Notion integration 2 | 3 | The Notion module provides tools for interacting with Notion workspaces, allowing you to search, create, and manage content. 4 | 5 | ## Configuration 6 | 7 | Before using Notion tools, you need to set up the following environment variables: 8 | 9 | ```bash 10 | NOTION_API_KEY=your-api-key 11 | NOTION_VERSION=2022-06-28 # Or latest supported version 12 | ``` 13 | 14 | To get these credentials: 15 | 1. Create a Notion integration at https://www.notion.so/my-integrations 16 | 2. 
Copy the integration token (API key) 17 | 3. Share the pages/databases you want to access with your integration 18 | 19 | ## Available tools 20 | 21 | ### Notion-search 22 | 23 | Searches all titles of pages and databases in Notion that have been shared with the integration. 24 | 25 | #### Parameters 26 | 27 | - `query` (string, optional) 28 | - The search query to find in page/database titles 29 | - Query should focus on subject matter likely to be in titles 30 | - If not provided, returns all accessible pages/databases 31 | - Case-insensitive 32 | - Supports partial matches 33 | 34 | - `filter` (object, optional) 35 | - Filter to only return specific types of content 36 | - Properties: 37 | - `value`: Type of content to filter 38 | - "page": Only return pages 39 | - "database": Only return databases 40 | - `property`: Must be "object" 41 | 42 | - `start_cursor` (string, optional) 43 | - Cursor for pagination 44 | - Use to fetch next page of results 45 | - Obtained from previous search response 46 | 47 | - `page_size` (integer, optional) 48 | - Number of results to return 49 | - Default: 100 50 | - Range: 1-100 51 | - Use with start_cursor for pagination 52 | 53 | #### Response format 54 | 55 | The tool returns a dictionary with: 56 | - `results`: Array of found pages/databases 57 | - Each result includes: 58 | - `id`: Notion page/database ID 59 | - `title`: Content title 60 | - `url`: Notion URL 61 | - `object`: Type ("page" or "database") 62 | - `created_time`: Creation timestamp 63 | - `last_edited_time`: Last edit timestamp 64 | - `next_cursor`: Cursor for next page (if more results) 65 | - `has_more`: Boolean indicating if more results exist 66 | 67 | #### Example Usage 68 | 69 | ```python 70 | from tyler.models import Agent, Thread, Message 71 | 72 | # Create an agent with Notion tools 73 | agent = Agent( 74 | model_name="gpt-4.1", 75 | purpose="To help with Notion content", 76 | tools=["notion"] 77 | ) 78 | 79 | # Create a thread with a search request 80 | thread = Thread() 81 | message = Message( 82 | role="user", 83 | content="Find all project planning documents in Notion" 84 | ) 85 | thread.add_message(message) 86 | 87 | # Process the thread - agent will use notion-search tool 88 | processed_thread, new_messages = await agent.go(thread) 89 | ``` 90 | 91 | ## Best practices 92 | 93 | 1. **Search Optimization** 94 | - Use specific search terms 95 | - Consider title patterns 96 | - Handle pagination for large results 97 | 98 | 2. **Error Handling** 99 | - Check API responses 100 | - Handle rate limits 101 | - Validate permissions 102 | 103 | 3. **Content Access** 104 | - Share pages explicitly 105 | - Manage integration permissions 106 | - Track accessed content 107 | 108 | 4. **Performance** 109 | - Use appropriate page sizes 110 | - Implement pagination 111 | - Cache results when appropriate 112 | 113 | ## Common use cases 114 | 115 | 1. **Content Discovery** 116 | - Find relevant documents 117 | - Locate databases 118 | - Search project resources 119 | 120 | 2. **Content Organization** 121 | - Audit page access 122 | - Track document changes 123 | - Manage workspaces 124 | 125 | 3. **Integration** 126 | - Connect with other tools 127 | - Automate workflows 128 | - Sync content 129 | 130 | ## Security considerations 131 | 132 | 1. **Access Control** 133 | - Share only necessary content 134 | - Review integration permissions 135 | - Monitor access patterns 136 | 137 | 2. **API Keys** 138 | - Secure storage of keys 139 | - Regular key rotation 140 | - Access logging 141 | 142 | 3. 
**Content Security** 143 | - Validate content access 144 | - Handle sensitive data 145 | - Respect workspace boundaries 146 | 147 | ## Limitations 148 | 149 | 1. **API Constraints** 150 | - Rate limits apply 151 | - Maximum page size of 100 152 | - Some operations require specific permissions 153 | 154 | 2. **Search Limitations** 155 | - Title-only search 156 | - No full-text search 157 | - Limited filter options 158 | 159 | 3. **Integration Scope** 160 | - Access limited to shared content 161 | - Some operations restricted 162 | - Workspace boundaries enforced 163 | 164 | ## Error handling 165 | 166 | Common issues and solutions: 167 | 168 | 1. **Authentication Errors** 169 | - Verify API key is set 170 | - Check environment variables 171 | - Validate API version 172 | 173 | 2. **Access Denied** 174 | - Confirm page is shared 175 | - Check integration permissions 176 | - Verify workspace access 177 | 178 | 3. **Rate Limiting** 179 | - Implement backoff 180 | - Monitor usage 181 | - Optimize requests -------------------------------------------------------------------------------- /docs/docs/examples/streaming.md: -------------------------------------------------------------------------------- 1 | # Streaming Responses 2 | 3 | Tyler supports streaming responses from the agent, allowing you to build highly interactive applications that show responses in real-time. This example demonstrates how to use streaming with both basic responses and tool execution. 4 | 5 | ## Basic Streaming Example 6 | 7 | Here's a complete example that shows how to use streaming responses with multiple conversation turns: 8 | 9 | ```python 10 | from dotenv import load_dotenv 11 | from tyler.models.agent import Agent, StreamUpdate, Thread, Message 12 | import asyncio 13 | import weave 14 | import os 15 | import logging 16 | import sys 17 | 18 | logger = logging.getLogger(__name__) 19 | 20 | # Load environment variables from .env file 21 | load_dotenv() 22 | 23 | try: 24 | if os.getenv("WANDB_API_KEY"): 25 | weave.init("tyler") 26 | logger.info("Weave tracing initialized successfully") 27 | except Exception as e: 28 | logger.warning(f"Failed to initialize weave tracing: {e}. Continuing without weave.") 29 | 30 | # Initialize the agent 31 | agent = Agent( 32 | model_name="gpt-4.1", # Using latest GPT-4.1 model 33 | purpose="To be a helpful assistant that can answer questions and perform tasks.", 34 | tools=[ 35 | "web", # Enable web tools for fetching and processing web content 36 | "command_line" # Enable command line tools for system operations 37 | ], 38 | temperature=0.7 39 | ) 40 | 41 | async def main(): 42 | # Example conversation with multiple turns 43 | conversations = [ 44 | "Tell me about the benefits of exercise.", 45 | "What specific exercises are good for beginners?", 46 | "How often should beginners exercise?" 
47 |     ]
48 | 
49 |     # Create a single thread for the entire conversation
50 |     thread = Thread()
51 | 
52 |     for user_input in conversations:
53 |         print(f"\nUser: {user_input}")
54 | 
55 |         # Add user message to thread
56 |         message = Message(
57 |             role="user",
58 |             content=user_input
59 |         )
60 |         thread.add_message(message)
61 | 
62 |         print("\nAssistant: ", end='', flush=True)
63 | 
64 |         # Process the thread using go_stream
65 |         async for update in agent.go_stream(thread):
66 |             if update.type == StreamUpdate.Type.CONTENT_CHUNK:
67 |                 # Print content chunks as they arrive
68 |                 print(update.data, end='', flush=True)
69 |             elif update.type == StreamUpdate.Type.TOOL_MESSAGE:
70 |                 # Print tool results on new lines
71 |                 tool_message = update.data
72 |                 print(f"\nTool ({tool_message.name}): {tool_message.content}")
73 |             elif update.type == StreamUpdate.Type.ERROR:
74 |                 # Print any errors that occur
75 |                 print(f"\nError: {update.data}")
76 |             elif update.type == StreamUpdate.Type.COMPLETE:
77 |                 # Final update contains (thread, new_messages)
78 |                 print() # Add newline after completion
79 | 
80 |         print("\n" + "-"*50) # Separator between conversations
81 | 
82 | if __name__ == "__main__":
83 |     try:
84 |         asyncio.run(main())
85 |     except KeyboardInterrupt:
86 |         print("\nExiting gracefully...")
87 |         sys.exit(0)
88 | ```
89 | ## How It Works
90 | 
91 | 1. **Environment Setup**:
92 |    - Loads environment variables from `.env`
93 |    - Initializes Weave tracing if configured
94 |    - Sets up logging
95 | 
96 | 2. **Agent Configuration**:
97 |    - Uses the latest GPT-4.1 model
98 |    - Enables web and command line tools
99 |    - Sets a specific purpose and temperature
100 | 
101 | 3. **Conversation Management**:
102 |    - Creates a single thread for the entire conversation
103 |    - Handles multiple conversation turns
104 |    - Maintains context between messages
105 | 
106 | 4. **Streaming Updates**:
107 |    - Processes content chunks in real-time
108 |    - Handles tool execution results
109 |    - Manages errors and completion states
110 | 
111 | ## Best Practices
112 | 
113 | 1. **Environment Management**:
114 |    - Use `.env` for configuration
115 |    - Handle missing environment variables gracefully
116 |    - Set up proper logging
117 | 
118 | 2. **Error Handling**:
119 |    - Catch and log initialization errors
120 |    - Handle streaming errors appropriately
121 |    - Provide graceful shutdown
122 | 
123 | 3. **User Experience**:
124 |    - Show clear user/assistant separation
125 |    - Use proper output formatting
126 |    - Maintain conversation context
127 | 
128 | 4. **Tool Integration**:
129 |    - Enable relevant tools for the use case
130 |    - Handle tool results appropriately
131 |    - Display tool output clearly
132 | 
133 | ## Running the Example
134 | 
135 | 1. Install Tyler and dependencies:
136 | ```bash
137 | pip install tyler-agent
138 | ```
139 | 
140 | 2. Set up your environment variables in `.env`:
141 | ```bash
142 | OPENAI_API_KEY=your_api_key_here
143 | WANDB_API_KEY=your_wandb_key_here # Optional, for tracing
144 | ```
145 | 
146 | 3. Run the example:
147 | ```bash
148 | python examples/streaming.py
149 | ```
150 | 
151 | ## Expected Output
152 | 
153 | ```
154 | User: Tell me about the benefits of exercise.
-------------------------------------------------------------------------------- /docs/docs/api-reference/mcp.md: -------------------------------------------------------------------------------- 1 | # MCP Reference 2 | 3 | This page provides detailed reference information for Tyler's [Model Context Protocol (MCP)](https://modelcontextprotocol.io/introduction) integration components. 4 | 5 | ## MCPService 6 | 7 | The `MCPService` class manages connections to MCP servers, discovers tools, and handles tool execution. 8 | 9 | ### Methods 10 | 11 | #### `async initialize(server_configs: List[Dict[str, Any]]) -> None` 12 | 13 | Initializes the MCP service with the provided server configurations. 14 | 15 | **Parameters:** 16 | - `server_configs`: List of server configuration dictionaries 17 | 18 | **Example:** 19 | ```python 20 | await mcp_service.initialize([ 21 | { 22 | "name": "brave-search", 23 | "transport": "stdio", 24 | "command": ["python", "-m", "brave_search.server"], 25 | "auto_start": True 26 | } 27 | ]) 28 | ``` 29 | 30 | #### `async _connect_to_server(name: str, config: Dict[str, Any]) -> Optional[ClientSession]` 31 | 32 | Connects to an MCP server using the specified configuration. 33 | 34 | **Parameters:** 35 | - `name`: Server name 36 | - `config`: Server configuration dictionary 37 | 38 | **Returns:** 39 | - `ClientSession` object if connection is successful, `None` otherwise 40 | 41 | #### `async _discover_tools(name: str, session: ClientSession) -> None` 42 | 43 | Discovers available tools from an MCP server. 44 | 45 | **Parameters:** 46 | - `name`: Server name 47 | - `session`: MCP client session 48 | 49 | #### `_convert_mcp_tool_to_tyler_tool(server_name: str, tool, session: ClientSession) -> Dict` 50 | 51 | Converts an MCP tool definition to a Tyler-compatible tool definition. 52 | 53 | **Parameters:** 54 | - `server_name`: Server name 55 | - `tool`: MCP tool definition 56 | - `session`: MCP client session 57 | 58 | **Returns:** 59 | - Tyler-compatible tool definition dictionary 60 | 61 | #### `_create_tool_implementation(server_name: str, tool_name: str)` 62 | 63 | Creates a function that implements an MCP tool. 64 | 65 | **Parameters:** 66 | - `server_name`: Server name 67 | - `tool_name`: Tool name 68 | 69 | **Returns:** 70 | - Function that executes the MCP tool 71 | 72 | #### `get_tools_for_agent(server_names=None)` 73 | 74 | Gets all available tools for use with a Tyler agent. 75 | 76 | **Parameters:** 77 | - `server_names`: Optional list of server names to filter tools by 78 | 79 | **Returns:** 80 | - List of Tyler-compatible tool definitions 81 | 82 | #### `async cleanup()` 83 | 84 | Cleans up all MCP server connections. 85 | 86 | ## MCPServerManager 87 | 88 | The `MCPServerManager` class handles starting, stopping, and managing MCP server processes. 89 | 90 | ### Methods 91 | 92 | #### `async start_server(name: str, config: Dict[str, Any]) -> bool` 93 | 94 | Starts an MCP server with the specified configuration. 95 | 96 | **Parameters:** 97 | - `name`: Server name 98 | - `config`: Server configuration dictionary 99 | 100 | **Returns:** 101 | - `True` if server started successfully, `False` otherwise 102 | 103 | **Example:** 104 | ```python 105 | success = await server_manager.start_server("brave-search", { 106 | "transport": "stdio", 107 | "command": ["python", "-m", "brave_search.server"], 108 | "auto_start": True 109 | }) 110 | ``` 111 | 112 | #### `async stop_server(name: str) -> bool` 113 | 114 | Stops a running MCP server. 
115 | 116 | **Parameters:** 117 | - `name`: Server name 118 | 119 | **Returns:** 120 | - `True` if server stopped successfully, `False` otherwise 121 | 122 | **Example:** 123 | ```python 124 | success = await server_manager.stop_server("brave-search") 125 | ``` 126 | 127 | #### `async stop_all_servers() -> None` 128 | 129 | Stops all running MCP servers. 130 | 131 | **Example:** 132 | ```python 133 | await server_manager.stop_all_servers() 134 | ``` 135 | 136 | ## Utility Functions 137 | 138 | ### `async initialize_mcp_service(server_configs: List[Dict[str, Any]]) -> MCPService` 139 | 140 | Initializes and returns an MCPService instance. 141 | 142 | **Parameters:** 143 | - `server_configs`: List of server configuration dictionaries 144 | 145 | **Returns:** 146 | - Initialized `MCPService` instance 147 | 148 | **Example:** 149 | ```python 150 | mcp_service = await initialize_mcp_service([ 151 | { 152 | "name": "brave-search", 153 | "transport": "stdio", 154 | "command": ["python", "-m", "brave_search.server"], 155 | "auto_start": True 156 | } 157 | ]) 158 | ``` 159 | 160 | ### `async cleanup_mcp_service(mcp_service: MCPService) -> None` 161 | 162 | Cleans up an MCPService instance. 163 | 164 | **Parameters:** 165 | - `mcp_service`: MCPService instance to clean up 166 | 167 | **Example:** 168 | ```python 169 | await cleanup_mcp_service(mcp_service) 170 | ``` 171 | 172 | ## Server Configuration 173 | 174 | MCP servers can be configured with the following options: 175 | 176 | | Option | Description | Required | Default | 177 | |--------|-------------|----------|---------| 178 | | `name` | Unique identifier for the server | Yes | - | 179 | | `transport` | Transport protocol: `stdio`, `sse`, or `websocket` | Yes | - | 180 | | `command` | Command to start the server (for `stdio` transport with `auto_start: true`) | For `stdio` with `auto_start: true` | - | 181 | | `auto_start` | Whether Tyler should automatically start and manage the server | No | `False` | 182 | | `url` | URL for connecting to the server (for `sse` and `websocket` transports) | For `sse` and `websocket` | - | 183 | | `headers` | Optional HTTP headers for connection (for `sse` and `websocket` transports) | No | `{}` | -------------------------------------------------------------------------------- /tests/tools/test_command_line.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import subprocess 3 | from unittest.mock import patch, MagicMock 4 | from tyler.tools.command_line import ( 5 | is_safe_path, 6 | is_safe_command, 7 | validate_file_operation, 8 | run_command, 9 | SAFE_COMMANDS, 10 | FILE_MODIFYING_COMMANDS 11 | ) 12 | 13 | @pytest.fixture 14 | def mock_cwd(): 15 | """Fixture to mock current working directory""" 16 | with patch('os.getcwd') as mock: 17 | mock.return_value = '/workspace' 18 | yield mock 19 | 20 | def test_is_safe_path(mock_cwd): 21 | """Test path safety validation""" 22 | # Valid paths 23 | assert is_safe_path('/workspace/test.txt') 24 | assert is_safe_path('test.txt') 25 | assert is_safe_path('./test.txt') 26 | 27 | # Invalid paths 28 | assert not is_safe_path('/etc/passwd') 29 | assert not is_safe_path('../outside.txt') 30 | assert not is_safe_path(None) 31 | assert not is_safe_path('') 32 | assert not is_safe_path(' ') 33 | assert not is_safe_path('/workspace/../etc/passwd') 34 | assert not is_safe_path('\0malicious') 35 | 36 | def test_is_safe_command(): 37 | """Test command safety validation""" 38 | # Valid commands 39 | assert 
is_safe_command('ls') 40 | assert is_safe_command('ls -la') 41 | assert is_safe_command('cat test.txt') 42 | assert is_safe_command('grep pattern file.txt') 43 | 44 | # Invalid commands 45 | assert not is_safe_command('rm -rf /') 46 | assert not is_safe_command('ls && rm -rf /') 47 | assert not is_safe_command('ls; rm -rf /') 48 | assert not is_safe_command('ls | rm -rf /') 49 | assert not is_safe_command('`rm -rf /`') 50 | assert not is_safe_command('$(rm -rf /)') 51 | assert not is_safe_command('sudo rm -rf /') 52 | assert not is_safe_command('not_whitelisted_cmd') 53 | 54 | def test_validate_file_operation(mock_cwd): 55 | """Test validation of file modification commands""" 56 | # Valid operations 57 | assert validate_file_operation('rm', ['rm', 'test.txt']) 58 | assert validate_file_operation('cp', ['cp', 'source.txt', 'dest.txt']) 59 | assert validate_file_operation('mv', ['mv', 'old.txt', 'new.txt']) 60 | assert validate_file_operation('echo', ['echo', 'text', '>', 'file.txt']) 61 | assert validate_file_operation('mkdir', ['mkdir', 'newdir']) 62 | 63 | # Invalid operations 64 | assert not validate_file_operation('rm', ['rm', '-rf', '/']) 65 | assert not validate_file_operation('rm', ['rm', '-r', 'dir']) 66 | assert not validate_file_operation('cp', ['cp', '/etc/passwd', 'hack.txt']) 67 | assert not validate_file_operation('mv', ['mv', 'file.txt', '/etc/passwd']) 68 | assert not validate_file_operation('cp', ['cp', 'file1', 'file2', 'file3']) 69 | 70 | @patch('subprocess.run') 71 | def test_run_command_success(mock_run, mock_cwd): 72 | """Test successful command execution""" 73 | mock_process = MagicMock() 74 | mock_process.stdout = "command output" 75 | mock_process.stderr = "" 76 | mock_process.returncode = 0 77 | mock_run.return_value = mock_process 78 | 79 | result = run_command(command="ls -la") 80 | 81 | assert result["command"] == "ls -la" 82 | assert result["output"] == "command output" 83 | assert result["error"] is None 84 | assert result["exit_code"] == 0 85 | 86 | mock_run.assert_called_once_with( 87 | "ls -la", 88 | shell=True, 89 | cwd=".", 90 | capture_output=True, 91 | text=True, 92 | timeout=30 93 | ) 94 | 95 | @patch('subprocess.run') 96 | def test_run_command_with_error(mock_run): 97 | """Test command execution with error""" 98 | mock_process = MagicMock() 99 | mock_process.stdout = "" 100 | mock_process.stderr = "error message" 101 | mock_process.returncode = 1 102 | mock_run.return_value = mock_process 103 | 104 | result = run_command(command="cat nonexistent.txt") 105 | 106 | assert result["command"] == "cat nonexistent.txt" 107 | assert result["output"] == "" 108 | assert result["error"] == "error message" 109 | assert result["exit_code"] == 1 110 | 111 | def test_run_command_unsafe(): 112 | """Test rejection of unsafe commands""" 113 | result = run_command(command="rm -rf /") 114 | assert "error" in result 115 | assert "Command not allowed" in result["error"] 116 | 117 | @patch('subprocess.run') 118 | def test_run_command_timeout(mock_run): 119 | """Test command timeout handling""" 120 | mock_run.side_effect = subprocess.TimeoutExpired(cmd="sleep 100", timeout=30) 121 | 122 | result = run_command(command="sleep 100") 123 | assert "error" in result 124 | assert "Command timed out" in result["error"] 125 | 126 | def test_safe_commands_consistency(): 127 | """Test consistency between SAFE_COMMANDS and FILE_MODIFYING_COMMANDS""" 128 | # All file modifying commands should be in safe commands 129 | assert all(cmd in SAFE_COMMANDS for cmd in FILE_MODIFYING_COMMANDS) 
130 | 131 | # All commands should have descriptions 132 | assert all(isinstance(desc, str) and desc for desc in SAFE_COMMANDS.values()) 133 | 134 | @patch('subprocess.run') 135 | def test_run_command_working_dir(mock_run): 136 | """Test command execution with custom working directory""" 137 | mock_process = MagicMock() 138 | mock_process.stdout = "command output" 139 | mock_process.stderr = "" 140 | mock_process.returncode = 0 141 | mock_run.return_value = mock_process 142 | 143 | result = run_command(command="ls", working_dir="/workspace/subdir") 144 | 145 | mock_run.assert_called_once_with( 146 | "ls", 147 | shell=True, 148 | cwd="/workspace/subdir", 149 | capture_output=True, 150 | text=True, 151 | timeout=30 152 | ) -------------------------------------------------------------------------------- /docs/docs/tools/audio.md: -------------------------------------------------------------------------------- 1 | # Audio processing 2 | 3 | The audio module provides tools for text-to-speech synthesis and speech-to-text transcription using advanced AI models. 4 | 5 | ## Available tools 6 | 7 | ### Text-to-speech 8 | 9 | Convert text to natural-sounding speech using AI voices. 10 | 11 | #### Parameters 12 | 13 | - `input` (string, required) 14 | - Text to convert to speech 15 | - Maximum 4096 characters 16 | 17 | - `voice` (string, optional) 18 | - Voice to use for speech generation 19 | - Options: 20 | - `alloy` (default) 21 | - `echo` 22 | - `fable` 23 | - `onyx` 24 | - `nova` 25 | - `shimmer` 26 | 27 | - `model` (string, optional) 28 | - Model to use for generation 29 | - Options: 30 | - `tts-1` (default) 31 | - `tts-1-hd` 32 | 33 | - `response_format` (string, optional) 34 | - Audio file format 35 | - Options: 36 | - `mp3` (default) 37 | - `opus` 38 | - `aac` 39 | - `flac` 40 | 41 | - `speed` (float, optional) 42 | - Speed of generated audio 43 | - Range: 0.25 to 4.0 44 | - Default: 1.0 45 | 46 | #### Returns 47 | 48 | A tuple containing: 49 | 50 | 1. Status dictionary: 51 | - `success`: Boolean indicating success 52 | - `description`: Description of generated audio 53 | - `details`: Dictionary containing: 54 | - `filename`: Generated audio filename 55 | - `voice`: Voice used 56 | - `model`: Model used 57 | - `format`: Audio format 58 | - `speed`: Speed setting 59 | - `text_length`: Length of input text 60 | - `error`: Error message if failed 61 | 62 | 2. Files array containing: 63 | - `content`: Audio file content (bytes) 64 | - `filename`: Audio filename 65 | - `mime_type`: Audio MIME type 66 | - `description`: Audio description 67 | 68 | ### Speech-to-text 69 | 70 | Transcribe speech from audio files to text. 
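For a quick illustration, here is a minimal sketch that asks an audio-enabled agent to transcribe a file. It follows the same agent-level pattern as the Example usage section further down this page; the filename `meeting.mp3` and the language hint are illustrative assumptions, not requirements of the tool.

```python
import asyncio
from tyler.models import Agent, Thread, Message

async def transcribe_example():
    # An agent created with the audio tool group can route this request
    # to the speech-to-text tool on its own.
    agent = Agent(
        model_name="gpt-4.1",
        purpose="To transcribe audio recordings",
        tools=["audio"]
    )

    thread = Thread()
    thread.add_message(Message(
        role="user",
        content="Transcribe meeting.mp3; the speakers are talking in English."
    ))

    # agent.go returns the processed thread and the new messages it produced
    processed_thread, new_messages = await agent.go(thread)
    return new_messages

if __name__ == "__main__":
    asyncio.run(transcribe_example())
```

The accepted parameters and the structure of the returned transcription are documented below.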
71 | 72 | #### Parameters 73 | 74 | - `file_url` (string, required) 75 | - Path to the audio file 76 | - Must be a valid local file path 77 | 78 | - `language` (string, optional) 79 | - Language code in ISO-639-1 format 80 | - If not specified, auto-detects language 81 | 82 | - `prompt` (string, optional) 83 | - Text to guide transcription style 84 | - Useful for continuing previous segments 85 | 86 | #### Returns 87 | 88 | A dictionary containing: 89 | - `success`: Boolean indicating success 90 | - `text`: Transcribed text 91 | - `details`: Dictionary containing: 92 | - `model`: Model used 93 | - `language`: Language detected/used 94 | - `file_url`: Original file path 95 | - `error`: Error message if failed 96 | 97 | ## Example usage 98 | 99 | ```python 100 | from tyler.models import Agent, Thread, Message 101 | 102 | # Create an agent with audio tools 103 | agent = Agent( 104 | model_name="gpt-4.1", 105 | purpose="To help with audio processing", 106 | tools=["audio"] 107 | ) 108 | 109 | # Create a thread for text-to-speech 110 | thread = Thread() 111 | message = Message( 112 | role="user", 113 | content='Convert this text to speech: "Hello, how are you today?"' 114 | ) 115 | thread.add_message(message) 116 | 117 | # Process the thread - agent will use text-to-speech tool 118 | processed_thread, new_messages = await agent.go(thread) 119 | 120 | # Example of speech-to-text 121 | transcribe_thread = Thread() 122 | message = Message( 123 | role="user", 124 | content="Transcribe the audio from recording.mp3" 125 | ) 126 | transcribe_thread.add_message(message) 127 | 128 | # Process the thread - agent will use speech-to-text tool 129 | processed_transcription, new_messages = await agent.go(transcribe_thread) 130 | ``` 131 | 132 | ## Best practices 133 | 134 | 1. **Text-to-Speech** 135 | - Keep text within length limits 136 | - Choose appropriate voice for content 137 | - Consider audio quality needs 138 | - Use natural language input 139 | 140 | 2. **Speech-to-Text** 141 | - Use high-quality audio input 142 | - Specify language when known 143 | - Provide context with prompts 144 | - Consider audio format support 145 | 146 | 3. **Audio Quality** 147 | - Select appropriate formats 148 | - Use HD models when needed 149 | - Adjust speed carefully 150 | - Monitor file sizes 151 | 152 | 4. **Resource Management** 153 | - Handle large files properly 154 | - Monitor API usage 155 | - Manage storage space 156 | - Consider bandwidth usage 157 | 158 | ## Common use cases 159 | 160 | 1. **Content Creation** 161 | - Audiobook generation 162 | - Voice-over production 163 | - Podcast content 164 | - Educational materials 165 | 166 | 2. **Accessibility** 167 | - Text-to-speech for visually impaired 168 | - Transcription for hearing impaired 169 | - Multi-language support 170 | - Audio documentation 171 | 172 | 3. **Audio Processing** 173 | - Meeting transcription 174 | - Voice note conversion 175 | - Audio content analysis 176 | - Language learning tools 177 | 178 | ## Limitations 179 | 180 | 1. **Text-to-Speech** 181 | - 4096 character limit per request 182 | - Limited voice options 183 | - Language constraints 184 | - Pronunciation accuracy 185 | 186 | 2. **Speech-to-Text** 187 | - Background noise sensitivity 188 | - Accent recognition 189 | - Speaker separation 190 | - Technical terminology 191 | 192 | 3. 
**General Constraints** 193 | - API rate limits 194 | - File size limits 195 | - Processing time 196 | - Cost considerations 197 | 198 | ## Error handling 199 | 200 | Common errors and solutions: 201 | 202 | 1. **Input Validation** 203 | - Check text length 204 | - Verify file formats 205 | - Validate parameters 206 | - Handle special characters 207 | 208 | 2. **Processing Issues** 209 | - Handle API errors 210 | - Manage timeouts 211 | - Process format errors 212 | - Handle quality issues 213 | 214 | 3. **Resource Errors** 215 | - Monitor API quotas 216 | - Handle storage limits 217 | - Manage bandwidth 218 | - Control concurrency -------------------------------------------------------------------------------- /docs/docs/tools/image.md: -------------------------------------------------------------------------------- 1 | # Image generation and analysis 2 | 3 | The image module provides tools for generating and analyzing images using advanced AI models. These tools allow you to create high-quality images from text descriptions and analyze existing images. 4 | 5 | ## Configuration 6 | 7 | Before using image tools, you need to set up your OpenAI API key: 8 | 9 | ```bash 10 | OPENAI_API_KEY=your-openai-key 11 | ``` 12 | 13 | You can get an API key from the [OpenAI platform](https://platform.openai.com/api-keys). 14 | 15 | ## Available tools 16 | 17 | ### Image-generate 18 | 19 | Generates images based on text descriptions using DALL-E 3. 20 | 21 | #### Parameters 22 | 23 | - `prompt` (string, required) 24 | - Text description of the desired image 25 | - Maximum 4000 characters 26 | 27 | - `size` (string, optional) 28 | - Size of the generated image 29 | - Options: 30 | - `1024x1024` (default) 31 | - `1792x1024` 32 | - `1024x1792` 33 | 34 | - `quality` (string, optional) 35 | - Quality level of the generated image 36 | - Options: 37 | - `standard` (default) 38 | - `hd` 39 | 40 | - `style` (string, optional) 41 | - Visual style of the generated image 42 | - Options: 43 | - `vivid` (default) 44 | - `natural` 45 | 46 | #### Returns 47 | 48 | A dictionary containing: 49 | - `success`: Boolean indicating success 50 | - `description`: Revised prompt used for generation 51 | - `details`: Dictionary containing: 52 | - `filename`: Generated image filename 53 | - `size`: Image dimensions used 54 | - `quality`: Quality setting used 55 | - `style`: Style setting used 56 | - `created`: Timestamp 57 | - `error`: Error message if failed 58 | 59 | Files array containing: 60 | - `content`: Base64 encoded image data 61 | - `filename`: Image filename 62 | - `mime_type`: Image format (e.g., "image/png") 63 | - `description`: Image description 64 | 65 | ### Analyze-image 66 | 67 | Analyzes and describes the contents of an image using GPT-4V. 
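As a minimal sketch of how this is typically invoked (it mirrors the agent-level pattern in the Example usage section below; the file path `/path/to/photo.jpg` and the analysis focus are illustrative assumptions):

```python
import asyncio
from tyler.models import Agent, Thread, Message

async def analyze_example():
    # An agent created with the image tool group can route this request
    # to the analyze-image tool on its own.
    agent = Agent(
        model_name="gpt-4.1",
        purpose="To analyze images",
        tools=["image"]
    )

    thread = Thread()
    thread.add_message(Message(
        role="user",
        content="Describe the composition and lighting of the image at /path/to/photo.jpg"
    ))

    processed_thread, new_messages = await agent.go(thread)
    return new_messages

if __name__ == "__main__":
    asyncio.run(analyze_example())
```

The accepted parameters and the structure of the returned analysis are documented below.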
68 | 69 | #### Parameters 70 | 71 | - `file_url` (string, required) 72 | - Path to the image file 73 | - Must be a valid local file path or URL 74 | 75 | - `prompt` (string, optional) 76 | - Custom prompt to guide the analysis 77 | - Use to focus on specific aspects of the image 78 | 79 | #### Returns 80 | 81 | A dictionary containing: 82 | - `success`: Boolean indicating success 83 | - `analysis`: Detailed analysis of the image 84 | - `file_url`: Original image path 85 | - `error`: Error message if failed 86 | 87 | ## Example usage 88 | 89 | ```python 90 | from tyler.models import Agent, Thread, Message 91 | 92 | # Create an agent with image tools 93 | agent = Agent( 94 | model_name="gpt-4.1", 95 | purpose="To help with image generation and analysis", 96 | tools=["image"] 97 | ) 98 | 99 | # Create a thread for image generation 100 | thread = Thread() 101 | message = Message( 102 | role="user", 103 | content="Generate an image of a serene Japanese garden" 104 | ) 105 | thread.add_message(message) 106 | 107 | # Process the thread - agent will use image-generate tool 108 | processed_thread, new_messages = await agent.go(thread) 109 | 110 | # Example of image analysis 111 | analysis_thread = Thread() 112 | message = Message( 113 | role="user", 114 | content="Analyze the artistic style of the image at /path/to/image.jpg" 115 | ) 116 | analysis_thread.add_message(message) 117 | 118 | # Process the thread - agent will use analyze-image tool 119 | processed_analysis, new_messages = await agent.go(analysis_thread) 120 | ``` 121 | 122 | ## Best practices 123 | 124 | 1. **Effective Prompting** 125 | - Be specific and detailed in descriptions 126 | - Include style preferences when relevant 127 | - Consider composition and layout 128 | - Use clear, unambiguous language 129 | 130 | 2. **Image Generation Settings** 131 | - Choose appropriate size for the use case: 132 | - `1024x1024` for balanced compositions 133 | - `1792x1024` for landscapes 134 | - `1024x1792` for portraits 135 | - Select quality based on needs: 136 | - `hd` for professional/detailed work 137 | - `standard` for prototypes/drafts 138 | - Pick style to match content: 139 | - `vivid` for dramatic/digital art 140 | - `natural` for photorealistic results 141 | 142 | 3. **Image Analysis** 143 | - Provide clear analysis prompts 144 | - Focus on specific aspects 145 | - Use domain-specific terminology 146 | - Consider context and purpose 147 | 148 | 4. **Error Handling** 149 | - Validate input parameters 150 | - Check file paths and URLs 151 | - Handle API rate limits 152 | - Process responses appropriately 153 | 154 | ## Common use cases 155 | 156 | 1. **Content Creation** 157 | - Marketing materials 158 | - Website illustrations 159 | - Social media content 160 | - Educational resources 161 | 162 | 2. **Visual Analysis** 163 | - Art critique 164 | - Design feedback 165 | - Content moderation 166 | - Technical inspection 167 | 168 | 3. **Creative Assistance** 169 | - Concept visualization 170 | - Storyboarding 171 | - Mood boards 172 | - Style exploration 173 | 174 | ## Limitations 175 | 176 | 1. **Generation Constraints** 177 | - 4000 character prompt limit 178 | - Fixed size options 179 | - Content safety filters 180 | - No real person generation 181 | 182 | 2. **Analysis Constraints** 183 | - Text recognition accuracy varies 184 | - Complex scene understanding 185 | - Cultural context awareness 186 | - Technical detail precision 187 | 188 | ## Error handling 189 | 190 | Common errors and solutions: 191 | 192 | 1. 
**API Errors** 193 | - Check API key validity 194 | - Monitor rate limits 195 | - Handle timeouts 196 | - Validate responses 197 | 198 | 2. **Content Filters** 199 | - Review content guidelines 200 | - Adjust descriptions 201 | - Check restricted content 202 | 203 | 3. **File Operations** 204 | - Verify file paths 205 | - Check permissions 206 | - Validate formats 207 | - Handle large files -------------------------------------------------------------------------------- /examples/attachments.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Example demonstrating how to work with attachments in threads. 4 | Shows both manual attachment creation and handling tool-generated attachments. 5 | """ 6 | from dotenv import load_dotenv 7 | load_dotenv() 8 | 9 | from tyler.utils.logging import get_logger 10 | logger = get_logger(__name__) 11 | 12 | import os 13 | import asyncio 14 | import weave 15 | import sys 16 | from tyler import Agent, Thread, Message, Attachment, ThreadStore, FileStore 17 | 18 | try: 19 | if os.getenv("WANDB_API_KEY"): 20 | weave.init("tyler") 21 | logger.debug("Weave tracing initialized successfully") 22 | except Exception as e: 23 | logger.warning(f"Failed to initialize weave tracing: {e}. Continuing without weave.") 24 | 25 | async def setup(): 26 | """Initialize storage backends""" 27 | # Create thread store using factory pattern 28 | thread_store = await ThreadStore.create() 29 | 30 | # Create file store using factory pattern 31 | file_store = await FileStore.create() 32 | logger.info("Storage backends initialized") 33 | 34 | # Initialize the agent with thread_store, file_store and image tools 35 | agent = Agent( 36 | model_name="gpt-4.1", 37 | purpose="To help with image generation and analysis.", 38 | temperature=0.7, 39 | tools=["image"], # Include image tools for this example 40 | thread_store=thread_store, 41 | file_store=file_store 42 | ) 43 | 44 | return thread_store, agent, file_store 45 | 46 | async def example_manual_attachment(thread_store, file_store): 47 | """Example of manually creating and adding attachments""" 48 | logger.info("Example 1: Manual Attachment Creation") 49 | 50 | # Create a thread 51 | thread = Thread(title="Thread with Manual Attachments") 52 | 53 | # Create a text file attachment 54 | text_content = "Hello, this is a sample text file".encode('utf-8') 55 | text_attachment = Attachment( 56 | filename="sample.txt", 57 | content=text_content, 58 | mime_type="text/plain" 59 | ) 60 | 61 | # Create a message with the attachment 62 | message = Message( 63 | role="user", 64 | content="Here's a text file to analyze", 65 | attachments=[text_attachment] 66 | ) 67 | thread.add_message(message) 68 | 69 | # Save the thread - this will process and store the attachment 70 | await thread_store.save(thread) 71 | 72 | logger.info("Created message with text attachment") 73 | logger.info(f"Attachment status: {text_attachment.status}") 74 | logger.info(f"Storage path: {text_attachment.storage_path}") 75 | 76 | return thread 77 | 78 | async def example_tool_generated_attachment(thread_store, agent, file_store): 79 | """Example of handling attachments generated by tools""" 80 | logger.info("\nExample 2: Tool-Generated Attachments") 81 | 82 | # Create a thread 83 | thread = Thread(title="Thread with Tool-Generated Attachments") 84 | 85 | # Add a message requesting image generation 86 | message = Message( 87 | role="user", 88 | content=( 89 | "Please generate a image in the style of a wood block 
print of a serene Japanese garden " 90 | "with a traditional wooden bridge over a koi pond, cherry blossoms in full bloom, " 91 | "and a small tea house in the background." 92 | ) 93 | ) 94 | thread.add_message(message) 95 | 96 | # Process the thread - this will trigger image generation 97 | processed_thread, new_messages = await agent.go(thread) 98 | 99 | # The thread is automatically saved by the agent, which processes any attachments 100 | 101 | # Log information about generated attachments 102 | for msg in new_messages: 103 | if msg.attachments: 104 | logger.info(f"Message from {msg.role} has {len(msg.attachments)} attachments:") 105 | for att in msg.attachments: 106 | logger.info(f"- {att.filename} ({att.mime_type})") 107 | logger.info(f" Status: {att.status}") 108 | logger.info(f" Storage path: {att.storage_path}") 109 | 110 | return processed_thread 111 | 112 | async def example_adding_attachment_to_existing_message(thread_store, file_store): 113 | """Example of adding an attachment to an existing message""" 114 | logger.info("\nExample 3: Adding Attachment to Existing Message") 115 | 116 | # Create a thread with a message 117 | thread = Thread(title="Thread with Added Attachments") 118 | message = Message( 119 | role="user", 120 | content="Here's some data to analyze" 121 | ) 122 | thread.add_message(message) 123 | 124 | # Add an attachment to the existing message 125 | json_content = b'{"key": "value"}' 126 | message.add_attachment( 127 | attachment=json_content, 128 | filename="data.json" 129 | ) 130 | 131 | # Save the thread - this will process and store the new attachment 132 | await thread_store.save(thread) 133 | 134 | logger.info(f"Added JSON attachment to message") 135 | logger.info(f"Attachment status: {message.attachments[0].status}") 136 | logger.info(f"Storage path: {message.attachments[0].storage_path}") 137 | 138 | return thread 139 | 140 | async def main(): 141 | # Run examples 142 | try: 143 | thread_store, agent, file_store = await setup() 144 | thread1 = await example_manual_attachment(thread_store, file_store) 145 | thread2 = await example_tool_generated_attachment(thread_store, agent, file_store) 146 | thread3 = await example_adding_attachment_to_existing_message(thread_store, file_store) 147 | 148 | except Exception as e: 149 | logger.error(f"Error in example: {str(e)}") 150 | raise 151 | 152 | if __name__ == "__main__": 153 | try: 154 | asyncio.run(main()) 155 | except KeyboardInterrupt: 156 | logger.warning("Exiting gracefully...") 157 | sys.exit(0) -------------------------------------------------------------------------------- /tests/utils/test_agent_runner.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests for the AgentRunner class. 3 | 4 | This file tests the agent runner functionality, including registration, 5 | listing, retrieval, and execution of agents. 
6 | """ 7 | import os 8 | os.environ["OPENAI_API_KEY"] = "dummy" 9 | import pytest 10 | from unittest.mock import patch, AsyncMock, MagicMock 11 | from tyler.utils.agent_runner import AgentRunner, agent_runner 12 | from tyler import Agent, Thread, Message 13 | 14 | @pytest.fixture 15 | def mock_agent(): 16 | agent = MagicMock() 17 | agent.name = "test_agent" 18 | agent.go = AsyncMock(return_value=(Thread(), [ 19 | Message(role="assistant", content="Mock agent response") 20 | ])) 21 | return agent 22 | 23 | @pytest.fixture 24 | def agent_runner_instance(): 25 | # Create a fresh instance for each test 26 | return AgentRunner() 27 | 28 | @pytest.mark.asyncio 29 | async def test_register_agent(agent_runner_instance, mock_agent): 30 | """Test registering an agent with the agent runner""" 31 | # Register the agent 32 | agent_runner_instance.register_agent(mock_agent.name, mock_agent) 33 | 34 | # Verify agent is registered 35 | assert mock_agent.name in agent_runner_instance.agents 36 | assert agent_runner_instance.agents[mock_agent.name] == mock_agent 37 | 38 | @pytest.mark.asyncio 39 | async def test_register_duplicate_agent(agent_runner_instance, mock_agent): 40 | """Test registering an agent with a duplicate name""" 41 | # Register the agent twice 42 | agent_runner_instance.register_agent(mock_agent.name, mock_agent) 43 | 44 | # Create a second agent with the same name 45 | second_agent = MagicMock() 46 | second_agent.name = mock_agent.name 47 | 48 | # Register the second agent with the same name 49 | agent_runner_instance.register_agent(second_agent.name, second_agent) 50 | 51 | # Verify the second agent replaced the first 52 | assert mock_agent.name in agent_runner_instance.agents 53 | assert agent_runner_instance.agents[mock_agent.name] == second_agent 54 | 55 | @pytest.mark.asyncio 56 | async def test_list_agents(agent_runner_instance, mock_agent): 57 | """Test listing registered agents""" 58 | # Register multiple agents 59 | agent_runner_instance.register_agent("agent1", mock_agent) 60 | 61 | agent2 = MagicMock() 62 | agent2.name = "agent2" 63 | agent_runner_instance.register_agent("agent2", agent2) 64 | 65 | # Get list of agents 66 | agents = agent_runner_instance.list_agents() 67 | 68 | # Verify list contains both agents 69 | assert "agent1" in agents 70 | assert "agent2" in agents 71 | assert len(agents) == 2 72 | 73 | @pytest.mark.asyncio 74 | async def test_get_agent(agent_runner_instance, mock_agent): 75 | """Test getting an agent by name""" 76 | # Register an agent 77 | agent_runner_instance.register_agent(mock_agent.name, mock_agent) 78 | 79 | # Get the agent by name 80 | retrieved_agent = agent_runner_instance.get_agent(mock_agent.name) 81 | 82 | # Verify correct agent is returned 83 | assert retrieved_agent == mock_agent 84 | 85 | @pytest.mark.asyncio 86 | async def test_get_nonexistent_agent(agent_runner_instance): 87 | """Test getting a nonexistent agent""" 88 | # Try to get a nonexistent agent 89 | retrieved_agent = agent_runner_instance.get_agent("nonexistent") 90 | 91 | # Verify None is returned 92 | assert retrieved_agent is None 93 | 94 | @pytest.mark.asyncio 95 | async def test_run_agent(agent_runner_instance, mock_agent): 96 | """Test running an agent on a task""" 97 | # Register an agent 98 | agent_runner_instance.register_agent(mock_agent.name, mock_agent) 99 | 100 | # Run the agent 101 | response, metrics = await agent_runner_instance.run_agent(mock_agent.name, "test task") 102 | 103 | # Verify agent.go was called 104 | mock_agent.go.assert_called_once() 105 | 106 | 
# Verify correct response string 107 | assert response == "Mock agent response" 108 | 109 | # Verify metrics were returned 110 | assert isinstance(metrics, dict) 111 | assert "agent_name" in metrics 112 | assert metrics["agent_name"] == mock_agent.name 113 | assert "timing" in metrics 114 | 115 | @pytest.mark.asyncio 116 | async def test_run_agent_with_context(agent_runner_instance, mock_agent): 117 | """Test running an agent with context""" 118 | # Register an agent 119 | agent_runner_instance.register_agent(mock_agent.name, mock_agent) 120 | 121 | # Run the agent with context 122 | context = {"key": "value"} 123 | response, metrics = await agent_runner_instance.run_agent(mock_agent.name, "test task", context) 124 | 125 | # Verify agent.go was called 126 | mock_agent.go.assert_called_once() 127 | 128 | # Verify correct response 129 | assert response == "Mock agent response" 130 | 131 | # Verify metrics were returned 132 | assert isinstance(metrics, dict) 133 | assert "agent_name" in metrics 134 | assert metrics["agent_name"] == mock_agent.name 135 | 136 | # Verify thread contains system message with context 137 | thread = mock_agent.go.call_args[0][0] 138 | system_messages = [m for m in thread.messages if m.role == "system"] 139 | assert len(system_messages) == 1 140 | assert "key: value" in system_messages[0].content 141 | 142 | @pytest.mark.asyncio 143 | async def test_run_nonexistent_agent(agent_runner_instance): 144 | """Test running a nonexistent agent""" 145 | # Try to run a nonexistent agent 146 | with pytest.raises(ValueError) as excinfo: 147 | await agent_runner_instance.run_agent("nonexistent", "test task") 148 | 149 | # Verify error message 150 | assert "not found" in str(excinfo.value) 151 | 152 | @pytest.mark.asyncio 153 | async def test_global_agent_runner_instance(): 154 | """Test the global agent_runner instance""" 155 | # Verify global instance exists 156 | assert agent_runner is not None 157 | assert isinstance(agent_runner, AgentRunner) -------------------------------------------------------------------------------- /tests/tools/test_web.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from unittest.mock import patch, MagicMock 3 | from pathlib import Path 4 | from tyler.tools.web import fetch_page, download_file, extract_text_from_html, fetch_html 5 | 6 | # Mock responses 7 | MOCK_HTML_CONTENT = """ 8 | 9 | 10 | Test Page 11 | 14 | 17 | 18 | 19 | Test Header 20 | Test paragraph 21 | Test div content 22 | 23 | 24 | """ 25 | 26 | MOCK_CLEAN_TEXT = """Test Header 27 | 28 | Test paragraph 29 | 30 | Test div content""" 31 | 32 | @pytest.fixture(autouse=True) 33 | def mock_requests(): 34 | """Mock all requests to prevent any real API calls""" 35 | with patch('requests.get') as mock_get, patch('requests.head') as mock_head: 36 | mock_response = MagicMock() 37 | mock_response.text = MOCK_HTML_CONTENT 38 | mock_response.headers = { 39 | 'content-type': 'text/html', 40 | 'content-length': '1000' 41 | } 42 | mock_get.return_value = mock_response 43 | mock_head.return_value = mock_response 44 | yield mock_get, mock_head 45 | 46 | @pytest.fixture 47 | def mock_downloads_dir(tmp_path): 48 | """Create a temporary downloads directory""" 49 | downloads = tmp_path / "downloads" 50 | downloads.mkdir() 51 | with patch('tyler.utils.files.user_downloads_dir', return_value=str(downloads)): 52 | yield downloads 53 | 54 | def test_fetch_html_success(): 55 | """Test successful HTML fetching""" 56 | html = 
fetch_html("https://example.com") 57 | assert html == MOCK_HTML_CONTENT 58 | 59 | def test_fetch_html_with_headers(): 60 | """Test HTML fetching with custom headers""" 61 | headers = {"User-Agent": "Test Bot"} 62 | with patch('requests.get') as mock_get: 63 | mock_response = MagicMock() 64 | mock_response.text = MOCK_HTML_CONTENT 65 | mock_get.return_value = mock_response 66 | 67 | fetch_html("https://example.com", headers) 68 | 69 | mock_get.assert_called_with( 70 | "https://example.com", 71 | headers=headers, 72 | timeout=30 73 | ) 74 | 75 | def test_fetch_html_error(): 76 | """Test error handling in fetch_html""" 77 | with patch('requests.get') as mock_get: 78 | mock_get.side_effect = Exception("Connection error") 79 | with pytest.raises(Exception, match="Error fetching URL: Connection error"): 80 | fetch_html("https://example.com") 81 | 82 | def test_extract_text_from_html(): 83 | """Test HTML to text extraction""" 84 | text = extract_text_from_html(MOCK_HTML_CONTENT) 85 | assert text == MOCK_CLEAN_TEXT 86 | 87 | def test_fetch_page_text_format(): 88 | """Test fetch_page with text format""" 89 | result = fetch_page(url="https://example.com", format="text") 90 | assert result["success"] is True 91 | assert result["status_code"] == 200 92 | assert result["content"] == MOCK_CLEAN_TEXT 93 | assert result["content_type"] == "text" 94 | assert result["error"] is None 95 | 96 | def test_fetch_page_html_format(): 97 | """Test fetch_page with HTML format""" 98 | result = fetch_page(url="https://example.com", format="html") 99 | assert result["success"] is True 100 | assert result["status_code"] == 200 101 | assert result["content"] == MOCK_HTML_CONTENT 102 | assert result["content_type"] == "html" 103 | assert result["error"] is None 104 | 105 | def test_fetch_page_error(): 106 | """Test fetch_page error handling""" 107 | with patch('requests.get') as mock_get: 108 | mock_get.side_effect = Exception("Test error") 109 | result = fetch_page(url="https://example.com") 110 | assert result["success"] is False 111 | assert result["status_code"] is None 112 | assert result["content"] is None 113 | assert result["content_type"] is None 114 | assert result["error"] == "Error fetching URL: Test error" 115 | 116 | def test_download_file_success(mock_downloads_dir): 117 | """Test successful file download""" 118 | result, files = download_file(url="https://example.com/file.txt") 119 | 120 | assert result["success"] is True 121 | assert result["content_type"] == "text/html" 122 | assert result["file_size"] == 1000 123 | assert result["filename"] == "file.txt" 124 | assert len(files) == 1 125 | assert files[0]["filename"] == "file.txt" 126 | assert files[0]["mime_type"] == "text/html" 127 | assert "content" in files[0] 128 | assert "url" in files[0]["attributes"] 129 | 130 | def test_download_file_with_content_disposition(mock_downloads_dir): 131 | """Test file download with Content-Disposition header""" 132 | with patch('requests.get') as mock_get: 133 | mock_response = MagicMock() 134 | mock_response.headers = { 135 | 'Content-Disposition': 'attachment; filename="server_file.txt"', 136 | 'content-type': 'text/plain', 137 | 'content-length': '1000' 138 | } 139 | mock_response.iter_content.return_value = [b'test content'] 140 | mock_get.return_value = mock_response 141 | 142 | result, files = download_file(url="https://example.com/file") 143 | 144 | assert result["success"] is True 145 | assert result["filename"] == "server_file.txt" 146 | assert files[0]["filename"] == "server_file.txt" 147 | 148 | def 
test_download_file_error(): 149 | """Test download_file error handling""" 150 | with patch('requests.get') as mock_get: 151 | mock_get.side_effect = Exception("Download failed") 152 | result, files = download_file(url="https://example.com/file.txt") 153 | 154 | assert result["success"] is False 155 | assert "error" in result 156 | assert result["error"] == "Download failed" 157 | assert len(files) == 0 -------------------------------------------------------------------------------- /docs/static/img/logo.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/docs/examples/file-storage.md: -------------------------------------------------------------------------------- 1 | # File Storage Example 2 | 3 | This example demonstrates how to use Tyler's file storage capabilities to save and retrieve files. 4 | 5 | ## Configuration 6 | 7 | First, set up the file storage configuration: 8 | 9 | ```python 10 | from tyler.storage import get_file_store, FileStore 11 | 12 | # Get default file store 13 | file_store = get_file_store() 14 | 15 | # Or create with custom configuration 16 | file_store = FileStore( 17 | base_path="/path/to/files", 18 | max_file_size=100 * 1024 * 1024, # 100MB 19 | max_storage_size=10 * 1024 * 1024 * 1024, # 10GB 20 | allowed_mime_types={"application/pdf", "image/jpeg", "text/plain"} 21 | ) 22 | ``` 23 | 24 | ## Saving Files 25 | 26 | ```python 27 | # Save a file 28 | file_content = b"Hello, World!" 29 | result = await file_store.save("example.txt", file_content) 30 | 31 | print(f"File ID: {result['id']}") 32 | print(f"Storage path: {result['storage_path']}") 33 | print(f"MIME type: {result['mime_type']}") 34 | ``` 35 | 36 | ## Retrieving Files 37 | 38 | ```python 39 | # Get file content using ID and storage path 40 | file_id = result['id'] 41 | storage_path = result['storage_path'] 42 | content = await file_store.get(file_id, storage_path) 43 | 44 | print(f"Content: {content.decode('utf-8')}") 45 | ``` 46 | 47 | ## Deleting Files 48 | 49 | ```python 50 | # Delete a file 51 | await file_store.delete(file_id, storage_path) 52 | ``` 53 | 54 | ## Working with Attachments 55 | 56 | The FileStore integrates seamlessly with the Attachment model: 57 | 58 | ```python 59 | from tyler import Attachment, Message, Thread, ThreadStore 60 | 61 | # Create an attachment 62 | attachment = Attachment( 63 | filename="document.pdf", 64 | content=pdf_bytes, 65 | mime_type="application/pdf" 66 | ) 67 | 68 | # Process and store the attachment directly 69 | await attachment.process_and_store() 70 | 71 | # Check the results 72 | print(f"File ID: {attachment.file_id}") 73 | print(f"Storage path: {attachment.storage_path}") 74 | print(f"Status: {attachment.status}") 75 | print(f"URL: {attachment.attributes.get('url')}") 76 | 77 | # Or use with messages and threads (recommended approach) 78 | message = Message(role="user", content="Here's a document") 79 | message.add_attachment(pdf_bytes, filename="document.pdf") 80 | 81 | thread = Thread() 82 | thread.add_message(message) 83 | 84 | # Create thread store (will initialize automatically when needed) 85 | thread_store = ThreadStore() 86 | await thread_store.save(thread) # Automatically processes and stores attachments 87 | 88 | # Access attachment information 89 | for attachment in message.attachments: 90 | if attachment.status == "stored": 91 | print(f"File ID: {attachment.file_id}") 92 | print(f"Storage path: {attachment.storage_path}") 
93 | print(f"URL: {attachment.attributes.get('url')}") 94 | 95 | # Access file-specific attributes 96 | file_type = attachment.attributes.get("type") 97 | if file_type == "document": 98 | print(f"Extracted text: {attachment.attributes.get('text')}") 99 | elif file_type == "image": 100 | print(f"Image description: {attachment.attributes.get('overview')}") 101 | ``` 102 | 103 | ## Batch Operations 104 | 105 | ```python 106 | # Save multiple files at once 107 | files = [ 108 | (b"File 1 content", "file1.txt", "text/plain"), 109 | (b"File 2 content", "file2.txt", "text/plain") 110 | ] 111 | results = await file_store.batch_save(files) 112 | 113 | # Delete multiple files 114 | file_ids = [result["id"] for result in results] 115 | await file_store.batch_delete(file_ids) 116 | ``` 117 | 118 | ## Storage Management 119 | 120 | ```python 121 | # Check storage health 122 | health = await file_store.check_health() 123 | print(f"Status: {health['status']}") 124 | print(f"Storage size: {health['storage_size']} bytes") 125 | print(f"File count: {health['file_count']}") 126 | print(f"Usage: {health['usage_percent']}%") 127 | 128 | # Get storage size 129 | size = await file_store.get_storage_size() 130 | print(f"Total storage size: {size} bytes") 131 | 132 | # Get file count 133 | count = await file_store.get_file_count() 134 | print(f"Total files: {count}") 135 | ``` 136 | 137 | ## URL Generation 138 | 139 | ```python 140 | # Generate URL for a file 141 | from tyler.storage import FileStore 142 | 143 | storage_path = "ab/cdef1234.pdf" # Example storage path 144 | url = FileStore.get_file_url(storage_path) 145 | print(f"File URL: {url}") 146 | ``` 147 | 148 | ## Error Handling 149 | 150 | ```python 151 | from tyler.storage import ( 152 | FileStoreError, 153 | FileNotFoundError, 154 | StorageFullError, 155 | UnsupportedFileTypeError, 156 | FileTooLargeError 157 | ) 158 | 159 | try: 160 | # Try to save a file 161 | result = await file_store.save(large_content, "large_file.bin") 162 | except UnsupportedFileTypeError: 163 | print("File type not allowed") 164 | except FileTooLargeError: 165 | print("File too large") 166 | except StorageFullError: 167 | print("Storage full") 168 | except FileStoreError as e: 169 | print(f"General storage error: {e}") 170 | ``` 171 | 172 | ## Complete Example 173 | 174 | ```python 175 | import asyncio 176 | from tyler.storage import get_file_store 177 | 178 | async def main(): 179 | # Initialize file store 180 | file_store = get_file_store() 181 | 182 | # Save a file 183 | content = b"Hello, World!" 184 | result = await file_store.save(content, "example.txt") 185 | print(f"Saved file: {result['filename']}") 186 | print(f"Storage path: {result['storage_path']}") 187 | 188 | # Retrieve the file 189 | retrieved = await file_store.get(result['id'], result['storage_path']) 190 | print(f"Retrieved content: {retrieved.decode('utf-8')}") 191 | 192 | # Delete the file 193 | await file_store.delete(result['id'], result['storage_path']) 194 | print("File deleted") 195 | 196 | # Check storage health 197 | health = await file_store.check_health() 198 | print(f"Storage health: {health['status']}") 199 | print(f"Storage usage: {health['usage_percent']}%") 200 | 201 | if __name__ == "__main__": 202 | asyncio.run(main()) 203 | ``` --------------------------------------------------------------------------------