├── .gitignore
├── 2024-05-pycon-us-logfire
├── 01_manual_tracing.py
├── 02_auto_tracing_get_deps.py
├── 02_auto_tracing_get_deps_run.py
├── 03_pretty_python.py
├── 04_stripe_server.py
├── 04_stripe_server_auto.py
├── 05_pydantic_plugin.py
├── 06_openai.py
├── 07_cat_bacon
│ ├── __init__.py
│ ├── db.py
│ ├── images.py
│ ├── main.py
│ └── page.py
├── README.md
├── pyproject.toml
└── uv.lock
├── 2024-09-django-london-logfire
├── .gitignore
├── LICENSE
├── README.md
├── cat_bacon
│ ├── __init__.py
│ ├── admin.py
│ ├── apps.py
│ ├── forms.py
│ ├── migrations
│ │ ├── 0001_initial.py
│ │ └── __init__.py
│ ├── models.py
│ ├── templates
│ │ ├── base.html
│ │ ├── image.html
│ │ └── index.html
│ ├── tests.py
│ ├── urls.py
│ └── views.py
├── create_many.py
├── logfire_django_demo
│ ├── __init__.py
│ ├── asgi.py
│ ├── settings.py
│ ├── urls.py
│ └── wsgi.py
├── manage.py
├── pyproject.toml
└── uv.lock
├── 2024-12-boston-ae
├── README.md
├── example_openai.py
├── example_pydantic.py
├── example_pydanticai.py
├── example_weather.py
├── logfire-weather-agent.png
├── pyproject.toml
├── slides.md
└── uv.lock
├── 2025-02-ai-engineer-pydantic-ai
├── .python-version
├── Makefile
├── README.md
├── app
│ ├── __init__.py
│ ├── analyse.py
│ ├── prompt.toml
│ ├── send_reply.py
│ └── server.py
├── cf-worker
│ ├── package-lock.json
│ ├── package.json
│ ├── src
│ │ └── index.ts
│ ├── tsconfig.json
│ └── wrangler.toml
├── pyproject.toml
├── raw_send.py
└── uv.lock
├── 2025-04-cli-demo
├── cli.py
└── pyproject.toml
├── 2025-04-data-council
├── .python-version
├── README.md
├── agent-loop.png
├── browser_mcp.py
├── browser_mcp_graph.py
├── evals
│ ├── 01_generate_dataset.py
│ ├── 02_add_custom_evaluators.py
│ ├── 03_unit_testing.py
│ ├── 04_compare_models.py
│ ├── __init__.py
│ ├── agent.py
│ ├── custom_evaluators.py
│ └── datasets
│ │ ├── time_range_v1.yaml
│ │ ├── time_range_v1_schema.json
│ │ ├── time_range_v2.yaml
│ │ └── time_range_v2_schema.json
├── memory_messages.py
├── memory_tools.py
├── pyproject.toml
└── uv.lock
├── 2025-05-16-fastapi-demo
├── .gitignore
├── README.md
├── main.py
├── pydantic_ai_evals.yaml
├── pydantic_ai_evals_schema.json
├── pyproject.toml
├── src
│ ├── agent.py
│ ├── app.py
│ └── mcp_agent.py
├── tests
│ └── evals.py
└── uv.lock
├── 2025-05-odsc
├── .python-version
├── README.md
├── agent-loop.png
├── browser_mcp.py
├── browser_mcp_graph.py
├── evals
│ ├── 01_generate_dataset.py
│ ├── 02_add_custom_evaluators.py
│ ├── 03_unit_testing.py
│ ├── 04_compare_models.py
│ ├── __init__.py
│ ├── agent.py
│ ├── custom_evaluators.py
│ └── datasets
│ │ ├── time_range_v1.yaml
│ │ ├── time_range_v1_schema.json
│ │ ├── time_range_v2.yaml
│ │ └── time_range_v2_schema.json
├── memory_messages.py
├── memory_tools.py
├── pyproject.toml
└── uv.lock
├── 2025-05-pycon-us
├── .python-version
├── README.md
├── agent-loop.png
├── browser_mcp.py
├── evals
│ ├── 01_generate_dataset.py
│ ├── 02_add_custom_evaluators.py
│ ├── 03_unit_testing.py
│ ├── 04_compare_models.py
│ ├── __init__.py
│ ├── agent.py
│ ├── custom_evaluators.py
│ └── datasets
│ │ ├── time_range_v1.yaml
│ │ ├── time_range_v1_schema.json
│ │ ├── time_range_v2.yaml
│ │ └── time_range_v2_schema.json
├── memory_messages.py
├── memory_tools.py
├── pyproject.toml
└── uv.lock
├── 2025-06-ai-engineer-mcp
├── .gitignore
├── .python-version
├── README.md
├── images
│ ├── agents-with-mcp-recursive.svg
│ └── agents-with-mcp.svg
├── libs_mcp_client.py
├── make_slides.py
├── pypi_mcp_server.py
├── pyproject.toml
├── slides.template.html
├── static
│ ├── favicon.ico
│ ├── fonts
│ │ ├── ibm-plex-mono-italic-400.ttf
│ │ ├── ibm-plex-mono-normal-400.ttf
│ │ ├── ibm-plex-mono-normal-500.ttf
│ │ └── ibm-plex-mono-normal-600.ttf
│ ├── google_fonts_ibm_plex.css
│ ├── highlight.min.css
│ ├── highlight.min.js
│ ├── marked.min.js
│ └── mermaid.min.js
└── uv.lock
├── LICENSE
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 | # python generated files
2 | __pycache__/
3 | *.py[oc]
4 | build/
5 | dist/
6 | wheels/
7 | *.egg-info
8 |
9 | # venv
10 | .venv
11 |
12 | # pycharm
13 | .idea/
14 |
15 | # misc space for stuff not in VCS
16 | scratch/
17 | **/.DS_Store
18 |
--------------------------------------------------------------------------------
/2024-05-pycon-us-logfire/01_manual_tracing.py:
--------------------------------------------------------------------------------
from dataclasses import dataclass
from time import sleep
import logfire

# With kwargs:
logfire.configure()

@dataclass
class Foo:
    bar: str
    baz: int


# kwargs become structured attributes on the record; Foo is serialized by logfire
logfire.info('Hello {name}', name='world', foo=Foo('qux', 123))

activity = 'work'
with logfire.span('doing some slow {activity}...', activity=activity):
    logfire.info('{fruit=}', fruit='banana')
    sleep(0.123)
    # spans nest: this appears inside the 'doing some slow work' span above
    with logfire.span('more nesting'):
        status = 'ominous'
        logfire.warn('this is {status}', status=status)
        sleep(0.456)
logfire.info('done')


# With f-strings: same output as above, but values are interpolated by Python
# rather than passed as structured attributes
name = 'world'
logfire.info(f'Hello {name}')

activity = 'work'
with logfire.span(f'doing some slow {activity}...'):
    fruit = 'banana'
    logfire.info(f'{fruit=}')
    sleep(0.123)
    with logfire.span('more nesting'):
        status = 'ominous'
        logfire.warn(f'this is {status}')
        sleep(0.456)
logfire.info('done')
41 |
--------------------------------------------------------------------------------
/2024-05-pycon-us-logfire/02_auto_tracing_get_deps.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import base64
3 | import json
4 | from pathlib import Path
5 |
6 | from httpx import AsyncClient
7 | from bs4 import BeautifulSoup
8 |
9 | START_URL = 'https://github.com/pydantic/pydantic/network/dependents'
10 | CACHE_DIR = Path('cache')
11 | CACHE_DIR.mkdir(exist_ok=True)
12 | REPOS = {}
13 |
14 |
async def download_package_deps(client: AsyncClient, url: str) -> str:
    """Fetch *url* and return its HTML, caching responses on disk.

    The cache file name is the url-safe base64 of the URL, so each page
    is fetched from GitHub at most once.
    """
    cache_key = base64.urlsafe_b64encode(url.encode()).decode()
    cache_file = CACHE_DIR / f'get_{cache_key}.html'
    if cache_file.exists():
        return cache_file.read_text()

    # try to prevent 429 errors
    await asyncio.sleep(1)
    response = await client.get(url)
    if response.status_code != 200:
        raise RuntimeError(f'{response.status_code} from {url}')

    content_type = response.headers['content-type']
    assert content_type.startswith('text/html'), f'Unexpected mimetype: {content_type!r} from {url}'

    cache_file.write_text(response.text)
    return response.text
32 |
33 |
async def get_dependents(client: AsyncClient, url: str) -> str | None:
    """Scrape one GitHub dependents page into REPOS; return the next page URL.

    Returns None when there is no 'Next' pagination link.
    """
    html = await download_package_deps(client, url)
    soup = BeautifulSoup(html, 'html.parser')

    page_repos = {}
    for row in soup.find_all('div', {'class': 'Box-row'}):
        owner = row.find('a', {'data-hovercard-type': 'user'}) or row.find(
            'a', {'data-hovercard-type': 'organization'}
        )
        repo = row.find('a', {'data-hovercard-type': 'repository'})
        # the star count is the text next to the star icon, e.g. "1,234"
        star_label = row.find('svg', {'class': 'octicon-star'}).parent
        stars = int(star_label.getText().strip().replace(',', ''))
        page_repos[f'{owner.getText()}/{repo.getText()}'] = stars

    REPOS.update(page_repos)

    next_link = soup.find('a', string='Next', href=True)
    if next_link:
        return next_link['href']
53 |
54 |
async def main():
    """Crawl up to six dependents pages, then merge results into repos.json.

    The finally block runs even on early return or error, so partial
    results are never lost.
    """
    page_count = 0
    next_url = START_URL
    try:
        async with AsyncClient() as client:
            while next_url:
                print(f'{page_count} {next_url} {len(REPOS)}')
                next_url = await get_dependents(client, next_url)
                page_count += 1
                if page_count > 5:
                    return

    finally:
        if REPOS:
            repos_path = Path('repos.json')
            print(f'Saving {len(REPOS)} new repos to {repos_path}...')
            existing_repos = {}
            if repos_path.exists():
                with repos_path.open('r') as f:
                    existing_repos = json.load(f)

            # merge this run's results, keep the file sorted by star count
            existing_repos.update(REPOS)
            existing_repos = dict(sorted(existing_repos.items(), key=lambda item: item[1], reverse=True))
            print(f'Total of {len(existing_repos)} repos')
            with repos_path.open('w') as f:
                json.dump(existing_repos, f, indent=2)
82 |
83 |
84 | if __name__ == '__main__':
85 | asyncio.run(main())
86 |
--------------------------------------------------------------------------------
/2024-05-pycon-us-logfire/02_auto_tracing_get_deps_run.py:
--------------------------------------------------------------------------------
import sys

import logfire

logfire.configure()
# Auto-tracing rewrites modules at import time, so modules that are already
# in sys.modules can't be instrumented — presumably these were imported by
# logfire.configure() above, hence the eviction. TODO confirm.
del sys.modules['pathlib']
del sys.modules['os']
logfire.install_auto_tracing(modules=['dependants', 'bs4.*'], min_duration=0.03)

from opentelemetry.instrumentation.httpx import HTTPXClientInstrumentor

HTTPXClientInstrumentor().instrument()


# NOTE(review): `dependants` is not in this directory listing — presumably a
# local copy/rename of 02_auto_tracing_get_deps.py; verify before running.
import asyncio
import dependants

with logfire.span('downloading dependants'):
    asyncio.run(dependants.main())
20 |
--------------------------------------------------------------------------------
/2024-05-pycon-us-logfire/03_pretty_python.py:
--------------------------------------------------------------------------------
from datetime import datetime
from pydantic import BaseModel
import logfire

logfire.configure()


class Delivery(BaseModel):
    # pydantic coerces the incoming string values to datetime / ints
    timestamp: datetime
    dims: tuple[int, int]


# dims arrive as strings on purpose, to show pydantic's coercion
input_json = [
    '{"timestamp": "2020-01-02T03:04:05Z", "dims": ["10", "20"]}',
    '{"timestamp": "2020-01-02T04:04:05Z", "dims": ["15", "25"]}',
    '{"timestamp": "2020-01-02T05:04:05Z", "dims": ["20", "30"]}',
]
deliveries = [Delivery.model_validate_json(json) for json in input_json]

# demo: logfire renders the list of pydantic models as structured data
logfire.info(f'{len(deliveries)} deliveries', deliveries=deliveries)
21 |
--------------------------------------------------------------------------------
/2024-05-pycon-us-logfire/04_stripe_server.py:
--------------------------------------------------------------------------------
from time import sleep

import stripe
import logfire
from fastapi import FastAPI
from fastapi.responses import JSONResponse
from opentelemetry.instrumentation.requests import RequestsInstrumentor

logfire.configure()

# you can get a testing key by just signing up for stripe
STRIPE_KEY = 'sk_test_...'
stripe.api_key = STRIPE_KEY
# Instrument outgoing `requests` calls (used by the stripe SDK) exactly once.
# A second, duplicate RequestsInstrumentor().instrument() call was removed:
# re-instrumenting is redundant and the instrumentor warns about it.
RequestsInstrumentor().instrument()

app = FastAPI()
logfire.instrument_fastapi(app)
logfire.info('Starting the app')
22 |
23 |
@app.post('/payments/{user_id:int}/complete/')
def payment_complete(user_id: int):
    """Charge the user's card via Stripe and record the outcome.

    Returns a 400 JSON response when the card is declined; otherwise falls
    through to FastAPI's default 200 response after recording success.
    """
    amount, currency, payment_method = get_payment_details(user_id)
    with logfire.span(f'stripe payment {amount=} {currency=} {user_id=}') as span:
        # Initialise before the try: if CardError is raised the original code
        # left `intent` unbound and the check below raised NameError.
        intent = None
        try:
            intent = stripe.PaymentIntent.create(
                amount=amount,
                currency=currency,
                payment_method=payment_method,
                confirm=True,
                return_url='https://example.com/return',
            )
        except stripe.CardError as e:
            # a declined card is expected: downgrade the span, keep the details
            span.set_level('warning')
            span.set_attribute('error', e.error)
        else:
            span.set_attribute('payment_intent', intent)
        if intent is None:
            store_payment_failure(user_id)
            return JSONResponse(content={'detail': 'Card error'}, status_code=400)
        else:
            store_payment_success(user_id, intent)
46 |
47 |
@logfire.instrument()
def get_payment_details(user_id: int) -> tuple[int, str, str]:
    """Return (amount in cents, currency, payment method) for *user_id*.

    Demo stub: user 42 gets a test card that succeeds; everyone else gets
    one that is declined for insufficient funds. The return annotation was
    `[int, str, str]` (a list literal, not a type) — fixed to tuple[...].
    """
    sleep(0.2)  # simulate a slow lookup so the instrumented span is visible
    if user_id == 42:
        return 20_00, 'usd', 'pm_card_visa'
    else:
        return 20_00, 'usd', 'pm_card_visa_chargeDeclinedInsufficientFunds'
55 |
56 |
@logfire.instrument()
def store_payment_success(user_id: int, _intent) -> None:
    """Persist a successful payment (demo stub — just sleeps).

    `_intent` (renamed from the `_indent` typo; the caller passes the Stripe
    PaymentIntent positionally) is currently unused.
    """
    sleep(0.2)
60 |
61 |
@logfire.instrument()
def store_payment_failure(user_id: int) -> None:
    # Demo stub: pretend to record the failed payment; the sleep makes the
    # instrumented span visible in the trace timeline.
    sleep(0.2)
65 |
--------------------------------------------------------------------------------
/2024-05-pycon-us-logfire/04_stripe_server_auto.py:
--------------------------------------------------------------------------------
from time import sleep

import stripe
import logfire
from fastapi import FastAPI
from fastapi.responses import JSONResponse
from opentelemetry.instrumentation.requests import RequestsInstrumentor

logfire.configure()

# you can get a testing key by just signing up for stripe
STRIPE_KEY = 'sk_test_...'
stripe.api_key = STRIPE_KEY
# Instrument outgoing `requests` calls (used by the stripe SDK) exactly once.
# A second, duplicate RequestsInstrumentor().instrument() call was removed:
# re-instrumenting is redundant and the instrumentor warns about it.
RequestsInstrumentor().instrument()

app = FastAPI()
logfire.instrument_fastapi(app)
logfire.info('Starting the app')
22 |
23 |
@app.post('/payments/{user_id:int}/complete/')
def hello(user_id: int):
    """Charge the user's card; respond with a 400 when the card is declined."""
    amount, currency, payment_method = get_payment_details(user_id)

    try:
        intent = stripe.PaymentIntent.create(
            amount=amount,
            currency=currency,
            payment_method=payment_method,
            confirm=True,
            return_url='https://example.com/return',
        )
    except stripe.CardError:
        # declined card: record the failure and report it to the client
        store_payment_failure(user_id)
        return JSONResponse(content={'detail': 'Card error'}, status_code=400)
    # only reached when no exception was raised (the except branch returns)
    store_payment_success(user_id, intent)
41 |
42 |
@logfire.instrument()
def get_payment_details(user_id: int) -> tuple[int, str, str]:
    """Return (amount in cents, currency, payment method) for *user_id*.

    Demo stub: user 42 gets a test card that succeeds; everyone else gets
    one that is declined for insufficient funds. The return annotation was
    `[int, str, str]` (a list literal, not a type) — fixed to tuple[...].
    """
    sleep(0.2)  # simulate a slow lookup so the instrumented span is visible
    if user_id == 42:
        return 20_00, 'usd', 'pm_card_visa'
    else:
        return 20_00, 'usd', 'pm_card_visa_chargeDeclinedInsufficientFunds'
50 |
51 |
@logfire.instrument()
def store_payment_success(user_id: int, _intent) -> None:
    """Persist a successful payment (demo stub — just sleeps).

    `_intent` (renamed from the `_indent` typo; the caller passes the Stripe
    PaymentIntent positionally) is currently unused.
    """
    sleep(0.2)
55 |
56 |
@logfire.instrument()
def store_payment_failure(user_id: int) -> None:
    # Demo stub: pretend to record the failed payment; the sleep makes the
    # instrumented span visible in the trace timeline.
    sleep(0.2)
60 |
--------------------------------------------------------------------------------
/2024-05-pycon-us-logfire/05_pydantic_plugin.py:
--------------------------------------------------------------------------------
from datetime import datetime
from pydantic import BaseModel
import logfire

# record='all' makes the pydantic plugin log every validation, pass or fail
logfire.configure(pydantic_plugin=logfire.PydanticPlugin(record='all'))


class Delivery(BaseModel):
    timestamp: datetime
    dims: tuple[int, int]


input_json = [
    '{"timestamp": "2020-01-02T03:04:05Z", "dims": ["10", "20"]}',
    '{"timestamp": "2020-01-02T04:04:05Z", "dims": ["15", "25"]}',
    '{"timestamp": "2020-01-02T05:04:05Z", "dims": ["20", "30"]}',
]
deliveries = [Delivery.model_validate_json(json) for json in input_json]
# deliberate failure: "dims" has only one element, so this raises a
# ValidationError — the point is that the plugin records it in logfire
Delivery.model_validate_json('{"timestamp": "2020-01-02T03:04:05Z", "dims": ["10"]}')
20 |
--------------------------------------------------------------------------------
/2024-05-pycon-us-logfire/06_openai.py:
--------------------------------------------------------------------------------
import webbrowser
import openai
import logfire

logfire.configure()
client = openai.Client()
logfire.instrument_openai(client)  # traces every OpenAI API call below

with logfire.span('Picture of a cat in the style of a famous painter'):
    # first ask the chat model for a painter's name...
    response = client.chat.completions.create(
        model='gpt-4',
        messages=[
            {'role': 'system', 'content': 'Response entirely in plain text, with just a name'},
            {'role': 'user', 'content': 'Who was the influential painter in the 20th century?'},
        ],
    )
    chat_response = response.choices[0].message.content
    print(chat_response)

    # ...then generate an image in that painter's style and open it locally
    response = client.images.generate(
        prompt=f'Create an image of a cat in the style of {chat_response}',
        model='dall-e-3',
    )
    url = response.data[0].url
    print(url)
    webbrowser.open(url)
27 |
--------------------------------------------------------------------------------
/2024-05-pycon-us-logfire/07_cat_bacon/__init__.py:
--------------------------------------------------------------------------------
1 | from contextlib import asynccontextmanager
2 |
3 | from fastapi import FastAPI
4 | from fastapi.responses import HTMLResponse
5 | from fastui import prebuilt_html
6 | from pydantic_settings import BaseSettings
7 | from openai import AsyncClient
8 | import logfire
9 |
10 | from .db import Database
11 | from .main import router as main_router
12 |
13 | logfire.configure()
14 |
15 |
class Settings(BaseSettings):
    # when True, Database.create provisions the database and schema on startup
    create_database: bool = True
    # default DSN targets a local dev postgres instance; override via env var
    pg_dsn: str = 'postgres://postgres:postgres@localhost/cat_bacon_fastapi'
19 |
20 |
21 | settings = Settings() # type: ignore
22 |
23 |
@asynccontextmanager
async def lifespan(app_: FastAPI):
    # App lifetime resources: a DB pool and an instrumented OpenAI client,
    # both exposed to request handlers via app.state.
    async with Database.create(settings.pg_dsn, True, settings.create_database) as db:
        app_.state.db = db
        openai = AsyncClient()
        logfire.instrument_openai(openai)
        app_.state.openai = openai
        yield
32 |
33 |
34 | logfire.instrument_asyncpg()
35 | app = FastAPI(lifespan=lifespan)
36 | logfire.instrument_fastapi(app)
37 |
38 | app.include_router(main_router, prefix='/api')
39 |
40 |
@app.get('/{path:path}')
async def html_landing() -> HTMLResponse:
    """Catch-all route serving the prebuilt React shell; matches any remaining path."""
    page = prebuilt_html(title='Logfire Cat Bacon')
    return HTMLResponse(page)
45 |
--------------------------------------------------------------------------------
/2024-05-pycon-us-logfire/07_cat_bacon/db.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from contextlib import asynccontextmanager
3 | from dataclasses import dataclass
4 | from typing import AsyncIterator, Self, Annotated
5 | from urllib.parse import urlparse
6 |
7 | import logfire
8 | from fastapi import Request, Depends
9 |
10 | import asyncpg
11 | from asyncpg.connection import Connection
12 |
13 | __all__ = ('Database',)
14 |
15 |
@dataclass
class _Database:
    """
    Wrapper for asyncpg with some utilities, also usable as a fastapi dependency.
    """

    _pool: asyncpg.Pool

    @classmethod
    @asynccontextmanager
    async def create(cls, dsn: str, prepare_db: bool = False, create_database: bool = False) -> AsyncIterator[Self]:
        # Optionally provision the database/schema first, then own the pool's
        # lifetime: it is closed when the context exits.
        if prepare_db:
            await _prepare_db(dsn, create_database)
        pool = await asyncpg.create_pool(dsn)
        try:
            yield cls(_pool=pool)
        finally:
            # cap the close so a stuck connection can't hang shutdown
            await asyncio.wait_for(pool.close(), timeout=2.0)

    @asynccontextmanager
    async def acquire(self) -> AsyncIterator[Connection]:
        # Borrow a connection from the pool; always returned, even on error.
        con = await self._pool.acquire()
        try:
            yield con
        finally:
            await self._pool.release(con)

    @asynccontextmanager
    async def acquire_trans(self) -> AsyncIterator[Connection]:
        # Borrow a connection and wrap the caller's work in a transaction.
        async with self._pool.acquire() as conn:
            async with conn.transaction():
                yield conn
48 |
49 |
def _get_db(request: Request) -> _Database:
    # FastAPI dependency: the pool wrapper is stashed on app.state at startup
    return request.app.state.db
52 |
53 |
54 | Database = Annotated[_Database, Depends(_get_db)]
55 |
56 |
@logfire.instrument()
async def _prepare_db(dsn: str, create_database: bool) -> None:
    # Ensure the target database exists (creating it via a server-level
    # connection when asked), then create the schema in a transaction.
    x = {'foobar': 123, 'baz': 'qux'}
    logfire.info(f'Preparing database {x}')
    if create_database:
        parse_result = urlparse(dsn)
        database = parse_result.path.lstrip('/')
        # strip the database name so we connect at server level
        server_dsn = dsn[: dsn.rindex('/')]
        conn = await asyncpg.connect(server_dsn)
        try:
            db_exists = await conn.fetchval('SELECT 1 FROM pg_database WHERE datname = $1', database)
            if not db_exists:
                # NOTE(review): database name is interpolated into the SQL —
                # acceptable for a trusted config DSN, not for untrusted input
                await conn.execute(f'CREATE DATABASE {database}')
        finally:
            await conn.close()

    conn = await asyncpg.connect(dsn)
    try:
        async with conn.transaction():
            await _create_schema(conn)
    finally:
        await conn.close()
79 |
80 |
async def _create_schema(conn: Connection) -> None:
    # Create the images table if missing, then seed it with demo rows.
    await conn.execute("""
CREATE TABLE IF NOT EXISTS images (
    id SERIAL PRIMARY KEY,
    ts TIMESTAMP NOT NULL DEFAULT NOW(),
    prompt TEXT NOT NULL,
    url TEXT NOT NULL
);
-- CREATE INDEX IF NOT EXISTS images_ts_idx ON images (ts DESC);
""")
    # imported here rather than at module level to avoid a circular import
    from .images import create_images

    await create_images(conn)
94 |
--------------------------------------------------------------------------------
/2024-05-pycon-us-logfire/07_cat_bacon/images.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 | from typing import Annotated
3 |
4 | from asyncpg import Connection
5 | from openai import AsyncClient
6 |
7 | from pydantic import BaseModel, Field, TypeAdapter, BeforeValidator
8 |
9 |
class Image(BaseModel):
    # model_config = config_record
    # One row of the `images` table; Field titles drive fastui display labels.
    id: int = Field(title='ID')
    ts: datetime = Field(title='Timestamp')
    prompt: str = Field(title='Prompt')
    url: str = Field(title='URL')
16 |
17 |
18 | images_adapter = TypeAdapter(list[Annotated[Image, BeforeValidator(dict)]])
19 | PAGE_LIMIT = 50
20 |
21 |
async def list_images(conn: Connection, page: int) -> tuple[list[Image], int]:
    # Return one page of images (newest first) plus the total row count.
    offset = (page - 1) * PAGE_LIMIT
    rows = await conn.fetch('SELECT * FROM images ORDER BY ts desc OFFSET $1 LIMIT $2', offset, PAGE_LIMIT)
    # (alternate, deliberately slow n+1 variant kept for the demo:)
    # ids = await conn.fetch('SELECT id FROM images ORDER BY ts desc OFFSET $1 LIMIT $2', offset, PAGE_LIMIT)
    # rows = []
    # for row in ids:
    #     rows.append(await conn.fetchrow('SELECT * FROM images where id=$1', row['id']))
    images = images_adapter.validate_python(rows)
    total = await conn.fetchval('SELECT COUNT(*) FROM images')
    return images, total
32 |
33 |
async def get_image(conn: Connection, image_id: int) -> Image:
    """Load a single image row by primary key."""
    record = await conn.fetchrow('SELECT ts, prompt, url from images where id=$1', image_id)
    return Image(id=image_id, **record)
37 |
38 |
async def create_image(conn: Connection, openai_client: AsyncClient, animal: str) -> int:
    # Generate a DALL-E image of the animal, store prompt+url, return new row id.
    prompt = f'Create an image of a {animal} in the style of Francis Bacon'
    response = await openai_client.images.generate(prompt=prompt, model='dall-e-3')
    url = response.data[0].url
    return await conn.fetchval('INSERT INTO images (prompt, url) VALUES ($1, $2) RETURNING id', prompt, url)
44 |
45 |
async def create_images(conn: Connection) -> None:
    """Seed the images table with 100k synthetic rows for demo queries.

    No-op when the table already holds more than 100k rows, so repeated
    startups don't keep growing the table.
    """
    image_count = await conn.fetchval('SELECT count(*) from images')
    if image_count > 100_000:
        return

    ts = datetime(2024, 1, 1)
    images = []
    for _ in range(100_000):
        ts = ts + timedelta(seconds=1)
        images.append((ts, 'test data', 'https://example.com'))
    # stray f-string prefix removed — the statement has no placeholders (F541)
    await conn.executemany('INSERT INTO images (ts, prompt, url) VALUES ($1, $2, $3)', images)
57 |
--------------------------------------------------------------------------------
/2024-05-pycon-us-logfire/07_cat_bacon/main.py:
--------------------------------------------------------------------------------
1 | from typing import Annotated
2 |
3 | from fastapi import APIRouter, Request
4 | from fastui import FastUI, AnyComponent, components as c, events
5 | from fastui.components.display import DisplayMode, DisplayLookup
6 | from fastui.events import GoToEvent
7 | from fastui.forms import fastui_form
8 | from pydantic import BaseModel
9 |
10 | from .db import Database
11 | from .page import demo_page
12 | from . import images
13 |
14 |
15 | router = APIRouter()
16 |
17 |
class AnimalModel(BaseModel):
    # single form field: which animal to paint
    animal: str
20 |
21 |
@router.get('/', response_model=FastUI, response_model_exclude_none=True)
async def generate_image() -> list[AnyComponent]:
    # Index page: a form posting to /api/generate/, with a spinner while waiting.
    return demo_page(
        c.Heading(text='Generate Image', level=2),
        c.Paragraph(text='Generate an image of an animal in the style of Francis Bacon.'),
        c.ModelForm(
            model=AnimalModel,
            display_mode='page',
            submit_url='/api/generate/',
            loading=[c.Spinner(text='Generating Image...')],
        ),
    )
34 |
35 |
@router.post('/generate/', response_model=FastUI, response_model_exclude_none=True)
async def login_form_post(form: Annotated[AnimalModel, fastui_form(AnimalModel)], db: Database, request: Request):
    # NOTE(review): name looks copy-pasted from a login demo — this handler
    # actually generates an image and redirects to its detail page.
    async with db.acquire() as conn:
        image_id = await images.create_image(conn, request.app.state.openai, form.animal)
    return [c.FireEvent(event=GoToEvent(url=f'/images/{image_id}/'))]
41 |
42 |
@router.get('/images/', response_model=FastUI, response_model_exclude_none=True)
async def images_table_view(db: Database, page: int = 1) -> list[AnyComponent]:
    # Paginated table of previously generated images.
    async with db.acquire() as conn:
        image_list, count = await images.list_images(conn, page)

    return demo_page(
        c.Heading(text='List of Images', level=2),
        c.Table(
            data=image_list,
            data_model=images.Image,
            columns=[
                # clicking the prompt navigates to the image detail view
                DisplayLookup(field='prompt', on_click=GoToEvent(url='/images/{id}/')),
                DisplayLookup(field='ts', mode=DisplayMode.datetime),
            ],
        ),
        c.Pagination(page=page, page_size=images.PAGE_LIMIT, total=count)
    )
60 |
61 |
@router.get('/images/{image_id:int}/', response_model=FastUI, response_model_exclude_none=True)
async def image_view(db: Database, image_id: int) -> list[AnyComponent]:
    # Detail page for one image: back link, metadata table, and the picture.
    async with db.acquire() as conn:
        image = await images.get_image(conn, image_id)

    return demo_page(
        c.Link(components=[c.Text(text='Back')], on_click=events.BackEvent()),
        c.Details(
            data=image,
            fields=[
                DisplayLookup(field='id'),
                DisplayLookup(field='ts', mode=DisplayMode.datetime),
                DisplayLookup(field='prompt'),
            ]
        ),
        c.Image(src=image.url, alt=image.prompt, width=600, height=600),
        title=image.prompt,
    )
80 |
81 |
@router.get('/{path:path}', status_code=404)
async def api_404():
    """Catch-all for unknown /api paths, so they don't fall through to the index page."""
    return {'message': 'Not Found'}
86 |
--------------------------------------------------------------------------------
/2024-05-pycon-us-logfire/07_cat_bacon/page.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations as _annotations
2 |
3 | from fastui import AnyComponent
4 | from fastui import components as c
5 | from fastui.events import GoToEvent
6 |
7 |
def demo_page(*components: AnyComponent, title: str | None = None) -> list[AnyComponent]:
    """Wrap page components in the shared shell: title, navbar, body, footer."""
    return [
        c.PageTitle(text=f'Cat Bacon — {title}' if title else 'Cat Bacon'),
        c.Navbar(
            title='Cat Bacon',
            title_event=GoToEvent(url='/'),
            start_links=[
                c.Link(
                    components=[c.Text(text='Previous Images')],
                    on_click=GoToEvent(url='/images/'),
                    # highlight this nav link on any /images... path
                    active='startswith:/images',
                ),
            ]
        ),
        c.Page(
            components=[
                # render the title as a heading only when one is given
                *((c.Heading(text=title),) if title else ()),
                *components,
            ],
        ),
        c.Footer(
            links=[
                c.Link(components=[c.Text(text='Logfire Docs')], on_click=GoToEvent(url='https://docs.logfire.dev')),
            ],
        ),
    ]
34 |
--------------------------------------------------------------------------------
/2024-05-pycon-us-logfire/README.md:
--------------------------------------------------------------------------------
1 | # Logfire Pycon 2024
2 |
3 | Code snippets from:
4 | * [this](https://slides.com/samuelcolvin-pydantic/deck) talk at Pycon US on 2024-05-16
5 | * [this](https://slides.com/samuelcolvin-pydantic/logfire-pycon-2024) talk at EuroPython on 2024-07-12 (most of the talk was a live demo, so only a few slides)
6 |
7 | The rest of the examples are from the [logfire-demo](https://github.com/pydantic/logfire-demo) project.
8 |
9 | ## Running the demo
10 |
11 | 1. Install uv
12 | 2. Run `uv sync` to install dependencies
13 | 3. Run `uv run python {example file name}`
14 |
15 | ## SQL Queries
16 |
17 | The queries from the SQL slide were:
18 |
19 | ```sql
20 | select start_timestamp, (attributes->'response_data'->'usage'->>'total_tokens')::int as usage, attributes->'response_data'->'message'->>'content' as message
21 | from records
22 | where otel_scope_name='logfire.openai' and attributes->'response_data'->'message' ? 'content'
23 | order by start_timestamp desc
24 | ```
25 |
26 | and
27 |
28 | ```sql
29 | select sum((attributes->'response_data'->'usage'->>'total_tokens')::int)
30 | from records where otel_scope_name='logfire.openai'
31 | ```
32 |
--------------------------------------------------------------------------------
/2024-05-pycon-us-logfire/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "logfire-talk"
3 | version = "0.1.0"
4 | description = "Code snippets from talk on Pydantic Logfire."
5 | authors = [
6 | { name = "Samuel Colvin", email = "s@muelcolvin.com" }
7 | ]
8 | dependencies = [
9 | "fastapi>=0.110.0",
10 | "logfire[fastapi,httpx,asyncpg,system-metrics,requests,openai]>=0.46.1",
11 | "httpx>=0.27.0",
12 | "uvicorn[standard]>=0.28.0",
13 | "asyncpg>=0.29.0",
14 | "python-multipart>=0.0.9",
15 | "openai>=1.14.1",
16 | "pillow>=10.2.0",
17 | "stripe>=9.6.0",
18 | "pandas>=2.2.2",
19 | "beautifulsoup4>=4.12.3",
20 | "fastui>=0.6.0",
21 | "pydantic-settings>=2.3.4",
22 | ]
23 | readme = "README.md"
24 | requires-python = ">= 3.12"
25 |
26 | [tool.ruff]
27 | line-length = 120
28 | target-version = "py312"
29 | lint.extend-select = ["Q", "RUF100", "C90", "UP", "I"]
30 | lint.flake8-quotes = {inline-quotes = "single", multiline-quotes = "double"}
31 | lint.mccabe = { max-complexity = 14 }
32 | format.quote-style = "single"
33 |
--------------------------------------------------------------------------------
/2024-09-django-london-logfire/.gitignore:
--------------------------------------------------------------------------------
1 | *.py[cod]
2 | /.venv
3 | /db.sqlite3
4 | /static/
5 |
--------------------------------------------------------------------------------
/2024-09-django-london-logfire/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2023 - present Pydantic Services inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/2024-09-django-london-logfire/README.md:
--------------------------------------------------------------------------------
1 | # Django Cat Bacon
2 |
3 | A demo of [Pydantic Logfire](https://pydantic.dev/logfire).
4 |
5 | Slides from talk at Django London meetup in September 2024 available [here](https://slides.com/samuelcolvin-pydantic/logfire-europython-2024/).
6 |
--------------------------------------------------------------------------------
/2024-09-django-london-logfire/cat_bacon/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pydantic/talks/47b2837f290a3955983f534e7563afc174ff172f/2024-09-django-london-logfire/cat_bacon/__init__.py
--------------------------------------------------------------------------------
/2024-09-django-london-logfire/cat_bacon/admin.py:
--------------------------------------------------------------------------------
from django.contrib import admin

from .models import Image


class ImageAdmin(admin.ModelAdmin):
    """Read-only admin listing for generated images, newest first."""

    list_display = ('__str__', 'timestamp')
    ordering = ('-timestamp',)

    # Images are created by the app itself, so every field is display-only here.
    fields = ('animal', 'artist', 'timestamp', 'url', 'file_path')
    readonly_fields = ('animal', 'artist', 'timestamp', 'url', 'file_path')


admin.site.register(Image, ImageAdmin)
15 |
--------------------------------------------------------------------------------
/2024-09-django-london-logfire/cat_bacon/apps.py:
--------------------------------------------------------------------------------
1 | from django.apps import AppConfig
2 |
3 |
4 | class CatBaconConfig(AppConfig):
5 | default_auto_field = 'django.db.models.BigAutoField'
6 | name = 'cat_bacon'
7 |
--------------------------------------------------------------------------------
/2024-09-django-london-logfire/cat_bacon/forms.py:
--------------------------------------------------------------------------------
from django import forms

from .models import Image

ARTISTS = (
    'Francis Bacon',
    'Edvard Munch',
    'Pablo Picasso',
    'Salvador Dali',
    'Vincent van Gogh',
    'Andy Warhol',
)


class ImageForm(forms.ModelForm):
    """Form for requesting a generated image: an animal plus one of a fixed set of artists."""

    animal = forms.CharField(max_length=255)
    # Choice values and labels are identical — the artist name itself.
    artist = forms.ChoiceField(choices=[(name, name) for name in ARTISTS])

    class Meta:
        model = Image
        fields = ['animal', 'artist']
21 |
--------------------------------------------------------------------------------
/2024-09-django-london-logfire/cat_bacon/migrations/0001_initial.py:
--------------------------------------------------------------------------------
1 | # Generated by Django 5.1.1 on 2024-09-10 12:35
2 |
3 | from django.db import migrations, models
4 |
5 |
6 | class Migration(migrations.Migration):
7 |
8 | initial = True
9 |
10 | dependencies = [
11 | ]
12 |
13 | operations = [
14 | migrations.CreateModel(
15 | name='Image',
16 | fields=[
17 | ('id', models.AutoField(primary_key=True, serialize=False)),
18 | ('timestamp', models.DateTimeField(auto_now_add=True)),
19 | ('animal', models.CharField(max_length=255)),
20 | ('artist', models.CharField(max_length=255)),
21 | ('url', models.URLField(max_length=2000, null=True)),
22 | ('file_path', models.TextField(null=True)),
23 | ],
24 | ),
25 | ]
26 |
--------------------------------------------------------------------------------
/2024-09-django-london-logfire/cat_bacon/migrations/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pydantic/talks/47b2837f290a3955983f534e7563afc174ff172f/2024-09-django-london-logfire/cat_bacon/migrations/__init__.py
--------------------------------------------------------------------------------
/2024-09-django-london-logfire/cat_bacon/models.py:
--------------------------------------------------------------------------------
from django.db import models


class Image(models.Model):
    """A generated picture of an animal in the style of a famous artist."""

    id = models.AutoField(primary_key=True)
    timestamp = models.DateTimeField(auto_now_add=True)  # set once, at creation
    animal = models.CharField(max_length=255)
    artist = models.CharField(max_length=255)
    # `url` is the remote (OpenAI) image URL; `file_path` is the name of the
    # locally downloaded copy. Either may be null.
    url = models.URLField(null=True, max_length=2000)
    file_path = models.TextField(null=True)

    def __str__(self):
        return '{} in the style of {}'.format(self.animal, self.artist)
14 |
--------------------------------------------------------------------------------
/2024-09-django-london-logfire/cat_bacon/templates/base.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Cat Bacon
7 |
39 |
40 |
41 |
42 | Cat Bacon
43 | {% block content %}
44 | (content block missing)
45 | {% endblock %}
46 |
47 |
48 |
49 |
--------------------------------------------------------------------------------
/2024-09-django-london-logfire/cat_bacon/templates/image.html:
--------------------------------------------------------------------------------
1 | {% extends "base.html" %}
2 | {% load static %}
3 |
4 | {% block content %}
5 | {{ image.prompt }}
6 | back
7 |
8 | Animal: {{ image.animal }}
9 |
10 |
11 | Artist: {{ image.artist }}
12 |
13 |
14 | {% if image.file_path %}
15 |
16 | {% else %}
17 |
18 | {% endif %}
19 | {% endblock %}
20 |
--------------------------------------------------------------------------------
/2024-09-django-london-logfire/cat_bacon/templates/index.html:
--------------------------------------------------------------------------------
1 | {% extends "base.html" %}
2 |
3 | {% block content %}
4 |
Generate Image
5 |
10 | {% endblock %}
11 |
--------------------------------------------------------------------------------
/2024-09-django-london-logfire/cat_bacon/tests.py:
--------------------------------------------------------------------------------
1 | from django.test import TestCase
2 |
3 | # Create your tests here.
4 |
--------------------------------------------------------------------------------
/2024-09-django-london-logfire/cat_bacon/urls.py:
--------------------------------------------------------------------------------
from django.urls import path

from . import views

urlpatterns = [
    path('', views.index, name='index'),
    # `image_details` takes an `image_id` argument and `index` redirects with
    # `image_id=image.id`, so the URL must capture it with a path converter —
    # the bare 'image//' pattern could never supply the id.
    path('image/<int:image_id>/', views.image_details, name='image-details'),
]
9 |
--------------------------------------------------------------------------------
/2024-09-django-london-logfire/cat_bacon/views.py:
--------------------------------------------------------------------------------
1 | from uuid import uuid4
2 |
3 | import httpx
4 | from django.conf import settings
5 | from django.shortcuts import render, redirect, get_object_or_404
6 |
7 | from openai import Client
8 |
9 | from .forms import ImageForm
10 | from .models import Image
11 |
12 | client = Client()
13 |
14 |
def index(request):
    """Show the image-request form; on a valid POST, generate and store an image.

    GET (or any non-POST) renders an empty form. A valid POST generates the
    image, saves the record and redirects to its detail page; an invalid POST
    re-renders the bound form with its validation errors.
    """
    if request.method != 'POST':
        return render(request, 'index.html', {'form': ImageForm()})

    form = ImageForm(request.POST, request.FILES)
    if form.is_valid():
        image_file, image_url = create_image(client, form.instance.animal, form.instance.artist)
        print(f'image_file: {image_file!r}, image_url: {image_url!r}')
        form.instance.file_path = image_file
        form.instance.url = image_url
        image = form.save()
        return redirect('image-details', image_id=image.id)

    return render(request, 'index.html', {'form': form})
29 |
30 |
def image_details(request, image_id):
    """Render a single generated image, returning 404 for an unknown id."""
    image = get_object_or_404(Image, id=image_id)
    return render(request, 'image.html', {'image': image})
34 |
35 |
def create_image(openai_client: Client, animal: str, artist: str) -> tuple[str | None, str]:
    """Generate an image with DALL-E 3, download it, and return (file name, remote URL).

    The file name is relative to ``settings.MAIN_STATIC``. It is ``None`` only
    if the commented-out short-circuit return below is re-enabled.

    Raises:
        RuntimeError: if the OpenAI response contains no image URL.
        httpx.HTTPStatusError: if downloading the generated image fails.
    """
    prompt = f'Create an image of a {animal} in the style of {artist}'
    response = openai_client.images.generate(prompt=prompt, model='dall-e-3')

    image_url = response.data[0].url
    if image_url is None:
        # The API may return base64 data instead of a URL; fail loudly here
        # rather than passing None to httpx.get below.
        raise RuntimeError('OpenAI image response did not include a URL')
    # return None, image_url

    r = httpx.get(image_url)
    r.raise_for_status()
    path = f'{uuid4().hex}.jpg'
    (settings.MAIN_STATIC / path).write_bytes(r.content)
    return path, image_url
48 |
--------------------------------------------------------------------------------
/2024-09-django-london-logfire/create_many.py:
--------------------------------------------------------------------------------
import os

import django

# Settings must be configured before any model import, hence setup() runs
# ahead of the `cat_bacon` import below.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'logfire_django_demo.settings')

django.setup()

from cat_bacon.models import Image

# Bulk-insert 10,000 identical cat images for load-testing the demo.
batch = [Image(animal='cat', artist='Francis Bacon', url='https://cataas.com/cat') for _ in range(10_000)]
Image.objects.bulk_create(batch)
13 |
--------------------------------------------------------------------------------
/2024-09-django-london-logfire/logfire_django_demo/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pydantic/talks/47b2837f290a3955983f534e7563afc174ff172f/2024-09-django-london-logfire/logfire_django_demo/__init__.py
--------------------------------------------------------------------------------
/2024-09-django-london-logfire/logfire_django_demo/asgi.py:
--------------------------------------------------------------------------------
1 | """
2 | ASGI config for logfire_django_demo project.
3 |
4 | It exposes the ASGI callable as a module-level variable named ``application``.
5 |
6 | For more information on this file, see
7 | https://docs.djangoproject.com/en/5.1/howto/deployment/asgi/
8 | """
9 |
10 | import os
11 |
12 | from django.core.asgi import get_asgi_application
13 |
14 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'logfire_django_demo.settings')
15 |
16 | application = get_asgi_application()
17 |
--------------------------------------------------------------------------------
/2024-09-django-london-logfire/logfire_django_demo/settings.py:
--------------------------------------------------------------------------------
1 | """
2 | Django settings for logfire_django_demo project.
3 |
4 | Generated by 'django-admin startproject' using Django 5.1.1.
5 |
6 | For more information on this file, see
7 | https://docs.djangoproject.com/en/5.1/topics/settings/
8 |
9 | For the full list of settings and their values, see
10 | https://docs.djangoproject.com/en/5.1/ref/settings/
11 | """
12 |
13 | from pathlib import Path
14 |
15 | # Build paths inside the project like this: BASE_DIR / 'subdir'.
16 | BASE_DIR = Path(__file__).resolve().parent.parent
17 |
18 |
19 | # Quick-start development settings - unsuitable for production
20 | # See https://docs.djangoproject.com/en/5.1/howto/deployment/checklist/
21 |
22 | # SECURITY WARNING: keep the secret key used in production secret!
23 | SECRET_KEY = 'django-insecure-a)5f)9((nn=$p%up0w%y9g8hia585&w$k9kn&979k!(2-yszrv'
24 |
25 | # SECURITY WARNING: don't run with debug turned on in production!
26 | DEBUG = True
27 |
28 | ALLOWED_HOSTS = []
29 |
30 |
31 | # Application definition
32 |
33 | INSTALLED_APPS = [
34 | 'django.contrib.admin',
35 | 'django.contrib.auth',
36 | 'django.contrib.contenttypes',
37 | 'django.contrib.sessions',
38 | 'django.contrib.messages',
39 | 'django.contrib.staticfiles',
40 | 'cat_bacon',
41 | ]
42 |
43 | MIDDLEWARE = [
44 | 'django.middleware.security.SecurityMiddleware',
45 | 'django.contrib.sessions.middleware.SessionMiddleware',
46 | 'django.middleware.common.CommonMiddleware',
47 | 'django.middleware.csrf.CsrfViewMiddleware',
48 | 'django.contrib.auth.middleware.AuthenticationMiddleware',
49 | 'django.contrib.messages.middleware.MessageMiddleware',
50 | 'django.middleware.clickjacking.XFrameOptionsMiddleware',
51 | ]
52 |
53 | ROOT_URLCONF = 'logfire_django_demo.urls'
54 |
55 | TEMPLATES = [
56 | {
57 | 'BACKEND': 'django.template.backends.django.DjangoTemplates',
58 | 'DIRS': [],
59 | 'APP_DIRS': True,
60 | 'OPTIONS': {
61 | 'context_processors': [
62 | 'django.template.context_processors.debug',
63 | 'django.template.context_processors.request',
64 | 'django.contrib.auth.context_processors.auth',
65 | 'django.contrib.messages.context_processors.messages',
66 | ],
67 | },
68 | },
69 | ]
70 |
71 | WSGI_APPLICATION = 'logfire_django_demo.wsgi.application'
72 |
73 |
74 | # Database
75 | # https://docs.djangoproject.com/en/5.1/ref/settings/#databases
76 |
77 | DATABASES = {
78 | 'default': {
79 | # 'ENGINE': 'django.db.backends.sqlite3',
80 | # 'NAME': BASE_DIR / 'db.sqlite3',
81 | 'ENGINE': 'django.db.backends.postgresql',
82 | 'NAME': 'cat_bacon',
83 | 'USER': 'postgres',
84 | 'PASSWORD': 'waffle',
85 | 'HOST': 'localhost',
86 | 'PORT': '5432',
87 | }
88 | }
89 |
90 |
91 | # Password validation
92 | # https://docs.djangoproject.com/en/5.1/ref/settings/#auth-password-validators
93 |
94 | AUTH_PASSWORD_VALIDATORS = [
95 | {
96 | 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
97 | },
98 | {
99 | 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
100 | },
101 | {
102 | 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
103 | },
104 | {
105 | 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
106 | },
107 | ]
108 |
109 |
110 | # Internationalization
111 | # https://docs.djangoproject.com/en/5.1/topics/i18n/
112 |
113 | LANGUAGE_CODE = 'en-us'
114 |
115 | TIME_ZONE = 'UTC'
116 |
117 | USE_I18N = True
118 |
119 | USE_TZ = True
120 |
121 | # Static files (CSS, JavaScript, Images)
122 | # https://docs.djangoproject.com/en/5.1/howto/static-files/
123 |
124 | STATIC_URL = 'static/'
125 | MAIN_STATIC = BASE_DIR / 'static'
126 | STATICFILES_DIRS = [MAIN_STATIC]
127 |
128 | # Default primary key field type
129 | # https://docs.djangoproject.com/en/5.1/ref/settings/#default-auto-field
130 |
131 | DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
132 |
--------------------------------------------------------------------------------
/2024-09-django-london-logfire/logfire_django_demo/urls.py:
--------------------------------------------------------------------------------
1 | """
2 | URL configuration for logfire_django_demo project.
3 |
4 | The `urlpatterns` list routes URLs to views. For more information please see:
5 | https://docs.djangoproject.com/en/5.1/topics/http/urls/
6 | Examples:
7 | Function views
8 | 1. Add an import: from my_app import views
9 | 2. Add a URL to urlpatterns: path('', views.home, name='home')
10 | Class-based views
11 | 1. Add an import: from other_app.views import Home
12 | 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
13 | Including another URLconf
14 | 1. Import the include() function: from django.urls import include, path
15 | 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
16 | """
17 | from django.contrib import admin
18 | from django.urls import path, include
19 |
20 | urlpatterns = [
21 | path('', include('cat_bacon.urls')),
22 | path('admin/', admin.site.urls),
23 | ]
24 |
--------------------------------------------------------------------------------
/2024-09-django-london-logfire/logfire_django_demo/wsgi.py:
--------------------------------------------------------------------------------
1 | """
2 | WSGI config for logfire_django_demo project.
3 |
4 | It exposes the WSGI callable as a module-level variable named ``application``.
5 |
6 | For more information on this file, see
7 | https://docs.djangoproject.com/en/5.1/howto/deployment/wsgi/
8 | """
9 |
10 | import os
11 |
12 | from django.core.wsgi import get_wsgi_application
13 |
14 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'logfire_django_demo.settings')
15 |
16 | application = get_wsgi_application()
17 |
--------------------------------------------------------------------------------
/2024-09-django-london-logfire/manage.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """Django's command-line utility for administrative tasks."""
3 | import os
4 | import sys
5 |
6 |
7 | def main():
8 | """Run administrative tasks."""
9 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'logfire_django_demo.settings')
10 | try:
11 | from django.core.management import execute_from_command_line
12 | except ImportError as exc:
13 | raise ImportError(
14 | "Couldn't import Django. Are you sure it's installed and "
15 | "available on your PYTHONPATH environment variable? Did you "
16 | "forget to activate a virtual environment?"
17 | ) from exc
18 | execute_from_command_line(sys.argv)
19 |
20 |
21 | if __name__ == '__main__':
22 | main()
23 |
--------------------------------------------------------------------------------
/2024-09-django-london-logfire/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "logfire-django-cat-bacon"
3 | version = "0.1.0"
4 | description = "Add your description here"
5 | readme = "README.md"
6 | requires-python = ">=3.11"
7 | dependencies = [
8 | "django>=5.1.1",
9 | "openai>=1.44.1",
10 | "httpx>=0.27.2",
11 | "psycopg>=3.2.1",
12 | ]
13 |
--------------------------------------------------------------------------------
/2024-12-boston-ae/README.md:
--------------------------------------------------------------------------------
1 | # Slides for AI Global Summit, Boston December 2024
2 |
3 | Talk given at the AI Global Summit (https://www.algopreneurship.ai) in Boston in December 2024.
4 |
--------------------------------------------------------------------------------
/2024-12-boston-ae/example_openai.py:
--------------------------------------------------------------------------------
from datetime import date

from openai import OpenAI
from pydantic import BaseModel


class User(BaseModel):
    """Definition of a user"""

    id: int
    name: str
    dob: date


# Expose the Pydantic model to the LLM as an OpenAI function/tool definition:
# the class docstring becomes the tool description, the JSON schema its parameters.
user_tool = {
    'function': {
        'name': User.__name__,
        'description': User.__doc__,
        'parameters': User.model_json_schema(),
    },
    'type': 'function'
}

response = OpenAI().chat.completions.create(
    model='gpt-4o',
    messages=[
        {'role': 'system', 'content': 'Extract information about the user'},
        {'role': 'user', 'content': 'The user with ID 123 is called Samuel, born on Jan 28th 87'}
    ],
    tools=[user_tool]
)
# Validate the model's tool-call arguments straight back into the Pydantic model.
user = User.model_validate_json(response.choices[0].message.tool_calls[0].function.arguments)
print(repr(user))
32 |
--------------------------------------------------------------------------------
/2024-12-boston-ae/example_pydantic.py:
--------------------------------------------------------------------------------
from datetime import date

from devtools import debug
from pydantic import BaseModel


class User(BaseModel):
    id: int
    name: str
    dob: date


# NOTE(review): '198-1-28' is not a valid date, so this raises a
# ValidationError before debug() runs — presumably deliberate, to demo
# Pydantic's error output; use '1987-01-28' for the happy path.
user = User(id='123', name='Samuel Colvin', dob='198-1-28')
debug(user)

# debug(User.model_json_schema())
17 |
--------------------------------------------------------------------------------
/2024-12-boston-ae/example_pydanticai.py:
--------------------------------------------------------------------------------
from datetime import date

from pydantic import BaseModel
from pydantic_ai import Agent


class User(BaseModel):
    """Definition of a user"""

    id: int
    name: str
    dob: date


# The agent extracts a structured User from free-form text in one call.
agent = Agent(
    'openai:gpt-4o',
    result_type=User,
    system_prompt='Extract information about the user',
)
result = agent.run_sync('The user with ID 123 is called Samuel, born on Jan 28th 87')
print(result.data)
20 |
--------------------------------------------------------------------------------
/2024-12-boston-ae/example_weather.py:
--------------------------------------------------------------------------------
1 |
2 | import asyncio
3 | import os
4 | from dataclasses import dataclass
5 | from typing import Any
6 |
7 | from httpx import AsyncClient
8 |
9 | from pydantic_ai import Agent, ModelRetry, RunContext
10 |
11 | import logfire
12 | logfire.configure()
13 |
14 |
@dataclass
class Deps:
    """Per-run dependencies injected into the weather agent's tools."""

    client: AsyncClient
    weather_api_key: str
    geo_api_key: str
20 |
21 |
22 | weather_agent = Agent(
23 | 'openai:gpt-4o',
24 | system_prompt='Be concise, reply with one sentence.',
25 | deps_type=Deps,
26 | retries=2,
27 | )
28 |
29 |
@weather_agent.tool
async def get_lat_lng(
    ctx: RunContext[Deps], location_description: str
) -> dict[str, float]:
    """Get the latitude and longitude of a location.

    Args:
        ctx: The context.
        location_description: A description of a location.
    """
    response = await ctx.deps.client.get(
        'https://geocode.maps.co/search',
        params={'q': location_description, 'api_key': ctx.deps.geo_api_key},
    )
    response.raise_for_status()
    matches = response.json()

    if not matches:
        # Ask the model to retry with a different description.
        raise ModelRetry('Could not find the location')
    return {'lat': matches[0]['lat'], 'lng': matches[0]['lon']}
52 |
53 |
@weather_agent.tool
async def get_weather(ctx: RunContext[Deps], lat: float, lng: float) -> dict[str, Any]:
    """Get the weather at a location.

    Args:
        ctx: The context.
        lat: Latitude of the location.
        lng: Longitude of the location.
    """
    response = await ctx.deps.client.get(
        'https://api.tomorrow.io/v4/weather/realtime',
        params={
            'apikey': ctx.deps.weather_api_key,
            'location': f'{lat},{lng}',
            'units': 'metric',
        },
    )
    response.raise_for_status()
    values = response.json()['data']['values']

    # https://docs.tomorrow.io/reference/data-layers-weather-codes
    code_lookup = {
        1000: 'Clear, Sunny',
        1100: 'Mostly Clear',
        1101: 'Partly Cloudy',
        1102: 'Mostly Cloudy',
        1001: 'Cloudy',
        2000: 'Fog',
        2100: 'Light Fog',
        4000: 'Drizzle',
        4001: 'Rain',
        4200: 'Light Rain',
        4201: 'Heavy Rain',
        5000: 'Snow',
        5001: 'Flurries',
        5100: 'Light Snow',
        5101: 'Heavy Snow',
        6000: 'Freezing Drizzle',
        6001: 'Freezing Rain',
        6200: 'Light Freezing Rain',
        6201: 'Heavy Freezing Rain',
        7000: 'Ice Pellets',
        7101: 'Heavy Ice Pellets',
        7102: 'Light Ice Pellets',
        8000: 'Thunderstorm',
    }
    return {
        'temperature': f'{values["temperatureApparent"]:0.0f}°C',
        'description': code_lookup.get(values['weatherCode'], 'Unknown'),
    }
105 |
106 |
async def main():
    """Run the weather agent once against a hard-coded two-city question."""
    # API keys come from the environment; fail fast if either is missing.
    weather_api_key = os.environ['WEATHER_API_KEY']
    geo_api_key = os.environ['GEO_API_KEY']
    async with AsyncClient() as client:
        result = await weather_agent.run(
            'What is the weather like in London and in Wiltshire?',
            deps=Deps(client=client, weather_api_key=weather_api_key, geo_api_key=geo_api_key),
        )
    print('Response:', result.data)


if __name__ == '__main__':
    asyncio.run(main())
122 |
--------------------------------------------------------------------------------
/2024-12-boston-ae/logfire-weather-agent.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pydantic/talks/47b2837f290a3955983f534e7563afc174ff172f/2024-12-boston-ae/logfire-weather-agent.png
--------------------------------------------------------------------------------
/2024-12-boston-ae/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "boston"
3 | version = "0.1.0"
4 | description = "Add your description here"
5 | readme = "README.md"
6 | requires-python = ">=3.12"
7 | dependencies = [
8 | "devtools>=0.12.2",
9 | "pydantic-ai[logfire]>=0.0.11",
10 | ]
11 |
12 | [tool.logfire]
13 | ignore_no_config = true
14 |
--------------------------------------------------------------------------------
/2024-12-boston-ae/slides.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # Slides
4 |
5 | ## whoami
6 |
7 | **Samuel Colvin** — creator of Pydantic
8 |
9 | Pydantic:
10 | * Python library for data validation
11 | * Created Pydantic in 2017 — long before Gen AI
12 | * Became a company, backed by Sequoia in 2023 — released Logfire earlier this year
13 | * Released Pydantic V2 last year, core rewritten in Rust
14 | * downloaded >300M per month
15 | * Used by all of FAANG, OpenAI, Anthropic etc.
16 |
17 |
18 | Ubiquitous • Boring • Beloved
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 | ---
52 |
53 | ## Pydantic
54 |
55 | Solving dumb problems for smart people.
56 |
57 | ```py
58 | from datetime import date
59 | from pydantic import BaseModel
60 |
61 | class User(BaseModel):
62 | id: int
63 | name: str
64 | dob: date
65 |
66 |
67 | user = User(id='123', name='Samuel Colvin', dob='1987-01-28')
68 | #> User(id=123, name='Samuel Colvin', dob=date(1987, 1, 28))
69 |
70 | user = User.model_validate_json('{"id": 123, "name": "Samuel Colvin", "dob": "1987-01-28"}')
71 | #> User(id=123, name='Samuel Colvin', dob=date(1987, 1, 28))
72 |
73 | print(User.model_json_schema())
74 | s = {
75 | 'properties': {
76 | 'id': {'title': 'Id', 'type': 'integer'},
77 | 'name': {'title': 'Name', 'type': 'string'},
78 | 'dob': {'format': 'date', 'title': 'Dob', 'type': 'string'},
79 | },
80 | 'required': ['id', 'name', 'dob'],
81 | 'title': 'User',
82 | 'type': 'object',
83 | }
84 | ```
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 | ---
96 |
97 | ## Pydantic in genAI
98 |
99 | Over the last couple of years, Pydantic has been adopted by almost every Python LLM library.
100 |
101 | Why?
102 |
103 | ```py
104 | from datetime import date
105 | from pydantic import BaseModel
106 | from openai import OpenAI
107 |
108 | class User(BaseModel):
109 | """Definition of a user"""
110 | id: int
111 | name: str
112 | dob: date
113 |
114 | response = OpenAI().chat.completions.create(
115 | model='gpt-4o',
116 | messages=[
117 | {'role': 'system', 'content': 'Extract information about the user'},
118 | {'role': 'user', 'content': 'The user with ID 123 is called Samuel, born on Jan 28th 87'}
119 | ],
120 | tools=[
121 | {
122 | 'function': {
123 | 'name': User.__name__,
124 | 'description': User.__doc__,
125 | 'parameters': User.model_json_schema(),
126 | },
127 | 'type': 'function'
128 | }
129 | ]
130 | )
131 | user = User.model_validate_json(response.choices[0].message.tool_calls[0].function.arguments)
132 | print(user)
133 | ```
134 |
135 |
136 |
137 |
138 |
139 |
140 |
141 |
142 |
143 | ---
144 |
145 | ## PydanticAI
146 |
147 | That same example with PydanticAI — Agent Framework for production.
148 |
149 | ```py
150 | from datetime import date
151 | from pydantic_ai import Agent
152 | from pydantic import BaseModel
153 |
154 | class User(BaseModel):
155 | """Definition of a user"""
156 | id: int
157 | name: str
158 | dob: date
159 |
160 | agent = Agent(
161 | 'openai:gpt-4o',
162 | result_type=User,
163 | system_prompt='Extract information about the user',
164 | )
165 | result = agent.run_sync('The user with ID 123 is called Samuel, born on Jan 28th 87')
166 | print(result.data)
167 | ```
168 |
169 |
170 |
171 |
172 |
173 |
174 |
175 |
176 |
177 |
178 |
179 | ---
180 |
181 | ## Dependency Injection & type safety
182 |
183 | In real applications, LLMs don't operate in isolation.
184 |
185 | ```py
186 | from dataclasses import dataclass
187 | from typing import Any
188 | from httpx import AsyncClient
189 | from pydantic_ai import Agent, RunContext
190 |
191 |
192 | @dataclass
193 | class Deps:
194 | client: AsyncClient
195 | weather_api_key: str | None
196 | geo_api_key: str | None
197 |
198 |
199 | weather_agent = Agent('openai:gpt-4o', deps_type=Deps)
200 |
201 |
202 | @weather_agent.tool
203 | async def get_lat_lng(ctx: RunContext[Deps], location_description: str) -> dict[str, float]:
204 | params = {'q': location_description, 'api_key': ctx.deps.geo_api_key}
205 | r = await ctx.deps.client.get('https://geocode.maps.co/search', params=params)
206 | r.raise_for_status()
207 | data = r.json()
208 | return {'lat': data[0]['lat'], 'lng': data[0]['lon']}
209 |
210 |
211 | @weather_agent.tool
212 | async def get_weather(ctx: RunContext[Deps], lat: float, lng: float) -> dict[str, Any]:
213 | params = {'apikey': ctx.deps.weather_api_key, 'location': f'{lat},{lng}'}
214 | r = await ctx.deps.client.get('https://api.tomorrow.io/v4/weather/realtime', params=params)
215 | r.raise_for_status()
216 | values = r.json()['data']['values']
217 | return {
218 | 'temperature': f'{values["temperatureApparent"]:0.0f}°C',
219 | 'description': values['weatherCode'],
220 | }
221 |
222 |
223 | async def main(weather_api_key: str, geo_api_key: str):
224 | async with AsyncClient() as client:
225 | deps = Deps(client=client, weather_api_key=weather_api_key, geo_api_key=geo_api_key)
226 | result = await weather_agent.run('What is the weather like in London and in Wiltshire?', deps=deps)
227 | print(result.data)
228 | ```
229 |
230 |
231 |
232 |
233 |
234 |
235 | ---
236 |
237 | ## Observability
238 |
239 | How LLMs behave is inherently non-deterministic and slow — we sorely need Observability into what agents are doing.
240 |
241 | Logfire to the rescue, just add a few lines to our code:
242 |
243 | ```py
244 | import logfire
245 | logfire.configure()
246 | ```
247 |
248 |
249 |
250 |
251 |
252 |
253 |
254 |
255 |
256 |
257 |
258 |
259 |
260 |
261 |
262 |
263 |
264 | ---
265 |
266 | ### Next steps: Agent handoff
267 |
268 | Making it easier to build multi-agent systems.
269 |
270 | ```py
271 | from dataclasses import dataclass
272 | from datetime import datetime
273 |
274 | from httpx import AsyncClient
275 | from pydantic import BaseModel
276 |
277 | from pydantic_ai import Agent
278 | from pydantic_ai.tools import AgentTool
279 |
280 | @dataclass
281 | class Deps:
282 | http_client: AsyncClient
283 |
284 | class Flight(BaseModel):
285 | departure_time: datetime
286 | arrival_time: datetime
287 | destination: str
288 |
289 | search_agent = Agent(model='openai:gpt-4o', deps_type=Deps, result_type=list[Flight])
290 |
291 |
292 | class DesiredFlight(BaseModel):
293 | ideal_flight_time: datetime
294 | destination: str
295 |
296 |
297 | control_agent = Agent(
298 | model='openai:gpt-4o',
299 | tools=[
300 | AgentTool(
301 | name='find_flights',
302 | agent=Agent(model='openai:gpt-4o', deps_type=Deps, result_type=list[Flight]),
303 | input_type=DesiredFlight,
304 | ),
305 | AgentTool(
306 | name='select_best_flight',
307 | agent=Agent(model='openai:gpt-4o', deps_type=Deps, result_type=Flight),
308 | input_type=DesiredFlight,
309 | )
310 | ],
311 | deps_type=Deps
312 | )
313 |
314 | result = control_agent.run_sync('Find me a flight to Alaska on the 20th of December')
315 | ```
316 |
317 |
318 |
319 |
320 |
321 |
322 |
323 |
324 |
325 |
326 | ---
327 |
328 | ## Next steps: Model Context Protocol
329 |
330 | Released by Anthropic last week.
331 |
332 | > "an open standard that enables developers to build secure, two-way connections between their data sources and AI-powered tools"
333 |
334 |
335 | ```py
336 | from pydantic_ai import Agent
337 | from pydantic_ai.toolsets import SlackToolset, OpenAPIToolset
338 |
339 |
340 | agent = Agent(
341 | 'openai:gpt-4o',
342 | toolsets=[SlackToolset(api_key=...), OpenAPIToolset(url='https://api.example.com')]
343 | )
344 | ...
345 | ```
346 |
347 |
348 |
349 |
350 |
351 |
352 |
353 |
354 |
355 |
356 |
357 |
358 |
359 |
360 |
361 |
362 |
363 |
364 |
365 |
366 |
367 |
368 |
369 | ---
370 |
371 | ## Thank you!
372 |
373 | Some useful links:
374 |
375 | * Pydantic: docs.pydantic.dev
376 |
377 | * Logfire: pydantic.dev/logfire
378 |
379 | * PydanticAI: ai.pydantic.dev
380 |
--------------------------------------------------------------------------------
/2025-02-ai-engineer-pydantic-ai/.python-version:
--------------------------------------------------------------------------------
1 | 3.13
2 |
--------------------------------------------------------------------------------
/2025-02-ai-engineer-pydantic-ai/Makefile:
--------------------------------------------------------------------------------
1 | .DEFAULT_GOAL := all
2 |
3 | .PHONY: .uv
4 | .uv:
5 | @uv --version || echo 'Please install uv: https://docs.astral.sh/uv/getting-started/installation/'
6 |
7 | .PHONY: .pre-commit
8 | .pre-commit:
9 | @pre-commit -V || echo 'Please install pre-commit: https://pre-commit.com/'
10 |
11 | .PHONY: install
12 | install: .uv .pre-commit
13 | uv sync --frozen
14 | #pre-commit install --install-hooks
15 |
16 | .PHONY: format
17 | format:
18 | uv run ruff format
19 | uv run ruff check --fix --fix-only
20 |
21 | .PHONY: lint
22 | lint:
23 | uv run ruff format --check
24 | uv run ruff check
25 |
26 | .PHONY: typecheck
27 | typecheck:
28 | PYRIGHT_PYTHON_IGNORE_WARNINGS=1 uv run pyright
29 |
30 | .PHONY: dev
31 | dev:
32 | uv run uvicorn app:app --reload
33 |
34 | .PHONY: all
35 | all: format lint typecheck
36 |
--------------------------------------------------------------------------------
/2025-02-ai-engineer-pydantic-ai/README.md:
--------------------------------------------------------------------------------
1 | # AI Engineer in NYC, February 2025
2 |
3 | ## PydanticAI
4 |
5 | [Schedule](https://www.ai.engineer/summit/2025/schedule#2025-02-22)
6 |
--------------------------------------------------------------------------------
/2025-02-ai-engineer-pydantic-ai/app/__init__.py:
--------------------------------------------------------------------------------
1 | from .server import app
2 |
--------------------------------------------------------------------------------
/2025-02-ai-engineer-pydantic-ai/app/analyse.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations as _annotations
2 |
3 | from dataclasses import dataclass
4 | from typing import TYPE_CHECKING, Literal
5 | import logfire
6 |
7 |
8 | if TYPE_CHECKING:
9 | from .server import EmailInfo
10 |
11 |
@dataclass
class EmailOk:
    """Analysis outcome: let the email through untouched."""

    reason: str  # human-readable explanation of the decision
    status: Literal['ok'] = 'ok'  # discriminator used by the server


@dataclass
class EmailReply:
    """Analysis outcome: send `text` back to the sender as a reply."""

    text: str  # markdown body of the reply to send
    reason: str  # human-readable explanation of the decision
    status: Literal['reply'] = 'reply'  # discriminator used by the server


@dataclass
class EmailDrop:
    """Analysis outcome: silently drop the email."""

    reason: str  # human-readable explanation of the decision
    status: Literal['drop'] = 'drop'  # discriminator used by the server
29 |
30 |
@logfire.instrument()
async def analyse_email(email: EmailInfo) -> EmailOk | EmailReply | EmailDrop:
    """Decide what to do with an inbound email.

    Anything that is part of a thread (has a References header) is always
    let through; otherwise a crude subject check flags likely spam and
    asks the sender for a reply.
    """
    # Threaded replies are never filtered.
    if email.references:
        return EmailOk(reason='This email is a reply, let all replies through.')

    subject_is_spammy = 'spam' in email.subject.lower()
    if not subject_is_spammy:
        return EmailOk(reason='This email is not spam.')

    return EmailReply(reason='looks like your email is spam.', text='Please stop sending spam.')
43 |
--------------------------------------------------------------------------------
/2025-02-ai-engineer-pydantic-ai/app/prompt.toml:
--------------------------------------------------------------------------------
1 | prompt = """
2 | Analyse an email to determine if it is likely to be interesting to me.
3 |
4 | If the email looks interesting, return "EmailOk" with a reason.
5 |
6 | If the email does not look interesting, return "EmailReply" and write a reply asking the user for more information.
7 |
8 | Reply asking for more info, if the email:
9 | * is offering outbound sales services, e.g. getting me meetings or clients.
10 | * incorrectly assumes we offer "data validation services".
11 | * appears to be written by an LLM/AI model.
12 |
13 | If the email is part of a thread, and the sender has replied to your specific questions, let it through, otherwise
14 | query them again.
15 |
16 | You should always sign your emails as "Samuel's personal spiced ham judge.".
17 |
18 | You should include a link to this youtube video for context: https://youtu.be/anwy2MPT5RE?feature=shared
19 |
20 | Reply as markdown, remain polite, but be clear about what is required.
21 | """
22 |
23 | [[examples]]
24 | subject = "opportunity?"
25 | body = """
26 | Hi Samuel,
27 |
28 | We have the ability to track software companies showing clear buying signals for data validation -- particularly companies companies launching new data-centric applications or expanding data validation engineering teams.
29 |
30 | We can drive introductions and assist in closing deals by becoming an extension of your team to create and execute an outbound sales strategy.
31 |
32 | I'm happy to chat or send more info if relevant.
33 |
34 | All the best,
35 |
36 | Kevin
37 | Founder -- Jump Partners
38 | """
39 | response = "EmailReply"
40 |
41 | [[examples]]
42 | subject = "Re: Animated Explainer for Pydantic?"
43 | body = """
44 | Hi Samuel,
45 | Hope this email finds you well. Following up on my previous message about the animated explainer video for Pydantic. Let's discuss how an animated explainer video can redefine Pydantic's narrative. It could be an explainer video explaining your service or product.
46 |
47 | Check out our portfolio https://10.studio/portfolio/.
48 |
49 | Looking forward to hearing from you.
50 |
51 | Best,
52 | 10 Studio https://10.studio/
53 |
54 | On Sun, January 19, 2025 3:00 PM, Lunia Parse
55 | [james@your10studio.com]> wrote:
56 |
57 | > Hi Samuel,
58 | > Was looking at Pydantic and was really intrigued with how you simplify data validation effortlessly. Great job!
59 | >
60 | > An animated explainer video can effectively demonstrate the simplicity and efficiency of Pydantic's data validation process, attracting more users to the platform.
61 | >
62 | > Mind if I send over some example videos?
63 | >
64 | > Thanks,
65 | > James
66 | > 10 Studio
67 | """
68 | response = "EmailReply"
69 |
70 | [[examples]]
71 | subject = "Accelerate AI Adoption with Proven Governance Solutions"
72 | body = """
73 | Hi Samuel,
74 |
75 | Following up on AI implementation strategies - your pioneering work with Pydantic has revolutionized data validation, making you uniquely positioned to lead in AI governance.
76 |
77 | Organizations with structured AI governance frameworks see 40% better adoption rates, while maintaining the agility that software development teams need.
78 |
79 | Our template provides ready-to-implement protocols that help founders like you scale AI initiatives while ensuring proper controls - essential for Pydantic's continued innovation.
80 |
81 | Secure your competitive advantage with industry-leading governance standards.
82 |
83 | Download your AI Policy Template
84 | https://info.whitehat-seo.co.uk/ai-policy-template
85 |
86 |
87 | Clwyd Welsh
88 | CEO
89 | Whitehat Inbound Marketing
90 | """
91 | response = "EmailReply"
92 |
93 | [[examples]]
94 | subject = "Sponsoring Pydantic"
95 | body = """
96 | Hey Samuel,
97 |
98 | James, VP of DevRel at Sausage and a long-time fan of your OSS work,
99 |
100 | As a company that uses Pydantic extensively in its core, maintains two related library, and uses it as a core part of our OSS project,
101 | it was just about time to sponsor and appreciate your work, and we just started a monthly $200 sponsorship earlier today.
102 |
103 | As you requested, I'm contacting you here for instructions about what materials are needed from our side to feature us as sponsors. Please help with directions.
104 |
105 | Best,
106 | James
107 | """
108 | response = "EmailOk"
109 |
110 | [[examples]]
111 | subject = "Meet up in SF"
112 | body = """
113 | Hi there! I am a founder, using Pydantic AI for my backend. Would love to buy you coffee if there’s a
114 | chance while you are here in SF or just meet up if there’s a luma type thing.
115 |
116 | Thank you for your work!
117 |
118 | David
119 | """
120 | response = "EmailOk"
121 |
--------------------------------------------------------------------------------
/2025-02-ai-engineer-pydantic-ai/app/send_reply.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations as _annotations
2 | import os
3 | from html import escape
4 | from textwrap import indent
5 | from typing import TYPE_CHECKING
6 |
7 | from markdown import markdown
8 | from httpx import AsyncClient
9 | from aioaws.ses import SesConfig, SesClient, SesRecipient
10 | import logfire
11 |
12 | if TYPE_CHECKING:
13 | from .server import EmailInfo
14 | from .analyse import EmailReply
15 |
# SES configuration is read from the environment at import time; missing keys
# raise KeyError immediately, surfacing misconfiguration early.
ses_config = SesConfig(os.environ['AWS_ACCESS_KEY'], os.environ['AWS_SECRET_KEY'], 'us-east-1')


@logfire.instrument
async def send_reply(client: AsyncClient, email: EmailInfo, email_reply: EmailReply) -> None:
    """Send `email_reply` to the sender of `email` via AWS SES.

    Builds both a plain-text and an HTML body (quoting the original message)
    and sets In-Reply-To/References headers so the reply threads correctly.

    NOTE(review): the HTML-building f-strings below appear garbled — the HTML
    tags (e.g. <br>, blockquote markup) seem to have been stripped from this
    copy of the file, leaving unterminated string literals. Restore from the
    original source before running.
    """
    plain_text = email_reply.text

    # Quote header, e.g. 'On 01 Jan 2025 at 12:00, alice@example.com wrote'.
    summary = f'On {email.timestamp:%d %b %Y at %H:%M}, {email.from_} wrote'
    if email.text:
        plain_text += f'\n\n{summary}:\n{indent(email.text, "> ")}'

    html = f'{markdown(email_reply.text)}
'
    # Prefer the original HTML body for quoting; fall back to escaped text.
    if email.html:
        quote_body = email.html
    elif email.text:
        quote_body = escape(email.text).replace('\n', '
')
    else:
        quote_body = None

    if quote_body:
        # Gmail-style left-border blockquote styling for the quoted message.
        quote_styles = 'margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex'
        html += (
            f'{escape(summary)}:
'
            f'
'
            f'{quote_body}
'
        )

    ses_client = SesClient(client, ses_config)

    # Safety valve: never auto-reply to our own domain.
    if '@pydantic.io' in email.from_:
        logfire.warning('Not sending email to pydantic.io')
        return

    # Keep an existing 'Re:' prefix; add one otherwise.
    if email.subject.lower().startswith('re:'):
        subject = email.subject
    else:
        subject = f'Re: {email.subject}'

    message_id = await ses_client.send_email(
        e_from=SesRecipient('spiced-ham@pydantic.io', 'Samuel', 'Colvin (Spiced Ham)'),
        subject=subject,
        to=[email.from_],
        text_body=plain_text,
        html_body=html,
        configuration_set='spiced-ham',
        smtp_headers={
            # Thread the reply: In-Reply-To points at the original message,
            # References accumulates the full chain.
            'In-Reply-To': email.message_id,
            'References': f'{email.references} {email.message_id}' if email.references else email.message_id,
        },
    )
    logfire.info(f'email sent: {message_id=}')
67 |
--------------------------------------------------------------------------------
/2025-02-ai-engineer-pydantic-ai/app/server.py:
--------------------------------------------------------------------------------
1 | from contextlib import asynccontextmanager
2 | from dataclasses import dataclass
3 | from datetime import datetime
4 | from typing import AsyncIterator, Mapping, Any, Literal
5 |
6 | from pydantic import BaseModel
7 | from starlette.applications import Starlette
8 | from starlette.responses import Response
9 | from starlette.routing import Route
10 | from starlette.requests import Request
11 | from email import message_from_bytes
12 | from email.utils import parsedate_to_datetime
13 | from httpx import AsyncClient
14 |
15 | from .send_reply import send_reply
16 | from .analyse import analyse_email
17 |
18 | import logfire
19 |
20 | logfire.configure(scrubbing=False)
21 | logfire.info('running server')
22 |
23 |
@dataclass
class EmailInfo:
    """Parsed fields of an inbound email (constructed in analyze_email)."""

    from_: str  # 'From' header ('from' is a keyword, hence trailing underscore)
    subject: str  # 'Subject' header
    to: str  # 'To' header
    message_id: str  # 'Message-ID' header, used for reply threading
    references: str | None  # 'References' header; non-None means part of a thread
    timestamp: datetime  # parsed 'Date' header, or now() when missing
    text: str | None  # text/plain body part, if any
    html: str | None  # text/html body part, if any


class AnalysisResponse(BaseModel):
    """JSON body returned to the caller (the Cloudflare email worker)."""

    status: Literal['ok', 'reply', 'drop']
38 |
39 |
@asynccontextmanager
async def lifespan(_app: Starlette) -> AsyncIterator[Mapping[str, Any]]:
    """Provide one shared, instrumented HTTP client for the app's lifetime."""
    async with AsyncClient() as http_client:
        # Capture full request/response details for every outgoing call.
        logfire.instrument_httpx(http_client, capture_all=True)
        state = {'httpx_client': http_client}
        yield state
45 |
46 |
async def analyze_email(request: Request):
    """Parse a raw RFC 822 email from the request body, analyse it, reply if needed.

    Returns a JSON response with the analysis status ('ok' | 'reply' | 'drop').
    """
    body = await request.body()
    msg = message_from_bytes(body)
    logfire.info('{body=}', body=body.decode(errors='ignore'), smtp_headers=dict(msg.items()))

    text_body = None
    html_body = None
    for part in msg.walk():
        content_type = part.get_content_type()
        if content_type not in ('text/plain', 'text/html'):
            continue

        # Robustness fix: get_payload(decode=True) returns None for multipart
        # containers / undecodable parts; previously that raised AttributeError.
        payload = part.get_payload(decode=True)
        if payload is None:
            continue

        charset = part.get_content_charset() or 'utf-8'
        try:
            # errors='replace': senders frequently declare the wrong charset;
            # a mojibake character is better than a 500 for the whole email.
            decoded = payload.decode(charset, errors='replace')
        except LookupError:
            # Unknown/invalid charset name declared by the sender.
            decoded = payload.decode('utf-8', errors='replace')

        if content_type == 'text/plain':
            text_body = decoded
        else:
            html_body = decoded

    date = msg['Date']
    if date:
        try:
            timestamp = parsedate_to_datetime(str(date))
        except ValueError:
            # Malformed Date header — fall back to the current time.
            timestamp = datetime.now()
    else:
        timestamp = datetime.now()

    email = EmailInfo(
        from_=str(msg['from']),
        subject=str(msg['subject']),
        to=str(msg['to']),
        message_id=str(msg['Message-ID']),
        references=str(msg['References']) if 'References' in msg else None,
        timestamp=timestamp,
        text=text_body,
        html=html_body,
    )

    email_analysis = await analyse_email(email)
    logfire.info(f'{email_analysis=}')
    if email_analysis.status == 'reply':
        client: AsyncClient = request.state.httpx_client
        await send_reply(client, email, email_analysis)

    response = AnalysisResponse(status=email_analysis.status)
    return Response(response.model_dump_json(), headers={'content-type': 'application/json'})
87 |
88 |
# Single POST route that receives the raw email from the Cloudflare worker.
app = Starlette(routes=[Route('/', analyze_email, methods=['post'])], lifespan=lifespan)
logfire.instrument_starlette(app, capture_headers=True)
91 |
--------------------------------------------------------------------------------
/2025-02-ai-engineer-pydantic-ai/cf-worker/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "spiced-ham",
3 | "version": "0.0.0",
4 | "private": true,
5 | "scripts": {
6 | "format": "prettier --write -- .",
7 | "lint": "prettier --check -- .",
8 | "typecheck": "tsc --noEmit",
9 | "dev": "wrangler dev"
10 | },
11 | "prettier": {
12 | "singleQuote": true,
13 | "semi": false,
14 | "trailingComma": "all",
15 | "tabWidth": 2,
16 | "printWidth": 119,
17 | "bracketSpacing": true,
18 | "plugins": [
19 | "prettier-plugin-toml"
20 | ]
21 | },
22 | "dependencies": {
23 | "@cloudflare/workers-types": "^4.20250204.0",
24 | "prettier": "^3.5.1",
25 | "prettier-plugin-toml": "^2.0.1",
26 | "typescript": "^5.7.3",
27 | "wrangler": "^3.109.0"
28 | }
29 | }
30 |
--------------------------------------------------------------------------------
/2025-02-ai-engineer-pydantic-ai/cf-worker/src/index.ts:
--------------------------------------------------------------------------------
1 | interface Env {}
2 |
3 | interface AnalysisResult {
4 | status: 'ok' | 'reply' | 'drop'
5 | }
6 |
7 | export default {
8 | async email(message, env, ctx) {
9 | const r = await fetch('https://samuelcolvin.eu.ngrok.io', {
10 | method: 'POST',
11 | body: message.raw,
12 | })
13 | const { status }: AnalysisResult = await r.json()
14 | if (status == 'ok') {
15 | console.log('forwarding email')
16 | await message.forward('samuel@pydantic.dev')
17 | }
18 | },
19 | } satisfies ExportedHandler
20 |
--------------------------------------------------------------------------------
/2025-02-ai-engineer-pydantic-ai/cf-worker/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | /* Visit https://aka.ms/tsconfig.json to read more about this file */
4 |
5 | /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */
6 | "target": "es2021",
7 | /* Specify a set of bundled library declaration files that describe the target runtime environment. */
8 | "lib": ["es2021"],
9 | /* Specify what JSX code is generated. */
10 | "jsx": "react-jsx",
11 |
12 | /* Specify what module code is generated. */
13 | "module": "es2022",
14 | /* Specify how TypeScript looks up a file from a given module specifier. */
15 | "moduleResolution": "Bundler",
16 | /* Specify type package names to be included without being referenced in a source file. */
17 | "types": ["@cloudflare/workers-types/2023-07-01"],
18 | /* Enable importing .json files */
19 | "resolveJsonModule": true,
20 |
21 | /* Allow JavaScript files to be a part of your program. Use the `checkJS` option to get errors from these files. */
22 | "allowJs": true,
23 | /* Enable error reporting in type-checked JavaScript files. */
24 | "checkJs": false,
25 |
26 | /* Disable emitting files from a compilation. */
27 | "noEmit": true,
28 |
29 | /* Ensure that each file can be safely transpiled without relying on other imports. */
30 | "isolatedModules": true,
31 | /* Allow 'import x from y' when a module doesn't have a default export. */
32 | "allowSyntheticDefaultImports": true,
33 | /* Ensure that casing is correct in imports. */
34 | "forceConsistentCasingInFileNames": true,
35 |
36 | /* Enable all strict type-checking options. */
37 | "strict": true,
38 |
39 | /* Skip type checking all .d.ts files. */
40 | "skipLibCheck": true
41 | },
42 | "exclude": ["test"],
43 | "include": ["worker-configuration.d.ts", "src/**/*.ts"]
44 | }
45 |
--------------------------------------------------------------------------------
/2025-02-ai-engineer-pydantic-ai/cf-worker/wrangler.toml:
--------------------------------------------------------------------------------
1 | #:schema node_modules/wrangler/config-schema.json
2 | name = "spiced-ham"
3 | compatibility_date = "2025-02-14"
4 | main = "src/index.ts"
5 |
--------------------------------------------------------------------------------
/2025-02-ai-engineer-pydantic-ai/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "spiced-ham"
3 | version = "0.1.0"
4 | description = "Spiced Ham: AI email triage demo (AI Engineer NYC, Feb 2025)"
5 | readme = "README.md"
6 | requires-python = ">=3.13"
7 | dependencies = [
8 | "aioaws>=0.15.1",
9 | "logfire[httpx,starlette]>=3.6.1",
10 | "markdown>=3.7",
11 | "pydantic>=2.10.6",
12 | "pyright>=1.1.394",
13 | "ruff>=0.9.7",
14 | "starlette>=0.45.3",
15 | "uvicorn>=0.34.0",
16 | "watchfiles>=1.0.4",
17 | ]
18 |
19 | [tool.ruff]
20 | line-length = 120
21 | target-version = "py313"
22 | include = ["app/**/*.py"]
23 |
24 | [tool.ruff.lint]
25 | extend-select = [
26 | "Q",
27 | "RUF100",
28 | "C90",
29 | "UP",
30 | "I",
31 | "D",
32 | ]
33 | flake8-quotes = { inline-quotes = "single", multiline-quotes = "double" }
34 | isort = { combine-as-imports = true }
35 | mccabe = { max-complexity = 15 }
36 | ignore = [
37 | "D100", # ignore missing docstring in module
38 | "D102", # ignore missing docstring in public method
39 | "D104", # ignore missing docstring in public package
40 | "D105", # ignore missing docstring in magic methods
41 | "D107", # ignore missing docstring in __init__ methods
42 | ]
43 |
44 | [tool.ruff.lint.pydocstyle]
45 | convention = "google"
46 |
47 | [tool.ruff.format]
48 | # don't format python in docstrings, pytest-examples takes care of it
49 | docstring-code-format = false
50 | quote-style = "single"
51 |
--------------------------------------------------------------------------------
/2025-02-ai-engineer-pydantic-ai/raw_send.py:
--------------------------------------------------------------------------------
1 | import smtplib
2 | from email.message import EmailMessage
3 | from email.utils import make_msgid
4 | import sys
5 |
# Expect exactly two CLI arguments: the subject and the body text.
if len(sys.argv) != 3:
    # Bug fix: the argument placeholders were missing from the usage message
    # (likely stripped as markup), leaving no hint of what to pass.
    print('Usage: python raw_send.py <subject> <body>')
    sys.exit(1)

msg = EmailMessage()
msg['Subject'] = sys.argv[1]
msg['From'] = 'mail-server-test@helpmanual.io'
msg['To'] = 'spiced-ham@pydantic.io'
msg['Message-ID'] = make_msgid()  # unique Message-ID so replies can thread
msg.set_content(sys.argv[2])

# Port 0 lets smtplib fall back to the default SMTP port (25).
with smtplib.SMTP('route1.mx.cloudflare.net', 0) as server:
    server.starttls()  # Secure the connection
    server.send_message(msg)  # Send the email
    print('Email sent successfully.')
21 |
--------------------------------------------------------------------------------
/2025-04-cli-demo/cli.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import random
3 | from time import sleep
4 | from urllib.parse import quote_plus
5 |
6 | import logfire
7 | from opentelemetry.trace import format_trace_id
8 |
9 | logfire.configure(
10 | # Maybe token info here?
11 | # If it's an internal service you might as well hardcode the token in the code you distribute to your team
12 | console=False,
13 | service_name='cli',
14 | service_version='0.1.0',
15 | )
16 |
17 |
def fibonacci(n: int) -> int:
    """Compute the Fibonacci number recursively.

    Deliberately naive for the demo: every call opens a span and may pause,
    so traces show a deep, slow recursive call tree.
    """
    with logfire.span('fibonacci({n})', n=n):
        # ~25% of calls sleep for a second to make the trace interesting.
        if random.uniform(0, 1) < 0.25:
            sleep(1)
        # Base cases: fib(n<=0) == 0 and fib(1) == 1.
        if n <= 0:
            return 0
        if n == 1:
            return 1
        return fibonacci(n - 1) + fibonacci(n - 2)
29 |
30 |
@logfire.instrument
def divide(numerator: int, denominator: int) -> float:
    """Divide two numbers (raises ZeroDivisionError when denominator is 0)."""
    quotient = numerator / denominator
    return quotient
35 |
36 |
def main():
    """Entry point for the demo CLI: parse arguments and dispatch to a command."""
    # Create the top-level parser
    parser = argparse.ArgumentParser(description='Demo CLI with multiple commands')
    subparsers = parser.add_subparsers(dest='command', help='Commands')
    subparsers.required = True

    # Create parser for the "fib" command
    fib_parser = subparsers.add_parser('fib', help='Compute Fibonacci number')
    fib_parser.add_argument('n', type=int, help='Which Fibonacci number to compute')

    # Create parser for the "divide" command
    divide_parser = subparsers.add_parser('divide', help='Divide two numbers')
    divide_parser.add_argument('numerator', type=int, help='Numerator')
    divide_parser.add_argument('denominator', type=int, help='Denominator')

    # Parse arguments
    args = parser.parse_args()

    # Process commands inside a root span so errors can link to their trace.
    with logfire.span('main', command=args.command) as span:
        try:
            if args.command == 'fib':
                result = fibonacci(args.n)
                print(f'The {args.n}th Fibonacci number is: {result}')
            elif args.command == 'divide':
                result = divide(args.numerator, args.denominator)
                print(f'The result of dividing {args.numerator} by {args.denominator} is: {result}')
        except Exception as e:
            # Bug fix: previously the whole error message was printed only when
            # span.context was not None, so with no recording span the exception
            # was swallowed silently. Always report; append the trace link when
            # a trace id is actually available.
            message = f'Error occurred during command "{args.command}".\n\n{e}'
            if span.context is not None:
                trace_id = format_trace_id(span.context.trace_id)
                query = f"trace_id='{trace_id}'"
                url = f'https://logfire-eu.pydantic.info/adriangb/starter-project?q={quote_plus(query)}'
                message += f'\n\nTrace: {url}'
            print(message)
70 |
71 |
72 | if __name__ == '__main__':
73 | main()
74 |
--------------------------------------------------------------------------------
/2025-04-cli-demo/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "2025-04-cli-demo"
3 | version = "0"
4 | description = "A demo of using Logfire with an internal CLI tool"
5 | requires-python = ">=3.13"
6 | dependencies = [
7 | "opentelemetry-api>=1.20.0",
8 | "logfire>=3.14.0",
9 | ]
10 |
11 | [tool.ruff]
12 | line-length = 120
13 | target-version = "py313"
14 |
15 | [tool.ruff.lint]
16 | extend-select = [
17 | "Q",
18 | "RUF100",
19 | "C90",
20 | "UP",
21 | "I",
22 | ]
23 | flake8-quotes = { inline-quotes = "single", multiline-quotes = "double" }
24 | isort = { combine-as-imports = true }
25 | mccabe = { max-complexity = 15 }
26 |
27 | [tool.ruff.lint.pydocstyle]
28 | convention = "google"
29 |
30 | [tool.ruff.format]
31 | # don't format python in docstrings, pytest-examples takes care of it
32 | docstring-code-format = false
33 | quote-style = "single"
34 |
35 | [tool.pyright]
36 | pythonVersion = "3.13"
37 | typeCheckingMode = "strict"
38 | reportUnnecessaryTypeIgnoreComment = true
39 | include = [
40 | "**/*.py",
41 | ]
42 | venvPath = ".venv"
43 |
44 | [dependency-groups]
45 | dev = [
46 | "ruff>=0.11.6",
47 | ]
48 |
--------------------------------------------------------------------------------
/2025-04-data-council/.python-version:
--------------------------------------------------------------------------------
1 | 3.13
2 |
--------------------------------------------------------------------------------
/2025-04-data-council/agent-loop.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pydantic/talks/47b2837f290a3955983f534e7563afc174ff172f/2025-04-data-council/agent-loop.png
--------------------------------------------------------------------------------
/2025-04-data-council/browser_mcp.py:
--------------------------------------------------------------------------------
1 | import logfire
2 | from pydantic_ai import Agent
3 | from pydantic_ai.mcp import MCPServerStdio
4 |
logfire.configure(scrubbing=False, service_name='browse')
logfire.instrument_mcp()
logfire.instrument_pydantic_ai()

# Bug fix: npx's auto-confirm flag is lowercase '-y' (alias of '--yes');
# '-Y' is not a valid npx flag and would stop the MCP server from starting.
browser_mcp = MCPServerStdio('npx', args=['-y', '@playwright/mcp@latest'])

agent = Agent(
    'anthropic:claude-3-7-sonnet-latest',
    mcp_servers=[browser_mcp],
)
15 |
16 |
async def main():
    """Run the browsing agent inside the MCP server context and print its answer."""
    async with agent.run_mcp_servers():
        # Fixed prompt typos ('summaries those annoucements'); clearer
        # instructions improve the model's output.
        result = await agent.run(
            'get the most recent blog post from pydantic.dev '
            'which should contain multiple announcements, '
            'summarise those announcements as a list.'
        )
        print(result.output)
25 |
26 |
if __name__ == '__main__':
    import asyncio

    # Script entry point: run the async main under a fresh event loop.
    asyncio.run(main())
31 |
--------------------------------------------------------------------------------
/2025-04-data-council/browser_mcp_graph.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations as _
2 |
3 | from dataclasses import dataclass
4 |
5 | import logfire
6 | from pydantic_ai import Agent
7 | from pydantic_ai.mcp import MCPServerStdio
8 | from pydantic_graph import BaseNode, End, Graph, GraphRunContext
9 |
logfire.configure(scrubbing=False, service_name='browse-graph')
logfire.instrument_mcp()
logfire.instrument_pydantic_ai()

# Bug fix: npx's auto-confirm flag is lowercase '-y' (alias of '--yes');
# '-Y' is not a valid npx flag and would stop the Playwright MCP server
# from starting.
server = MCPServerStdio(
    'npx',
    args=[
        '-y',
        '@playwright/mcp@latest',
    ],
)

# Agent used by the browsing nodes; constrained to return a bare URL.
browser_agent = Agent(
    'anthropic:claude-3-7-sonnet-latest',
    mcp_servers=[server],
    system_prompt='Find the page requested by the user and return the URL only. Nothing else.',
)
27 |
28 |
@dataclass
class FindBlog(BaseNode):
    # Entry node input: the site root URL to start browsing from.
    url: str

    async def run(self, ctx: GraphRunContext) -> FindLatestPosts:
        """Locate the blog index page for the site and pass its URL onward."""
        result = await browser_agent.run(f'Find the page with a list of blog posts at {self.url}.')
        return FindLatestPosts(result.output)
36 |
37 |
@dataclass
class FindLatestPosts(BaseNode):
    # URL of the blog index page found by FindBlog.
    url: str

    async def run(self, ctx: GraphRunContext) -> SummariesContent:
        """Find the most recent post on the index page and pass it on for summarising."""
        result = await browser_agent.run(f'Find the latest blog post at {self.url}')
        return SummariesContent(result.output)
45 |
46 |
# Separate agent (no MCP tools) used purely for summarisation.
summary_agent = Agent(
    'anthropic:claude-3-7-sonnet-latest', system_prompt='Summarise the content of the blog post page as markdown'
)
50 |
51 |
@dataclass
class SummariesContent(BaseNode[None, None, str]):
    # Raw content of the blog post page to summarise; this terminal node
    # produces the graph's final `str` output.
    content: str

    async def run(self, ctx: GraphRunContext) -> End[str]:
        """Summarise the content and end the graph run with the markdown summary."""
        result = await summary_agent.run(self.content)
        return End(result.output)
59 |
60 |
# Pipeline: FindBlog -> FindLatestPosts -> SummariesContent.
graph = Graph(nodes=[FindBlog, FindLatestPosts, SummariesContent])


async def main():
    """Run the full graph from the pydantic.dev homepage and print the summary."""
    async with browser_agent.run_mcp_servers():
        result = await graph.run(FindBlog(url='pydantic.dev'))
        print(result.output)


if __name__ == '__main__':
    # Currently only writes the mermaid diagram of the graph;
    # uncomment below to actually run it.
    with open('browser.mermaid', 'w') as f:
        f.write(graph.mermaid_code())
    # import asyncio
    # asyncio.run(main())
75 |
--------------------------------------------------------------------------------
/2025-04-data-council/evals/01_generate_dataset.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from pathlib import Path
3 | from types import NoneType
4 |
5 | from pydantic_evals import Dataset
6 | from pydantic_evals.generation import generate_dataset
7 |
8 | from agent import TimeRangeInputs, TimeRangeResponse
9 |
10 | import logfire
11 |
12 | logfire.configure()
13 | logfire.instrument_pydantic_ai()
14 |
15 |
async def main():
    """Generate the initial eval dataset with an LLM and write it to YAML."""
    dataset = await generate_dataset(
        dataset_type=Dataset[TimeRangeInputs, TimeRangeResponse, NoneType],
        model='openai:o1',  # Use a smarter model since this is a more complex task that is only run once
        n_examples=10,
        extra_instructions="""
        Generate a dataset of test cases for the time range inference agent.

        Include a variety of inputs that might be given to the agent, including some where the only
        reasonable response is a `TimeRangeBuilderError`, and some where a `TimeRangeBuilderSuccess` is
        expected. Make use of the `IsInstance` evaluator to ensure that the inputs and outputs are of the appropriate
        type.

        When appropriate, use the `LLMJudge` evaluator to provide a more precise description of the time range the
        agent should have inferred. In particular, it's good if the example user inputs are somewhat ambiguous, to
        reflect realistic (difficult-to-handle) user questions, but the LLMJudge evaluator can help ensure that the
        agent's output is still judged based on precisely what the desired behavior is even for somewhat ambiguous
        user questions. You do not need to include LLMJudge evaluations for all cases (in particular, for cases where
        the expected output is unambiguous from the user's question), but you should include at least one or two
        examples that do benefit from an LLMJudge evaluation (and include it).

        To be clear, the LLMJudge rubrics should be concise and reflect only information that is NOT ALREADY PRESENT
        in the user prompt for the example.

        Leave the model and include_input arguments to LLMJudge as their default values (null).

        Also add a dataset-wide LLMJudge evaluator to ensure that the 'explanation' or 'error_message' fields are
        appropriate to be displayed to the user (e.g., written in second person, etc.).
        """,
    )

    # Persist the generated cases so later scripts can load and extend them.
    dataset.to_file(
        Path(__file__).parent / 'datasets' / 'time_range_v1.yaml',
        fmt='yaml',
    )


if __name__ == '__main__':
    asyncio.run(main())
55 |
--------------------------------------------------------------------------------
/2025-04-data-council/evals/02_add_custom_evaluators.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from types import NoneType
3 |
4 | from pydantic_evals import Dataset
5 |
6 | from custom_evaluators import CUSTOM_EVALUATOR_TYPES, AgentCalledTool, UserMessageIsConcise, ValidateTimeRange
7 | from agent import TimeRangeInputs, TimeRangeResponse
8 |
9 |
def main():
    """Load the v1 dataset, attach the custom evaluators, and save it as v2."""
    datasets_dir = Path(__file__).parent / 'datasets'
    dataset = Dataset[TimeRangeInputs, TimeRangeResponse, NoneType].from_file(datasets_dir / 'time_range_v1.yaml')

    # Dataset-wide evaluators applied to every case.
    dataset.add_evaluator(ValidateTimeRange())
    dataset.add_evaluator(UserMessageIsConcise())
    # This evaluator applies to one named case only.
    dataset.add_evaluator(
        AgentCalledTool('time_range_agent', 'get_current_time'),
        specific_case='Single time point',
    )

    # custom_evaluator_types lets the YAML round-trip our evaluator classes.
    dataset.to_file(datasets_dir / 'time_range_v2.yaml', custom_evaluator_types=CUSTOM_EVALUATOR_TYPES)


if __name__ == '__main__':
    main()
27 |
--------------------------------------------------------------------------------
/2025-04-data-council/evals/03_unit_testing.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from types import NoneType
3 |
4 | import logfire
5 | from pydantic_evals import Dataset
6 |
7 | from agent import infer_time_range, TimeRangeInputs, TimeRangeResponse
8 | from custom_evaluators import CUSTOM_EVALUATOR_TYPES
9 |
logfire.configure(environment='development', service_name='evals', service_version='0.0.1')
logfire.instrument_pydantic_ai()

# Load the dataset — registering the custom evaluator classes so the YAML
# can be deserialized — then evaluate the agent against every case.
dataset_path = Path(__file__).parent / 'datasets' / 'time_range_v2.yaml'
dataset = Dataset[TimeRangeInputs, TimeRangeResponse, NoneType].from_file(
    dataset_path, custom_evaluator_types=CUSTOM_EVALUATOR_TYPES
)
report = dataset.evaluate_sync(infer_time_range)
print(report)

# Fail like a unit test if the overall assertion pass rate drops below 90%.
assertion_pass_rate = report.averages().assertions
assert assertion_pass_rate is not None, 'There should be at least one assertion'
assert assertion_pass_rate > 0.9, f'The assertion pass rate was {assertion_pass_rate:.1%}; it should be above 90%.'
23 |
--------------------------------------------------------------------------------
/2025-04-data-council/evals/04_compare_models.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from types import NoneType
3 |
4 | import logfire
5 | from pydantic_evals import Dataset
6 |
7 | from custom_evaluators import CUSTOM_EVALUATOR_TYPES
8 | from agent import time_range_agent, infer_time_range, TimeRangeInputs, TimeRangeResponse
9 |
logfire.configure(environment='development', service_name='evals')
logfire.instrument_pydantic_ai()

# Load the dataset with the custom evaluator classes registered.
dataset_path = Path(__file__).parent / 'datasets' / 'time_range_v2.yaml'
dataset = Dataset[TimeRangeInputs, TimeRangeResponse, NoneType].from_file(
    dataset_path, custom_evaluator_types=CUSTOM_EVALUATOR_TYPES
)
# Run the same evaluation twice, overriding the agent's model each time,
# so the two reports can be compared side by side in Logfire.
with logfire.span('Comparing different models for time_range_agent'):
    with time_range_agent.override(model='openai:gpt-4o'):
        dataset.evaluate_sync(infer_time_range, name='openai:gpt-4o')
    with time_range_agent.override(model='anthropic:claude-3-7-sonnet-latest'):
        dataset.evaluate_sync(infer_time_range, name='anthropic:claude-3-7-sonnet-latest')
22 |
--------------------------------------------------------------------------------
/2025-04-data-council/evals/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pydantic/talks/47b2837f290a3955983f534e7563afc174ff172f/2025-04-data-council/evals/__init__.py
--------------------------------------------------------------------------------
/2025-04-data-council/evals/agent.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations as _annotations
2 |
3 | from dataclasses import dataclass, field
4 | from datetime import datetime
5 |
6 | from pydantic import AwareDatetime, BaseModel
7 | from pydantic_ai import Agent, RunContext
8 | from typing_extensions import TypedDict
9 |
10 |
class TimeRangeBuilderSuccess(BaseModel, use_attribute_docstrings=True):
    """Response when a time range could be successfully generated."""

    min_timestamp_with_offset: AwareDatetime
    """A datetime in ISO format with timezone offset."""
    max_timestamp_with_offset: AwareDatetime
    """A datetime in ISO format with timezone offset."""
    explanation: str | None
    """
    A brief explanation of the time range that was selected.

    For example, if a user only mentions a specific point in time, you might explain that you selected a 10 minute
    window around that time.
    """

    def __str__(self):
        # Human-readable summary used in eval reports and logs.
        # NOTE: the field docstrings above become schema descriptions via
        # use_attribute_docstrings, so they are part of the prompt contract.
        ts_format = '%A, %B %d, %Y %H:%M:%S %Z'
        parts = [
            'TimeRangeBuilderSuccess:',
            f'* min_timestamp_with_offset: {self.min_timestamp_with_offset.strftime(ts_format)}',
            f'* max_timestamp_with_offset: {self.max_timestamp_with_offset.strftime(ts_format)}',
        ]
        if self.explanation is not None:
            parts.append(f'* explanation: {self.explanation}')
        return '\n'.join(parts)
35 |
36 |
class TimeRangeBuilderError(BaseModel):
    """Response when a time range cannot be generated."""

    # Fix: the original docstring read "cannot not be generated" — a double
    # negative. Pydantic uses this docstring as the schema description the
    # model sees, so the typo was user/model-facing.
    error_message: str

    def __str__(self):
        return f'TimeRangeBuilderError:\n* {self.error_message}'
44 |
45 |
# The agent returns either a successful time range or a structured error.
TimeRangeResponse = TimeRangeBuilderSuccess | TimeRangeBuilderError
47 |
48 |
class TimeRangeInputs(TypedDict):
    """The inputs for the time range inference agent."""

    prompt: str  # natural-language request, e.g. "show me logs from last night"
    now: AwareDatetime  # the user's current wall-clock time (timezone-aware)
54 |
55 |
@dataclass
class TimeRangeDeps:
    # Defaults to local time at construction; evals override it via
    # infer_time_range so cases are reproducible whenever they run.
    now: datetime = field(default_factory=lambda: datetime.now().astimezone())
59 |
60 |
# Agent that converts a natural-language request into a TimeRangeResponse
# (success with a concrete range, or a structured error). Instrumented so runs
# show up in Logfire; one retry on validation failure.
time_range_agent = Agent[TimeRangeDeps, TimeRangeResponse](
    'openai:gpt-4o',
    output_type=TimeRangeResponse, # type: ignore # we can't yet annotate something as receiving a TypeForm
    deps_type=TimeRangeDeps,
    system_prompt='Convert the user request into a structured time range.',
    retries=1,
    instrument=True,
)
69 |
70 |
@time_range_agent.tool
def get_current_time(ctx: RunContext[TimeRangeDeps]) -> str:
    """Get the user's current time and timezone in the format 'Friday, November 22, 2024 11:15:14 PST'."""
    # The docstring above is the tool description shown to the model.
    formatted_now = ctx.deps.now.strftime('%A, %B %d, %Y %H:%M:%S %Z')
    return "The user's current time is " + formatted_now + '.'
75 |
76 |
async def infer_time_range(inputs: TimeRangeInputs) -> TimeRangeResponse:
    """Infer a time range from a user prompt."""
    run_result = await time_range_agent.run(
        inputs['prompt'],
        deps=TimeRangeDeps(now=inputs['now']),
    )
    return run_result.output
81 |
--------------------------------------------------------------------------------
/2025-04-data-council/evals/custom_evaluators.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass
2 | from datetime import timedelta
3 |
4 | from pydantic_evals.evaluators import Evaluator, EvaluatorContext, EvaluatorOutput
5 | from pydantic_evals.otel import SpanQuery
6 |
7 | from agent import TimeRangeBuilderSuccess, TimeRangeInputs, TimeRangeResponse
8 |
9 |
@dataclass
class ValidateTimeRange(Evaluator[TimeRangeInputs, TimeRangeResponse]):
    """Deterministic sanity checks on any successfully produced time range."""

    def evaluate(self, ctx: EvaluatorContext[TimeRangeInputs, TimeRangeResponse]) -> EvaluatorOutput:
        if not isinstance(ctx.output, TimeRangeBuilderSuccess):
            return {}  # No evaluation needed for errors

        start = ctx.output.min_timestamp_with_offset
        end = ctx.output.max_timestamp_with_offset
        return {
            'window_is_not_too_long': end - start <= timedelta(days=30),
            'window_is_not_in_the_future': end <= ctx.inputs['now'],
        }
22 |
23 |
@dataclass
class UserMessageIsConcise(Evaluator[TimeRangeInputs, TimeRangeResponse]):
    """Check that whichever message the user sees stays under 50 words."""

    async def evaluate(self, ctx: EvaluatorContext[TimeRangeInputs, TimeRangeResponse]) -> EvaluatorOutput:
        # Success responses surface `explanation`; errors surface `error_message`.
        message = (
            ctx.output.explanation
            if isinstance(ctx.output, TimeRangeBuilderSuccess)
            else ctx.output.error_message
        )
        if message is None:
            return {}
        return len(message.split()) < 50
36 |
37 |
@dataclass
class AgentCalledTool(Evaluator):
    """Assert, from the recorded span tree, that a given agent invoked a given tool."""

    agent_name: str
    tool_name: str

    def evaluate(self, ctx: EvaluatorContext) -> bool:
        # Match a tool-call span somewhere under this agent's run span, but do
        # not recurse into nested agent runs (stop_recursing_when).
        tool_span = SpanQuery(
            name_equals='running tool',
            has_attributes={'gen_ai.tool.name': self.tool_name},
        )
        agent_span = SpanQuery(
            name_equals='agent run',
            has_attributes={'agent_name': self.agent_name},
            stop_recursing_when=SpanQuery(name_equals='agent run'),
            some_descendant_has=tool_span,
        )
        return ctx.span_tree.any(agent_span)
55 |
56 |
# Evaluator classes the YAML datasets may reference by name.
CUSTOM_EVALUATOR_TYPES = ValidateTimeRange, UserMessageIsConcise, AgentCalledTool
58 |
--------------------------------------------------------------------------------
/2025-04-data-council/evals/datasets/time_range_v1.yaml:
--------------------------------------------------------------------------------
1 | # yaml-language-server: $schema=time_range_v1_schema.json
2 | cases:
3 | - name: Single time point
4 | inputs:
5 | prompt: I'd like logs from 2 PM on 2024-05-20.
6 | now: '2024-05-20T14:15:00Z'
7 | expected_output:
8 | min_timestamp_with_offset: '2024-05-20T14:00:00Z'
9 | max_timestamp_with_offset: '2024-05-20T14:30:00Z'
10 | explanation: You provided a single time (2 PM), so we selected a short window
11 | around that time.
12 | evaluators:
13 | - IsInstance: TimeRangeBuilderSuccess
14 | - name: Ambiguous request for tomorrow
15 | inputs:
16 | prompt: Show me what's going on tomorrow.
17 | now: '2024-05-19T09:00:00Z'
18 | expected_output:
19 | min_timestamp_with_offset: '2024-05-20T00:00:00Z'
20 | max_timestamp_with_offset: '2024-05-20T23:59:59Z'
21 | explanation: We interpreted 'tomorrow' as the entire next day in UTC.
22 | evaluators:
23 | - IsInstance: TimeRangeBuilderSuccess
24 | - LLMJudge: We want the entire next day in UTC to cover all events tomorrow.
25 | - name: Future logs not available
26 | inputs:
27 | prompt: I'd like logs from next year.
28 | now: '2025-01-01T00:00:00Z'
29 | expected_output:
30 | error_message: We cannot generate a time range in the future based on the provided
31 | 'now'.
32 | evaluators:
33 | - IsInstance: TimeRangeBuilderError
34 | - name: No time reference at all
35 | inputs:
36 | prompt: Give me the logs.
37 | now: '2024-01-01T00:00:00Z'
38 | expected_output:
39 | error_message: No time references were detected in your request.
40 | evaluators:
41 | - IsInstance: TimeRangeBuilderError
42 | - name: Exact small range
43 | inputs:
44 | prompt: Could I see logs from 3:00 PM to 3:45 PM on Feb 10, 2024?
45 | now: '2024-02-10T15:30:00Z'
46 | expected_output:
47 | min_timestamp_with_offset: '2024-02-10T15:00:00Z'
48 | max_timestamp_with_offset: '2024-02-10T15:45:00Z'
49 | explanation: You specifically requested logs between 3:00 PM and 3:45 PM.
50 | evaluators:
51 | - IsInstance: TimeRangeBuilderSuccess
52 | - name: All-day request
53 | inputs:
54 | prompt: I need logs from October 25th, 2024.
55 | now: '2024-10-24T10:00:00Z'
56 | expected_output:
57 | min_timestamp_with_offset: '2024-10-25T00:00:00Z'
58 | max_timestamp_with_offset: '2024-10-25T23:59:59Z'
59 | explanation: We interpreted the request for October 25, 2024 as the entire day
60 | in UTC.
61 | evaluators:
62 | - IsInstance: TimeRangeBuilderSuccess
63 | - name: Unrecognized date format
64 | inputs:
65 | prompt: Get logs from 13/13/2024.
66 | now: '2024-10-24T10:00:00Z'
67 | expected_output:
68 | error_message: We could not recognize a valid date from your request.
69 | evaluators:
70 | - IsInstance: TimeRangeBuilderError
71 | - name: Ambiguous reference to next weekend
72 | inputs:
73 | prompt: I want logs from next weekend.
74 | now: '2024-05-16T12:00:00Z'
75 | expected_output:
76 | min_timestamp_with_offset: '2024-05-18T00:00:00Z'
77 | max_timestamp_with_offset: '2024-05-19T23:59:59Z'
78 | explanation: We interpreted 'next weekend' as Saturday and Sunday following your
79 | current date.
80 | evaluators:
81 | - IsInstance: TimeRangeBuilderSuccess
82 | - LLMJudge: We assume the user wants the entire upcoming Saturday and Sunday in
83 | UTC.
84 | - name: Last night logs
85 | inputs:
86 | prompt: Show me the logs from last night.
87 | now: '2024-08-01T09:00:00Z'
88 | expected_output:
89 | min_timestamp_with_offset: '2024-07-31T20:00:00Z'
90 | max_timestamp_with_offset: '2024-08-01T06:00:00Z'
91 | explanation: We interpreted 'last night' as 8 PM to 6 AM prior to your current
92 | morning time.
93 | evaluators:
94 | - IsInstance: TimeRangeBuilderSuccess
95 | - name: Cross-year boundary
96 | inputs:
97 | prompt: Show me logs from 2024-12-31 23:59 UTC to 2025-01-01 00:15 UTC.
98 | now: '2025-01-01T12:00:00Z'
99 | expected_output:
100 | min_timestamp_with_offset: '2024-12-31T23:59:00Z'
101 | max_timestamp_with_offset: '2025-01-01T00:15:00Z'
102 | explanation: We selected the precise range you requested, crossing into the new
103 | year.
104 | evaluators:
105 | - IsInstance: TimeRangeBuilderSuccess
106 | evaluators:
107 | - LLMJudge: Ensure explanation or error_message is in second person. Provide helpful
108 | but concise feedback. Must not conflict with user question. Must not be insulting.
109 | The user is the primary audience.
110 |
--------------------------------------------------------------------------------
/2025-04-data-council/evals/datasets/time_range_v2.yaml:
--------------------------------------------------------------------------------
1 | # yaml-language-server: $schema=time_range_v2_schema.json
2 | cases:
3 | - name: Single time point
4 | inputs:
5 | prompt: I'd like logs from 2 PM on 2024-05-20.
6 | now: '2024-05-20T14:15:00Z'
7 | expected_output:
8 | min_timestamp_with_offset: '2024-05-20T14:00:00Z'
9 | max_timestamp_with_offset: '2024-05-20T14:30:00Z'
10 | explanation: You provided a single time (2 PM), so we selected a short window
11 | around that time.
12 | evaluators:
13 | - IsInstance: TimeRangeBuilderSuccess
14 | - AgentCalledTool:
15 | agent_name: time_range_agent
16 | tool_name: get_current_time
17 | - name: Ambiguous request for tomorrow
18 | inputs:
19 | prompt: Show me what's going on tomorrow.
20 | now: '2024-05-19T09:00:00Z'
21 | expected_output:
22 | min_timestamp_with_offset: '2024-05-20T00:00:00Z'
23 | max_timestamp_with_offset: '2024-05-20T23:59:59Z'
24 | explanation: We interpreted 'tomorrow' as the entire next day in UTC.
25 | evaluators:
26 | - IsInstance: TimeRangeBuilderSuccess
27 | - LLMJudge: We want the entire next day in UTC to cover all events tomorrow.
28 | - name: Future logs not available
29 | inputs:
30 | prompt: I'd like logs from next year.
31 | now: '2025-01-01T00:00:00Z'
32 | expected_output:
33 | error_message: We cannot generate a time range in the future based on the provided
34 | 'now'.
35 | evaluators:
36 | - IsInstance: TimeRangeBuilderError
37 | - name: No time reference at all
38 | inputs:
39 | prompt: Give me the logs.
40 | now: '2024-01-01T00:00:00Z'
41 | expected_output:
42 | error_message: No time references were detected in your request.
43 | evaluators:
44 | - IsInstance: TimeRangeBuilderError
45 | - name: Exact small range
46 | inputs:
47 | prompt: Could I see logs from 3:00 PM to 3:45 PM on Feb 10, 2024?
48 | now: '2024-02-10T15:30:00Z'
49 | expected_output:
50 | min_timestamp_with_offset: '2024-02-10T15:00:00Z'
51 | max_timestamp_with_offset: '2024-02-10T15:45:00Z'
52 | explanation: You specifically requested logs between 3:00 PM and 3:45 PM.
53 | evaluators:
54 | - IsInstance: TimeRangeBuilderSuccess
55 | - name: All-day request
56 | inputs:
57 | prompt: I need logs from October 25th, 2024.
58 | now: '2024-10-24T10:00:00Z'
59 | expected_output:
60 | min_timestamp_with_offset: '2024-10-25T00:00:00Z'
61 | max_timestamp_with_offset: '2024-10-25T23:59:59Z'
62 | explanation: We interpreted the request for October 25, 2024 as the entire day
63 | in UTC.
64 | evaluators:
65 | - IsInstance: TimeRangeBuilderSuccess
66 | - name: Unrecognized date format
67 | inputs:
68 | prompt: Get logs from 13/13/2024.
69 | now: '2024-10-24T10:00:00Z'
70 | expected_output:
71 | error_message: We could not recognize a valid date from your request.
72 | evaluators:
73 | - IsInstance: TimeRangeBuilderError
74 | - name: Ambiguous reference to next weekend
75 | inputs:
76 | prompt: I want logs from next weekend.
77 | now: '2024-05-16T12:00:00Z'
78 | expected_output:
79 | min_timestamp_with_offset: '2024-05-18T00:00:00Z'
80 | max_timestamp_with_offset: '2024-05-19T23:59:59Z'
81 | explanation: We interpreted 'next weekend' as Saturday and Sunday following your
82 | current date.
83 | evaluators:
84 | - IsInstance: TimeRangeBuilderSuccess
85 | - LLMJudge: We assume the user wants the entire upcoming Saturday and Sunday in
86 | UTC.
87 | - name: Last night logs
88 | inputs:
89 | prompt: Show me the logs from last night.
90 | now: '2024-08-01T09:00:00Z'
91 | expected_output:
92 | min_timestamp_with_offset: '2024-07-31T20:00:00Z'
93 | max_timestamp_with_offset: '2024-08-01T06:00:00Z'
94 | explanation: We interpreted 'last night' as 8 PM to 6 AM prior to your current
95 | morning time.
96 | evaluators:
97 | - IsInstance: TimeRangeBuilderSuccess
98 | - name: Cross-year boundary
99 | inputs:
100 | prompt: Show me logs from 2024-12-31 23:59 UTC to 2025-01-01 00:15 UTC.
101 | now: '2025-01-01T12:00:00Z'
102 | expected_output:
103 | min_timestamp_with_offset: '2024-12-31T23:59:00Z'
104 | max_timestamp_with_offset: '2025-01-01T00:15:00Z'
105 | explanation: We selected the precise range you requested, crossing into the new
106 | year.
107 | evaluators:
108 | - IsInstance: TimeRangeBuilderSuccess
109 | evaluators:
110 | - LLMJudge: Ensure explanation or error_message is in second person. Provide helpful
111 | but concise feedback. Must not conflict with user question. Must not be insulting.
112 | The user is the primary audience.
113 | - ValidateTimeRange
114 | - UserMessageIsConcise
115 |
--------------------------------------------------------------------------------
/2025-04-data-council/memory_messages.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import asyncio
4 | from collections.abc import AsyncIterator
5 | from contextlib import asynccontextmanager
6 | from dataclasses import dataclass
7 | from typing import TYPE_CHECKING
8 |
9 | import asyncpg
10 | from pydantic_ai import Agent
11 | from pydantic_ai.messages import ModelMessage, ModelMessagesTypeAdapter
12 |
13 | # hack to get around asyncpg's poor typing support
14 | if TYPE_CHECKING:
15 | DbConn = asyncpg.Connection[asyncpg.Record]
16 | else:
17 | DbConn = asyncpg.Connection
18 |
19 |
20 | import logfire
21 |
22 | logfire.configure(service_name='mem-msgs')
23 | logfire.instrument_pydantic_ai()
24 | logfire.instrument_asyncpg()
25 |
26 |
@asynccontextmanager
async def db() -> AsyncIterator[DbConn]:
    """Yield a Postgres connection with the messages table ensured; close it on exit."""
    connection = await asyncpg.connect('postgresql://postgres@localhost:5432')
    await connection.execute("""
        create table if not exists messages(
            id serial primary key,
            ts timestamp not null default now(),
            user_id integer not null,
            messages json not null
        )
    """)

    try:
        yield connection
    finally:
        await connection.close()
43 |
44 |
# Plain chat agent; conversation memory is supplied via message_history.
agent = Agent(
    'openai:gpt-4o',
    instructions='You are a helpful assistant.',
)
49 |
50 |
@logfire.instrument
async def run_agent(prompt: str, user_id: int):
    """Run the agent with the user's stored history, streaming output and persisting new messages.

    Args:
        prompt: The user's new message.
        user_id: Key under which this user's conversation history is stored.
    """
    async with db() as conn:
        with logfire.span('retrieve messages'):
            messages: list[ModelMessage] = []
            for row in await conn.fetch('SELECT messages FROM messages WHERE user_id = $1 order by ts', user_id):
                messages += ModelMessagesTypeAdapter.validate_json(row[0])

        async with agent.run_stream(prompt, message_history=messages) as stream:
            async for message in stream.stream_text(delta=True):
                print(message, end='', flush=True)

        with logfire.span('record messages'):
            # BUG FIX: the original referenced an undefined name `result` here,
            # raising NameError at runtime; the streamed run result is `stream`.
            msgs = stream.new_messages_json().decode()
            await conn.execute('INSERT INTO messages(user_id, messages) VALUES($1, $2)', user_id, msgs)
66 |
67 |
@logfire.instrument
async def memory_messages():
    # First run: uncomment to seed the conversation history for user 123.
    # await run_agent('My name is Samuel.', 123)

    # Subsequent runs should pick up the stored history for the same user.
    await run_agent('tell me a short story', 123)


if __name__ == '__main__':
    asyncio.run(memory_messages())
77 |
--------------------------------------------------------------------------------
/2025-04-data-council/memory_tools.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import asyncio
4 | from collections.abc import AsyncIterator
5 | from contextlib import asynccontextmanager
6 | from dataclasses import dataclass
7 | from typing import TYPE_CHECKING
8 |
9 | import asyncpg
10 | from pydantic_ai import Agent, RunContext
11 |
12 | # hack to get around asyncpg's poor typing support
13 | if TYPE_CHECKING:
14 | DbConn = asyncpg.Connection[asyncpg.Record]
15 | else:
16 | DbConn = asyncpg.Connection
17 |
18 |
19 | import logfire
20 |
21 | logfire.configure(service_name='mem-tool')
22 | logfire.instrument_pydantic_ai()
23 | logfire.instrument_asyncpg()
24 |
25 |
@asynccontextmanager
async def db(reset: bool = False) -> AsyncIterator[DbConn]:
    """Yield a Postgres connection with the memory table ensured; close it on exit.

    Args:
        reset: When True, drop any existing memory table first.
    """
    connection = await asyncpg.connect('postgresql://postgres@localhost:5432')
    if reset:
        await connection.execute('drop table if exists memory')
    await connection.execute("""
        create table if not exists memory(
            id serial primary key,
            user_id integer not null,
            value text not null,
            unique(user_id, value)
        )
    """)

    try:
        yield connection
    finally:
        await connection.close()
44 |
45 |
@dataclass
class Deps:
    # Identifies whose memories the tools read and write.
    user_id: int
    # Open connection supplied by the db() context manager.
    conn: DbConn
50 |
51 |
# Agent with memory tools below; Deps carries the user id and DB connection.
agent = Agent(
    'openai:gpt-4o',
    deps_type=Deps,
    instructions='You are a helpful assistant.',
)
57 |
58 |
@agent.tool
async def record_memory(ctx: RunContext[Deps], value: str) -> str:
    """Use this tool to store information in memory."""
    # unique(user_id, value) + "on conflict do nothing" makes this idempotent.
    deps = ctx.deps
    await deps.conn.execute(
        'insert into memory(user_id, value) values($1, $2) on conflict do nothing', deps.user_id, value
    )
    return 'Value added to memory.'
66 |
67 |
@agent.tool
async def retrieve_memories(ctx: RunContext[Deps], memory_contains: str) -> str:
    """Get all memories about the user."""
    pattern = f'%{memory_contains}%'
    records = await ctx.deps.conn.fetch(
        'select value from memory where user_id = $1 and value ilike $2', ctx.deps.user_id, pattern
    )
    values = [record[0] for record in records]
    return '\n'.join(values)
75 |
76 |
@logfire.instrument
async def memory_tools():
    # Fresh table: the agent should store the fact via the record_memory tool.
    async with db(True) as conn:
        deps = Deps(123, conn)
        result = await agent.run('My name is Samuel.', deps=deps)
        print(result.output)

    # time goes by...

    # New connection and run: the agent must recall the fact via retrieve_memories.
    async with db() as conn:
        deps = Deps(123, conn)
        result = await agent.run('What is my name?', deps=deps)
        print(result.output)


if __name__ == '__main__':
    asyncio.run(memory_tools())
94 |
--------------------------------------------------------------------------------
/2025-04-data-council/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "2025-04-data-council"
3 | version = "0"
4 | description = "An Opinionated Blueprint for the Future of GenAI Applications"
5 | readme = "README.md"
6 | requires-python = ">=3.13"
7 | dependencies = [
8 | "asyncpg>=0.30.0",
9 | "devtools>=0.12.2",
10 | "logfire[asyncpg,httpx]>=3.14.0",
11 | "pydantic-ai>=0.1.3",
12 | "qrcode>=8.1",
13 | ]
14 |
15 | [tool.ruff]
16 | line-length = 120
17 | target-version = "py313"
18 |
19 | [tool.ruff.lint]
20 | extend-select = [
21 | "Q",
22 | "RUF100",
23 | "C90",
24 | "UP",
25 | "I",
26 | ]
27 | flake8-quotes = { inline-quotes = "single", multiline-quotes = "double" }
28 | isort = { combine-as-imports = true }
29 | mccabe = { max-complexity = 15 }
30 |
31 | [tool.ruff.lint.pydocstyle]
32 | convention = "google"
33 |
34 | [tool.ruff.format]
35 | # don't format python in docstrings, pytest-examples takes care of it
36 | docstring-code-format = false
37 | quote-style = "single"
38 |
39 | [tool.pyright]
40 | pythonVersion = "3.13"
41 | typeCheckingMode = "strict"
42 | reportUnnecessaryTypeIgnoreComment = true
43 | include = [
44 | "**/*.py",
45 | ]
46 | venvPath = ".venv"
47 |
48 | [dependency-groups]
49 | dev = [
50 | "asyncpg-stubs>=0.30.1",
51 | "ruff>=0.11.6",
52 | ]
53 |
--------------------------------------------------------------------------------
/2025-05-16-fastapi-demo/.gitignore:
--------------------------------------------------------------------------------
1 | test.db
2 | .venv
3 | .logfire
4 | .env
--------------------------------------------------------------------------------
/2025-05-16-fastapi-demo/README.md:
--------------------------------------------------------------------------------
1 | # FastAPI Demo with Math, Database, PydanticAI and MCP
2 |
3 | This is a FastAPI application that demonstrates:
4 | - Mathematical operations (division, Fibonacci)
5 | - Database operations with SQLAlchemy
6 | - PydanticAI agent integration with Tavily search
7 | - MCP (Model Context Protocol) integration with Playwright MCP server
8 | - Logfire observability
9 |
10 | ## Setup
11 |
12 | 1. Install dependencies:
13 | ```bash
14 | uv sync
15 | ```
16 |
2. Ensure you have Node.js installed for the Playwright MCP server:
18 | ```bash
19 | node --version # Should be v16 or higher
20 | ```
21 |
22 | 3. Create a `.env` file in the root directory with the following environment variables:
23 | ```
24 | OPENAI_API_KEY=your_openai_api_key_here
25 | TAVILY_API_KEY=your_tavily_api_key_here
26 | LOGFIRE_TOKEN=your_logfire_token_here
27 | DATABASE_URL=sqlite:///./test.db
28 | ```
29 |
30 | 4. Run the application:
31 | ```bash
32 | uv run uvicorn src.app:app --host 0.0.0.0 --port 8000 --reload
33 | ```
34 |
35 | ## API Endpoints
36 |
37 | - `GET /divide/{numerator}/{denominator}` - Divide two numbers
38 | - `GET /fibonacci/{n}` - Calculate nth Fibonacci number
39 | - `POST /items/` - Create a new item in the database
40 | - `GET /items/` - List all items with pagination
41 | - `GET /items/{item_id}` - Get a specific item by ID
42 | - `POST /agent/query` - Query the PydanticAI agent with a question
43 | - `POST /mcp/query` - Query the MCP-enabled agent with Playwright MCP
44 |
45 | ## Example Usage
46 |
47 | Query the PydanticAI agent:
48 | ```bash
49 | curl -X POST "http://localhost:8000/agent/query" \
50 | -H "Content-Type: application/json" \
51 | -d '{"question": "How do I use PydanticAI tools?"}'
52 | ```
53 |
Query the MCP agent (backed by the Playwright MCP server):
55 | ```bash
56 | curl -X POST "http://localhost:8000/mcp/query" \
57 | -H "Content-Type: application/json" \
58 | -d '{"question": "Create a simple Python script that prints hello world and save it to hello.py"}'
59 | ```
60 |
61 | Run evals:
62 | ```bash
63 | PYTHONPATH=. uv run python tests/evals.py
64 | ```
65 |
--------------------------------------------------------------------------------
/2025-05-16-fastapi-demo/main.py:
--------------------------------------------------------------------------------
import logfire

# Name reported in the startup log line.
APP_NAME = "example"

# NOTE(review): service_name is 'cli' although this file lives in the
# fastapi-demo project — looks copied from the CLI demo; confirm intent.
logfire.configure(
    service_name='cli',
    service_version='0.1.0',
)
9 |
def main():
    """Emit a structured startup log line."""
    # Use a logfire message template instead of an f-string so app_name is
    # recorded as a structured attribute (searchable in Logfire), while the
    # rendered message text stays identical.
    logfire.info("Launching app: {app_name}", app_name=APP_NAME)


if __name__ == '__main__':
    main()
16 |
--------------------------------------------------------------------------------
/2025-05-16-fastapi-demo/pydantic_ai_evals.yaml:
--------------------------------------------------------------------------------
1 | # yaml-language-server: $schema=pydantic_ai_evals_schema.json
2 | cases:
3 | - name: basic_agent_creation
4 | inputs:
5 | question: How do I create a basic PydanticAI agent?
6 | metadata:
7 | difficulty: easy
8 | topic: agent_creation
9 | expected_keywords:
10 | - Agent
11 | - model
12 | - system_prompt
13 | - pydantic_ai
14 | evaluators:
15 | - ConfidenceEvaluator
16 | - KeywordPresenceEvaluator
17 | - LLMJudge:
18 | rubric: Response should clearly explain how to create a PydanticAI agent with
19 | code examples
20 | include_input: true
21 | - name: user_prompt_modification
22 | inputs:
23 | question: How do I change the user prompt in PydanticAI?
24 | metadata:
25 | difficulty: medium
26 | topic: prompt_handling
27 | expected_keywords:
28 | - run
29 | - run_sync
30 | - user_prompt
31 | - agent
32 | evaluators:
33 | - ConfidenceEvaluator
34 | - KeywordPresenceEvaluator
35 | - LLMJudge:
36 | rubric: Response should explain how to modify user prompts with practical examples
37 | include_input: true
38 | - name: tools_integration
39 | inputs:
40 | question: How do I add tools to a PydanticAI agent?
41 | metadata:
42 | difficulty: medium
43 | topic: tools
44 | expected_keywords:
45 | - tools
46 | - function
47 | - decorator
48 | - '@tool'
49 | evaluators:
50 | - ConfidenceEvaluator
51 | - KeywordPresenceEvaluator
52 | - LLMJudge:
53 | rubric: Response should explain tools integration with clear examples and best
54 | practices
55 | include_input: true
56 | evaluators:
57 | - LLMJudge:
58 | rubric: Response should be helpful, accurate, and well-structured for PydanticAI
59 | documentation questions
60 | model: openai:gpt-4o-mini
61 |
--------------------------------------------------------------------------------
/2025-05-16-fastapi-demo/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
name = "2025-05-16-fastapi-demo"
version = "0"
description = "A FastAPI demo with math, database, PydanticAI and MCP endpoints, instrumented with Logfire"
5 | requires-python = ">=3.13"
6 | dependencies = [
7 | "opentelemetry-api>=1.20.0",
8 | "logfire[fastapi,sqlalchemy]>=3.14.0",
9 | "fastapi>=0.115.12",
10 | "sqlalchemy>=2.0.41",
11 | "uvicorn>=0.34.2",
12 | "pydantic-ai>=0.2.9",
13 | "pydantic-ai-slim[tavily]>=0.2.9",
14 | "mcp>=1.9.2",
15 | "python-dotenv>=1.1.0",
16 | "pydantic-evals>=0.2.9",
17 | ]
18 |
19 | [tool.ruff]
20 | line-length = 120
21 | target-version = "py313"
22 |
23 | [tool.ruff.lint]
24 | extend-select = [
25 | "Q",
26 | "RUF100",
27 | "C90",
28 | "UP",
29 | "I",
30 | ]
31 | flake8-quotes = { inline-quotes = "single", multiline-quotes = "double" }
32 | isort = { combine-as-imports = true }
33 | mccabe = { max-complexity = 15 }
34 |
35 | [tool.ruff.lint.pydocstyle]
36 | convention = "google"
37 |
38 | [tool.ruff.format]
39 | # don't format python in docstrings, pytest-examples takes care of it
40 | docstring-code-format = false
41 | quote-style = "single"
42 |
43 | [tool.pyright]
44 | pythonVersion = "3.13"
45 | typeCheckingMode = "strict"
46 | reportUnnecessaryTypeIgnoreComment = true
47 | include = [
48 | "**/*.py",
49 | ]
50 | venvPath = ".venv"
51 |
52 | [dependency-groups]
53 | dev = [
54 | "ruff>=0.11.6",
55 | ]
56 |
--------------------------------------------------------------------------------
/2025-05-16-fastapi-demo/src/agent.py:
--------------------------------------------------------------------------------
1 | import os
2 | from pathlib import Path
3 | from textwrap import dedent
4 | from typing import Annotated
5 |
6 | from dotenv import load_dotenv
7 | from pydantic import BaseModel, Field
8 | from pydantic_ai.agent import Agent
9 | from pydantic_ai.common_tools.tavily import tavily_search_tool
10 |
11 |
# Load secrets (OpenAI/Tavily keys, etc.) from the project-root .env file.
ROOT_DIR = Path(__file__).parent.parent
load_dotenv(dotenv_path=ROOT_DIR / ".env")
14 |
class BotResponse(BaseModel):
    """Structured answer the agent must produce for every question."""

    answer: str  # the answer itself
    reasoning: str  # how the agent arrived at the answer
    reference: str | None = None  # link to docs/resources backing the answer
    confidence_percentage: Annotated[int, Field(ge=0, le=100)]  # self-reported confidence
20 |
# System prompt for the PydanticAI support bot. Runtime text sent to the
# model — dedent only strips the indentation used for source readability.
SYSTEM_PROMPT = dedent(
    """
    You're an all-knowing expert in the PydanticAI agent framework.
    You will receive questions from users of PydanticAI about how to use the framework effectively.

    Where necessary, use Tavily to search for PydanticAI information. The documentation can be found here: https://ai.pydantic.dev/
    The LLM txt can be found here: https://ai.pydantic.dev/llms.txt

    For any given answer, where possible provide references to the documentation or other relevant resources.
    Give a confidence percentage for your answer, from 0 to 100.
    """
)
33 |
def build_agent() -> Agent[None, BotResponse]:
    """Construct the PydanticAI support agent with Tavily web search.

    Returns:
        An instrumented agent answering PydanticAI usage questions as BotResponse.

    Raises:
        RuntimeError: If TAVILY_API_KEY is not set in the environment.
    """
    api_key = os.getenv("TAVILY_API_KEY")
    # The original used `assert`, which is stripped under `python -O`;
    # raise explicitly so a missing key always fails loudly.
    if api_key is None:
        raise RuntimeError("TAVILY_API_KEY is not set; add it to .env or the environment")

    return Agent(
        "openai:gpt-4.1",
        tools=[tavily_search_tool(api_key)],
        output_type=BotResponse,
        system_prompt=SYSTEM_PROMPT,
        instrument=True,
    )
45 |
async def answer_question(agent: Agent[None, BotResponse], question: str) -> BotResponse:
    """Run the agent on a single question and return its structured output."""
    return (await agent.run(user_prompt=question)).output
49 |
--------------------------------------------------------------------------------
/2025-05-16-fastapi-demo/src/app.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import logfire
4 | from fastapi import FastAPI, HTTPException, Depends
5 | from pydantic import BaseModel
6 | from pydantic_ai.agent import Agent
7 | from sqlalchemy import create_engine, Column, Integer, String
8 | from sqlalchemy.ext.declarative import declarative_base
9 | from sqlalchemy.orm import sessionmaker, Session
10 |
11 | from src.agent import build_agent, answer_question, BotResponse
12 | from src.mcp_agent import answer_mcp_question, MCPBotResponse
13 |
# Configure Logfire before instrumenting anything below.
logfire.configure(
    service_name='api',
    environment='staging'
)

# FastAPI application setup
app = FastAPI(title="Math, Database and PydanticAI API")
logfire.instrument_fastapi(app)


# Database setup: SQLite file by default, overridable via DATABASE_URL.
DATABASE_URL = os.getenv("DATABASE_URL", "sqlite:///./test.db")
engine = create_engine(DATABASE_URL)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
logfire.instrument_sqlalchemy(engine)
logfire.instrument_mcp()
31 |
32 |
# Database model
class Item(Base):
    """SQLAlchemy row for the items table."""

    __tablename__ = "items"
    id = Column(Integer, primary_key=True, index=True)
    name = Column(String, index=True)
    description = Column(String)


# Create tables on import (no-op if they already exist).
Base.metadata.create_all(bind=engine)
42 |
43 |
# Pydantic models
class ItemCreate(BaseModel):
    """Request body for creating an item."""

    name: str
    description: str
48 |
49 |
class ItemResponse(BaseModel):
    """Response body for item endpoints, built from SQLAlchemy rows."""

    id: int
    name: str
    description: str

    # Pydantic v2: inner `class Config` is deprecated; model_config is the
    # supported spelling. from_attributes lets validation read ORM attributes.
    model_config = {"from_attributes": True}
57 |
58 |
class AgentQuery(BaseModel):
    """Request body for the /agent/query endpoint."""

    question: str
61 |
62 |
class MCPQuery(BaseModel):
    """Request body for /mcp/query: a question for the browser-enabled agent."""
    question: str
65 |
66 |
# Dependency to get DB session
def get_db():
    """FastAPI dependency: yield a SQLAlchemy session, always closing it after the request."""
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
74 |
75 |
# Dependency to get agent instance
def get_agent() -> Agent[None, BotResponse]:
    """FastAPI dependency: construct the PydanticAI docs agent for a request."""
    return build_agent()
79 |
80 |
# Endpoint 1: Division
@app.get("/divide/{numerator}/{denominator}")
async def divide(numerator: float, denominator: float):
    """
    Divides the numerator by the denominator and returns the result.

    Raises a 400 HTTPException when the denominator is zero — previously this
    surfaced as an unhandled ZeroDivisionError (an opaque 500 to the client),
    inconsistent with how /fibonacci reports bad input.
    """
    if denominator == 0:
        raise HTTPException(status_code=400, detail="Denominator must not be zero")
    return {"result": numerator / denominator}
89 |
90 |
# Endpoint 2: Fibonacci
@app.get("/fibonacci/{n}")
async def fibonacci(n: int):
    """
    Return the nth Fibonacci number (0-indexed: fib(0)=0, fib(1)=1).

    Raises an HTTPException (400) for negative n.
    """
    if n < 0:
        raise HTTPException(status_code=400, detail="Input must be a non-negative integer")

    if n <= 1:
        return {"result": n}

    # Iterate n-1 times: (prev, curr) walks from (fib(0), fib(1)) up to fib(n).
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr

    return {"result": curr}
109 |
110 |
# Endpoint 3: Database Query
@app.post("/items/", response_model=ItemResponse)
async def create_item(item: ItemCreate, db: Session = Depends(get_db)):
    """Persist a new item and return it with its generated ID."""
    record = Item(name=item.name, description=item.description)
    db.add(record)
    db.commit()
    # Refresh to populate the autogenerated primary key on the instance.
    db.refresh(record)
    return record
122 |
123 |
@app.get("/items/", response_model=list[ItemResponse])
async def read_items(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
    """Return a page of items: skip `skip` rows, cap the page at `limit`."""
    page_query = db.query(Item).offset(skip).limit(limit)
    return page_query.all()
131 |
132 |
@app.get("/items/{item_id}", response_model=ItemResponse)
async def read_item(item_id: int, db: Session = Depends(get_db)):
    """Fetch a single item by primary key; respond 404 when absent."""
    found = db.query(Item).filter(Item.id == item_id).first()
    if found is None:
        raise HTTPException(status_code=404, detail="Item not found")
    return found
143 |
144 |
@app.post("/agent/query", response_model=BotResponse)
async def query_agent(query: AgentQuery, agent: Agent[None, BotResponse] = Depends(get_agent)):
    """Forward the user's question to the PydanticAI docs agent and return its answer."""
    logfire.info(f"Querying agent with question: {query.question}")
    return await answer_question(agent, query.question)
153 |
154 |
@app.post("/mcp/query", response_model=MCPBotResponse)
async def query_mcp_agent(query: MCPQuery):
    """Forward the user's question to the browser-automation (MCP) agent."""
    logfire.info(f"Querying MCP agent with question: {query.question}")
    return await answer_mcp_question(query.question)
163 |
164 |
165 |
# Run a local development server when this module is executed directly.
if __name__ == '__main__':
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
--------------------------------------------------------------------------------
/2025-05-16-fastapi-demo/src/mcp_agent.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from pathlib import Path
3 | from textwrap import dedent
4 | from typing import Annotated
5 |
6 | import logfire
7 | from dotenv import load_dotenv
8 | from pydantic import BaseModel, Field
9 | from pydantic_ai import Agent
10 | from pydantic_ai.mcp import MCPServerStdio
11 |
ROOT_DIR = Path(__file__).parent.parent
# Load environment variables (e.g. the model API key) from the project-root .env.
load_dotenv(dotenv_path=ROOT_DIR / ".env")

# Configure logfire instrumentation
# NOTE(review): scrubbing=False sends payloads unredacted — fine for a demo,
# reconsider before production use.
logfire.configure(scrubbing=False, service_name='playwright-browser')
logfire.instrument_mcp()
logfire.instrument_pydantic_ai()
19 |
class MCPBotResponse(BaseModel):
    # Structured answer produced by the browser-enabled agent.
    answer: str
    reasoning: str
    # Pydantic copies mutable defaults per instance, so a shared list is safe here.
    websites_accessed: list[str] = []
    # Self-reported confidence, constrained to 0-100 via the Field bounds.
    confidence_percentage: Annotated[int, Field(ge=0, le=100)]
25 |
# System prompt for the browser agent; dedent() strips the literal's leading
# indentation so the model receives clean text.
SYSTEM_PROMPT = dedent(
    """
    You're a helpful AI assistant with access to browser automation capabilities through Playwright.
    You can navigate to websites, interact with web pages, take screenshots, and extract information.

    When working with web pages:
    - Be thorough in your web navigation and information extraction
    - Take screenshots when helpful for verification
    - Extract relevant information clearly and accurately
    - Explain what you're doing with the browser
    - Be mindful of website terms of service and respectful browsing practices

    Give a confidence percentage for your answer, from 0 to 100.
    List any websites you accessed in the websites_accessed field.
    """
)
42 |
# Set up Playwright MCP server, launched over stdio via npx.
# npx's "run without prompting" flag is lowercase `-y` (`--yes`); the original
# `-Y` is not a recognised npx option and would prevent the server starting.
browser_mcp = MCPServerStdio('npx', args=['-y', '@playwright/mcp@latest'])
45 |
# Create the agent with MCP server integration
agent = Agent(
    'openai:gpt-4o',
    output_type=MCPBotResponse,  # structured output validated by Pydantic
    system_prompt=SYSTEM_PROMPT,
    mcp_servers=[browser_mcp],  # tools come from the Playwright MCP server
    instrument=True,  # emit spans for each agent run
)
54 |
async def answer_mcp_question(question: str) -> MCPBotResponse:
    """Start the Playwright MCP server, run `question` through the agent, return the structured result."""
    async with agent.run_mcp_servers():
        run_result = await agent.run(user_prompt=question)
        return run_result.output
60 |
async def main():
    """Manual smoke test: summarise the latest pydantic.dev announcement."""
    prompt = (
        'Navigate to pydantic.dev and get information about their latest blog post or announcement. '
        'Summarize what you find.'
    )
    async with agent.run_mcp_servers():
        run_result = await agent.run(prompt)
    print(run_result.output)

if __name__ == '__main__':
    asyncio.run(main())
--------------------------------------------------------------------------------
/2025-05-16-fastapi-demo/tests/evals.py:
--------------------------------------------------------------------------------
1 | """
2 | Pydantic AI Evaluation for testing the PydanticAI docs agent functionality.
3 | This eval tests how well the agent can answer questions about PydanticAI framework.
4 | """
5 |
6 | import asyncio
7 | from typing import Any
8 |
9 | import logfire
10 | from pydantic import BaseModel, Field
11 | from pydantic_evals import Case, Dataset
12 | from pydantic_evals.evaluators import Evaluator, EvaluatorContext, LLMJudge
13 |
14 | from src.agent import build_agent, answer_question, BotResponse
15 |
16 |
class AgentQuery(BaseModel):
    """Input model for agent queries."""
    # Forwarded verbatim to the docs agent by the task function below.
    question: str = Field(description="Question to ask the PydanticAI docs agent")
20 |
21 |
class EvalMetadata(BaseModel):
    """Metadata for evaluation cases."""
    # NOTE(review): the dataset below passes plain dicts as metadata; this model
    # documents the expected shape but is not enforced there.
    difficulty: str = Field(description="Difficulty level: easy, medium, hard")
    topic: str = Field(description="Topic area being tested")
    expected_keywords: list[str] = Field(description="Keywords that should appear in response")
27 |
28 |
class ConfidenceEvaluator(Evaluator[AgentQuery, BotResponse]):
    """Score 1.0 when the agent's self-reported confidence meets the threshold,
    otherwise give partial credit proportional to the reported confidence."""

    def __init__(self, min_confidence: int = 70):
        self.min_confidence = min_confidence

    def evaluate(self, ctx: EvaluatorContext[AgentQuery, BotResponse]) -> float:
        confidence = ctx.output.confidence_percentage
        return 1.0 if confidence >= self.min_confidence else confidence / 100.0
39 |
40 |
class KeywordPresenceEvaluator(Evaluator[AgentQuery, BotResponse]):
    """Score the fraction of expected keywords (from case metadata) found in the answer."""

    def evaluate(self, ctx: EvaluatorContext[AgentQuery, BotResponse]) -> float:
        metadata = getattr(ctx, 'metadata', None)
        if not metadata:
            return 0.0

        keywords = metadata.get('expected_keywords', [])
        if not keywords:
            # Nothing to check — treat as a full pass.
            return 1.0

        haystack = ctx.output.answer.lower()
        hits = sum(1 for keyword in keywords if keyword.lower() in haystack)
        return hits / len(keywords)
59 |
60 |
# Task function that wraps the agent
async def query_pydantic_ai_agent(query: AgentQuery) -> BotResponse:
    """Evaluation task: build the docs agent and answer one question."""
    docs_agent = build_agent()
    return await answer_question(docs_agent, query.question)
66 |
67 |
# Create the evaluation dataset.
# Each Case pairs an input question with per-case evaluators; the dataset-level
# evaluators at the bottom additionally run for every case.
pydantic_ai_dataset = Dataset[AgentQuery, BotResponse, dict[str, Any]](
    cases=[
        Case(
            name='basic_agent_creation',
            inputs=AgentQuery(question="How do I create a basic PydanticAI agent?"),
            metadata={
                'difficulty': 'easy',
                'topic': 'agent_creation',
                'expected_keywords': ['Agent', 'model', 'system_prompt', 'pydantic_ai']
            },
            evaluators=(
                ConfidenceEvaluator(min_confidence=80),
                KeywordPresenceEvaluator(),
                LLMJudge(
                    rubric="Response should clearly explain how to create a PydanticAI agent with code examples",
                    include_input=True
                ),
            )
        ),

        Case(
            name='user_prompt_modification',
            inputs=AgentQuery(question="How do I change the user prompt in PydanticAI?"),
            metadata={
                'difficulty': 'medium',
                'topic': 'prompt_handling',
                'expected_keywords': ['run', 'run_sync', 'user_prompt', 'agent']
            },
            evaluators=(
                ConfidenceEvaluator(min_confidence=75),
                KeywordPresenceEvaluator(),
                LLMJudge(
                    rubric="Response should explain how to modify user prompts with practical examples",
                    include_input=True
                ),
            )
        ),

        Case(
            name='tools_integration',
            inputs=AgentQuery(question="How do I add tools to a PydanticAI agent?"),
            metadata={
                'difficulty': 'medium',
                'topic': 'tools',
                'expected_keywords': ['tools', 'function', 'decorator', '@tool']
            },
            evaluators=(
                ConfidenceEvaluator(min_confidence=70),
                KeywordPresenceEvaluator(),
                LLMJudge(
                    rubric="Response should explain tools integration with clear examples and best practices",
                    include_input=True
                ),
            )
        ),
    ],
    evaluators=[
        # Global evaluators that apply to all cases
        LLMJudge(
            rubric="Response should be helpful, accurate, and well-structured for PydanticAI documentation questions",
            model='openai:gpt-4o-mini' # Use a cost-effective model for evaluation
        ),
    ]
)
133 |
134 |
async def run_evaluation(send_to_logfire: bool = True):
    """Run the PydanticAI docs agent evaluation.

    Args:
        send_to_logfire: When True, configure Logfire so evaluation traces
            and scores are exported.

    Returns:
        The evaluation report produced by pydantic_evals.
    """
    if send_to_logfire:
        logfire.configure(
            send_to_logfire=True,
            service_name='pydantic-ai-docs-evals',
            environment='development'
        )

    print("🚀 Running PydanticAI Docs Agent Evaluation")
    print("=" * 60)

    # Run every case in the dataset against the task function.
    report = await pydantic_ai_dataset.evaluate(query_pydantic_ai_agent)

    # Print detailed results
    report.print(
        include_input=True,
        include_output=True,
        include_durations=True,
        include_averages=True
    )

    # Save the dataset (cases + evaluators) to file.
    # (The original used an f-string with no placeholders in the print — ruff F541.)
    pydantic_ai_dataset.to_file('pydantic_ai_evals.yaml')
    print("\n📁 Dataset saved to: pydantic_ai_evals.yaml")

    return report


if __name__ == "__main__":
    asyncio.run(run_evaluation())
168 |
--------------------------------------------------------------------------------
/2025-05-odsc/.python-version:
--------------------------------------------------------------------------------
1 | 3.13
2 |
--------------------------------------------------------------------------------
/2025-05-odsc/agent-loop.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pydantic/talks/47b2837f290a3955983f534e7563afc174ff172f/2025-05-odsc/agent-loop.png
--------------------------------------------------------------------------------
/2025-05-odsc/browser_mcp.py:
--------------------------------------------------------------------------------
1 | import logfire
2 | from pydantic_ai import Agent
3 | from pydantic_ai.mcp import MCPServerStdio
4 |
logfire.configure(scrubbing=False, service_name='browse')
logfire.instrument_mcp()
logfire.instrument_pydantic_ai()

# Playwright MCP server launched over stdio. npx's "run without prompting"
# flag is lowercase `-y` (`--yes`); `-Y` is not a valid npx option.
browser_mcp = MCPServerStdio('npx', args=['-y', '@playwright/mcp@latest'])

# Agent that can drive a real browser through the MCP server's tools.
agent = Agent(
    'anthropic:claude-3-7-sonnet-latest',
    mcp_servers=[browser_mcp],
)
15 |
16 |
async def main():
    """Ask the browser agent to fetch and summarise the latest pydantic.dev post."""
    async with agent.run_mcp_servers():
        result = await agent.run(
            # Prompt typos fixed ("summaries those annoucements" -> below);
            # the garbled wording weakened the instruction to the model.
            'get the most recent blog post from pydantic.dev '
            'which should contain multiple announcements, '
            'summarise those announcements as a list.'
        )
        print(result.output)


if __name__ == '__main__':
    import asyncio

    asyncio.run(main())
31 |
--------------------------------------------------------------------------------
/2025-05-odsc/browser_mcp_graph.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations as _
2 |
3 | from dataclasses import dataclass
4 |
5 | import logfire
6 | from pydantic_ai import Agent
7 | from pydantic_ai.mcp import MCPServerStdio
8 | from pydantic_graph import BaseNode, End, Graph, GraphRunContext
9 |
logfire.configure(scrubbing=False, service_name='browse-graph')
logfire.instrument_mcp()
logfire.instrument_pydantic_ai()

# Playwright MCP server over stdio. npx's "run without prompting" flag is
# lowercase `-y` (`--yes`); `-Y` is not a valid npx option.
server = MCPServerStdio(
    'npx',
    args=[
        '-y',
        '@playwright/mcp@latest',
    ],
)
21 |
# Agent used by the graph nodes below to drive the browser; the system prompt
# constrains it to reply with a bare URL so node outputs chain cleanly.
browser_agent = Agent(
    'anthropic:claude-3-7-sonnet-latest',
    mcp_servers=[server],
    system_prompt='Find the page requested by the user and return the URL only. Nothing else.',
)
27 |
28 |
@dataclass
class FindBlog(BaseNode):
    """Graph node: locate the blog-index page for the given site."""

    url: str

    async def run(self, ctx: GraphRunContext) -> FindLatestPosts:
        found = await browser_agent.run(
            f'Find the page with a list of blog posts at {self.url}.'
        )
        return FindLatestPosts(found.output)
38 |
39 |
@dataclass
class FindLatestPosts(BaseNode):
    """Graph node: from a blog index URL, find the most recent post."""

    url: str

    async def run(self, ctx: GraphRunContext) -> SummariesContent:
        latest = await browser_agent.run(f'Find the latest blog post at {self.url}')
        return SummariesContent(latest.output)
47 |
48 |
# Plain (non-browser) agent used by the final node to summarise page content.
summary_agent = Agent(
    'anthropic:claude-3-7-sonnet-latest',
    system_prompt='Summarise the content of the blog post page as markdown',
)
53 |
54 |
@dataclass
class SummariesContent(BaseNode[None, None, str]):
    """Terminal graph node: summarise the content and end the run with the summary."""

    content: str

    async def run(self, ctx: GraphRunContext) -> End[str]:
        summary = await summary_agent.run(self.content)
        return End(summary.output)
62 |
63 |
# Assemble the pipeline: FindBlog -> FindLatestPosts -> SummariesContent.
graph = Graph(nodes=[FindBlog, FindLatestPosts, SummariesContent])


async def main():
    # The browser agent owns the MCP server lifecycle for the whole graph run.
    async with browser_agent.run_mcp_servers():
        result = await graph.run(FindBlog(url='pydantic.dev'))
        print(result.output)


if __name__ == '__main__':
    # Currently only renders the graph diagram; uncomment below to actually run it.
    graph.mermaid_save('browser.png')
    # import asyncio
    # asyncio.run(main())
77 |
--------------------------------------------------------------------------------
/2025-05-odsc/evals/01_generate_dataset.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from pathlib import Path
3 | from types import NoneType
4 |
5 | from pydantic_evals import Dataset
6 | from pydantic_evals.generation import generate_dataset
7 |
8 | from agent import TimeRangeInputs, TimeRangeResponse
9 |
10 | import logfire
11 |
12 | logfire.configure()
13 | logfire.instrument_pydantic_ai()
14 |
15 |
async def main():
    """Use an LLM to generate an initial eval dataset and write it to YAML."""
    dataset = await generate_dataset(
        dataset_type=Dataset[TimeRangeInputs, TimeRangeResponse, NoneType],
        model='openai:o1',  # Use a smarter model since this is a more complex task that is only run once
        n_examples=10,
        extra_instructions="""
        Generate a dataset of test cases for the time range inference agent.

        Include a variety of inputs that might be given to the agent, including some where the only
        reasonable response is a `TimeRangeBuilderError`, and some where a `TimeRangeBuilderSuccess` is
        expected. Make use of the `IsInstance` evaluator to ensure that the inputs and outputs are of the appropriate
        type.

        When appropriate, use the `LLMJudge` evaluator to provide a more precise description of the time range the
        agent should have inferred. In particular, it's good if the example user inputs are somewhat ambiguous, to
        reflect realistic (difficult-to-handle) user questions, but the LLMJudge evaluator can help ensure that the
        agent's output is still judged based on precisely what the desired behavior is even for somewhat ambiguous
        user questions. You do not need to include LLMJudge evaluations for all cases (in particular, for cases where
        the expected output is unambiguous from the user's question), but you should include at least one or two
        examples that do benefit from an LLMJudge evaluation (and include it).

        To be clear, the LLMJudge rubrics should be concise and reflect only information that is NOT ALREADY PRESENT
        in the user prompt for the example.

        Leave the model and include_input arguments to LLMJudge as their default values (null).

        Also add a dataset-wide LLMJudge evaluator to ensure that the 'explanation' or 'error_message' fields are
        appropriate to be displayed to the user (e.g., written in second person, etc.).
        """,
    )

    # Persist the generated cases next to this script for later steps (02-04).
    dataset.to_file(
        Path(__file__).parent / 'datasets' / 'time_range_v1.yaml',
        fmt='yaml',
    )


if __name__ == '__main__':
    asyncio.run(main())
55 |
--------------------------------------------------------------------------------
/2025-05-odsc/evals/02_add_custom_evaluators.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from types import NoneType
3 |
4 | from pydantic_evals import Dataset
5 |
6 | from custom_evaluators import (
7 | CUSTOM_EVALUATOR_TYPES,
8 | AgentCalledTool,
9 | UserMessageIsConcise,
10 | ValidateTimeRange,
11 | )
12 | from agent import TimeRangeInputs, TimeRangeResponse
13 |
14 |
def main():
    """Load the v1 dataset, attach custom evaluators, and write it out as v2."""
    datasets_dir = Path(__file__).parent / 'datasets'
    dataset = Dataset[TimeRangeInputs, TimeRangeResponse, NoneType].from_file(
        datasets_dir / 'time_range_v1.yaml'
    )
    # Dataset-wide evaluators applied to every case.
    dataset.add_evaluator(ValidateTimeRange())
    dataset.add_evaluator(UserMessageIsConcise())
    # This check only makes sense for the named case.
    dataset.add_evaluator(
        AgentCalledTool('time_range_agent', 'get_current_time'),
        specific_case='Single time point',
    )
    dataset.to_file(
        datasets_dir / 'time_range_v2.yaml',
        custom_evaluator_types=CUSTOM_EVALUATOR_TYPES,
    )


if __name__ == '__main__':
    main()
34 |
--------------------------------------------------------------------------------
/2025-05-odsc/evals/03_unit_testing.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from types import NoneType
3 |
4 | import logfire
5 | from agent import TimeRangeInputs, TimeRangeResponse, infer_time_range
6 | from custom_evaluators import CUSTOM_EVALUATOR_TYPES
7 | from pydantic_evals import Dataset
8 |
# Tag eval runs with environment/service metadata in Logfire.
logfire.configure(
    environment='development', service_name='evals', service_version='0.0.1'
)
logfire.instrument_pydantic_ai()

# Load the v2 dataset; custom evaluator types must be registered so their
# YAML entries can be deserialized.
dataset_path = Path(__file__).parent / 'datasets' / 'time_range_v2.yaml'
dataset = Dataset[TimeRangeInputs, TimeRangeResponse, NoneType].from_file(
    dataset_path, custom_evaluator_types=CUSTOM_EVALUATOR_TYPES
)
# Run the agent against every case and print the report table.
report = dataset.evaluate_sync(infer_time_range)
print(report)

# Fail like a unit test if fewer than 80% of boolean assertions passed.
assertion_pass_rate = report.averages().assertions
assert assertion_pass_rate is not None, 'There should be at least one assertion'
assert assertion_pass_rate > 0.8, (
    f'The assertion pass rate was {assertion_pass_rate:.1%}; it should be above 80%.'
)
26 |
--------------------------------------------------------------------------------
/2025-05-odsc/evals/04_compare_models.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from types import NoneType
3 |
4 | import logfire
5 | from pydantic_evals import Dataset
6 |
7 | from custom_evaluators import CUSTOM_EVALUATOR_TYPES
8 | from agent import time_range_agent, infer_time_range, TimeRangeInputs, TimeRangeResponse
9 |
logfire.configure(environment='development', service_name='evals')
logfire.instrument_pydantic_ai()

dataset_path = Path(__file__).parent / 'datasets' / 'time_range_v2.yaml'
dataset = Dataset[TimeRangeInputs, TimeRangeResponse, NoneType].from_file(
    dataset_path, custom_evaluator_types=CUSTOM_EVALUATOR_TYPES
)
# Evaluate the same dataset under two models. agent.override swaps the model
# without rebuilding the agent; the wrapping span groups both runs in Logfire
# for side-by-side comparison.
with logfire.span('Comparing different models for time_range_agent'):
    with time_range_agent.override(model='openai:gpt-4o'):
        dataset.evaluate_sync(infer_time_range, name='openai:gpt-4o')
    with time_range_agent.override(model='anthropic:claude-3-7-sonnet-latest'):
        dataset.evaluate_sync(
            infer_time_range, name='anthropic:claude-3-7-sonnet-latest'
        )
24 |
--------------------------------------------------------------------------------
/2025-05-odsc/evals/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pydantic/talks/47b2837f290a3955983f534e7563afc174ff172f/2025-05-odsc/evals/__init__.py
--------------------------------------------------------------------------------
/2025-05-odsc/evals/agent.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations as _annotations
2 |
3 | from dataclasses import dataclass, field
4 | from datetime import datetime
5 |
6 | from devtools import debug
7 | from pydantic import AwareDatetime, BaseModel
8 | from pydantic_ai import Agent, RunContext
9 | from typing_extensions import TypedDict
10 |
11 |
class TimeRangeBuilderSuccess(BaseModel, use_attribute_docstrings=True):
    """Response when a time range could be successfully generated."""

    # With use_attribute_docstrings=True, the string literals below become field
    # descriptions in the JSON schema shown to the model — keep them accurate.
    min_timestamp_with_offset: AwareDatetime
    """A datetime in ISO format with timezone offset."""
    max_timestamp_with_offset: AwareDatetime
    """A datetime in ISO format with timezone offset."""
    explanation: str | None
    """
    A brief explanation of the time range that was selected.

    For example, if a user only mentions a specific point in time, you might explain that you selected a 10 minute
    window around that time.
    """

    def __str__(self):
        # Human-readable rendering of the selected range.
        lines = [
            'TimeRangeBuilderSuccess:',
            f'* min_timestamp_with_offset: {self.min_timestamp_with_offset:%A, %B %d, %Y %H:%M:%S %Z}',
            f'* max_timestamp_with_offset: {self.max_timestamp_with_offset:%A, %B %d, %Y %H:%M:%S %Z}',
        ]
        if self.explanation is not None:
            lines.append(f'* explanation: {self.explanation}')
        return '\n'.join(lines)
36 |
37 |
class TimeRangeBuilderError(BaseModel):
    """Response when a time range cannot be generated."""

    # User-facing description of why no range could be built.
    error_message: str

    def __str__(self):
        return f'TimeRangeBuilderError:\n* {self.error_message}'


# Union the agent returns: either a successful range or an error.
TimeRangeResponse = TimeRangeBuilderSuccess | TimeRangeBuilderError
48 |
49 |
class TimeRangeInputs(TypedDict):
    """The inputs for the time range inference agent."""

    # Natural-language request, e.g. "2pm yesterday".
    prompt: str
    # Timezone-aware reference time the range is interpreted relative to.
    now: AwareDatetime
55 |
56 |
def _aware_local_now() -> datetime:
    """Current wall-clock time with the local timezone attached (aware)."""
    return datetime.now().astimezone()


@dataclass
class TimeRangeDeps:
    """Dependencies for the time-range agent: the reference 'now'."""

    now: datetime = field(default_factory=_aware_local_now)
60 |
61 |
# Agent that converts a natural-language request into a structured time range.
time_range_agent = Agent[TimeRangeDeps, TimeRangeResponse](
    'openai:gpt-4o',
    output_type=TimeRangeResponse,  # type: ignore # we can't yet annotate something as receiving a TypeForm
    deps_type=TimeRangeDeps,
    system_prompt='Convert the user request into a structured time range.',
    retries=1,
    instrument=True,  # emit spans for runs (used by the span-tree evaluators)
)
70 |
71 |
@time_range_agent.tool
def get_current_time(ctx: RunContext[TimeRangeDeps]) -> str:
    """Get the user's current time and timezone in the format 'Friday, November 22, 2024 11:15:14 PST'."""
    # NOTE: the docstring above is sent to the model as the tool description.
    now = ctx.deps.now
    return f"The user's current time is {now:%A, %B %d, %Y %H:%M:%S %Z}."
76 |
77 |
async def infer_time_range(inputs: TimeRangeInputs) -> TimeRangeResponse:
    """Infer a time range from a user prompt."""
    run_result = await time_range_agent.run(
        inputs['prompt'], deps=TimeRangeDeps(now=inputs['now'])
    )
    return run_result.output
82 |
83 |
if __name__ == '__main__':
    import asyncio

    # Manual smoke test: infer a range for "2pm yesterday" relative to now.
    response = asyncio.run(
        infer_time_range(
            {'prompt': '2pm yesterday', 'now': datetime.now().astimezone()}
        )
    )

    debug(response)
94 |
--------------------------------------------------------------------------------
/2025-05-odsc/evals/custom_evaluators.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass
2 | from datetime import timedelta
3 |
4 | from pydantic_evals.evaluators import Evaluator, EvaluatorContext, EvaluatorOutput
5 | from pydantic_evals.otel import SpanQuery
6 |
7 | from agent import TimeRangeBuilderSuccess, TimeRangeInputs, TimeRangeResponse
8 |
9 |
@dataclass
class ValidateTimeRange(Evaluator[TimeRangeInputs, TimeRangeResponse]):
    """Sanity-check successful ranges: bounded length and not in the future."""

    def evaluate(
        self, ctx: EvaluatorContext[TimeRangeInputs, TimeRangeResponse]
    ) -> EvaluatorOutput:
        if not isinstance(ctx.output, TimeRangeBuilderSuccess):
            return {}  # No evaluation needed for errors

        window_start = ctx.output.min_timestamp_with_offset
        window_end = ctx.output.max_timestamp_with_offset
        return {
            'window_is_not_too_long': window_end - window_start <= timedelta(days=30),
            'window_is_not_in_the_future': window_end <= ctx.inputs['now'],
        }
24 |
25 |
@dataclass
class UserMessageIsConcise(Evaluator[TimeRangeInputs, TimeRangeResponse]):
    """Check that the user-facing text (explanation or error) stays under 50 words."""

    async def evaluate(
        self, ctx: EvaluatorContext[TimeRangeInputs, TimeRangeResponse]
    ) -> EvaluatorOutput:
        is_success = isinstance(ctx.output, TimeRangeBuilderSuccess)
        message = ctx.output.explanation if is_success else ctx.output.error_message

        if message is None:
            # Nothing user-facing to judge.
            return {}
        return len(message.split()) < 50
40 |
41 |
@dataclass
class AgentCalledTool(Evaluator):
    """Assert, via the captured OTel span tree, that a given agent invoked a given tool."""

    # Value of the 'agent_name' attribute on the agent's run span.
    agent_name: str
    # Tool name as recorded in the 'gen_ai.tool.name' span attribute.
    tool_name: str

    def evaluate(self, ctx: EvaluatorContext) -> bool:
        # True if any 'agent run' span for this agent has a descendant
        # 'running tool' span for the expected tool. stop_recursing_when
        # appears intended to scope the search to a single agent run rather
        # than nested sub-agent runs — confirm against pydantic_evals docs.
        return ctx.span_tree.any(
            SpanQuery(
                name_equals='agent run',
                has_attributes={'agent_name': self.agent_name},
                stop_recursing_when=SpanQuery(name_equals='agent run'),
                some_descendant_has=SpanQuery(
                    name_equals='running tool',
                    has_attributes={'gen_ai.tool.name': self.tool_name},
                ),
            )
        )
59 |
60 |
# Evaluator classes passed as `custom_evaluator_types` when (de)serializing datasets.
CUSTOM_EVALUATOR_TYPES = ValidateTimeRange, UserMessageIsConcise, AgentCalledTool
62 |
--------------------------------------------------------------------------------
/2025-05-odsc/evals/datasets/time_range_v1.yaml:
--------------------------------------------------------------------------------
1 | # yaml-language-server: $schema=time_range_v1_schema.json
2 | cases:
3 | - name: Single time point
4 | inputs:
5 | prompt: I'd like logs from 2 PM on 2024-05-20.
6 | now: "2024-05-20T14:15:00Z"
7 | expected_output:
8 | min_timestamp_with_offset: "2024-05-20T14:00:00Z"
9 | max_timestamp_with_offset: "2024-05-20T14:30:00Z"
10 | explanation:
11 | You provided a single time (2 PM), so we selected a short window
12 | around that time.
13 | evaluators:
14 | - IsInstance: TimeRangeBuilderSuccess
15 | - name: Ambiguous request for tomorrow
16 | inputs:
17 | prompt: Show me what's going on tomorrow.
18 | now: "2024-05-19T09:00:00Z"
19 | expected_output:
20 | min_timestamp_with_offset: "2024-05-20T00:00:00Z"
21 | max_timestamp_with_offset: "2024-05-20T23:59:59Z"
22 | explanation: We interpreted 'tomorrow' as the entire next day in UTC.
23 | evaluators:
24 | - IsInstance: TimeRangeBuilderSuccess
25 | - LLMJudge: We want the entire next day in UTC to cover all events tomorrow.
26 | - name: Future logs not available
27 | inputs:
28 | prompt: I'd like logs from next year.
29 | now: "2025-01-01T00:00:00Z"
30 | expected_output:
31 | error_message:
32 | We cannot generate a time range in the future based on the provided
33 | 'now'.
34 | evaluators:
35 | - IsInstance: TimeRangeBuilderError
36 | - name: No time reference at all
37 | inputs:
38 | prompt: Give me the logs.
39 | now: "2024-01-01T00:00:00Z"
40 | expected_output:
41 | error_message: No time references were detected in your request.
42 | evaluators:
43 | - IsInstance: TimeRangeBuilderError
44 | - name: Exact small range
45 | inputs:
46 | prompt: Could I see logs from 3:00 PM to 3:45 PM on Feb 10, 2024?
47 | now: "2024-02-10T15:30:00Z"
48 | expected_output:
49 | min_timestamp_with_offset: "2024-02-10T15:00:00Z"
50 | max_timestamp_with_offset: "2024-02-10T15:45:00Z"
51 | explanation: You specifically requested logs between 3:00 PM and 3:45 PM.
52 | evaluators:
53 | - IsInstance: TimeRangeBuilderSuccess
54 | - name: All-day request
55 | inputs:
56 | prompt: I need logs from October 25th, 2024.
57 | now: "2024-10-24T10:00:00Z"
58 | expected_output:
59 | min_timestamp_with_offset: "2024-10-25T00:00:00Z"
60 | max_timestamp_with_offset: "2024-10-25T23:59:59Z"
61 | explanation:
62 | We interpreted the request for October 25, 2024 as the entire day
63 | in UTC.
64 | evaluators:
65 | - IsInstance: TimeRangeBuilderSuccess
66 | - name: Unrecognized date format
67 | inputs:
68 | prompt: Get logs from 13/13/2024.
69 | now: "2024-10-24T10:00:00Z"
70 | expected_output:
71 | error_message: We could not recognize a valid date from your request.
72 | evaluators:
73 | - IsInstance: TimeRangeBuilderError
74 | - name: Ambiguous reference to next weekend
75 | inputs:
76 | prompt: I want logs from next weekend.
77 | now: "2024-05-16T12:00:00Z"
78 | expected_output:
79 | min_timestamp_with_offset: "2024-05-18T00:00:00Z"
80 | max_timestamp_with_offset: "2024-05-19T23:59:59Z"
81 | explanation:
82 | We interpreted 'next weekend' as Saturday and Sunday following your
83 | current date.
84 | evaluators:
85 | - IsInstance: TimeRangeBuilderSuccess
86 | - LLMJudge:
87 | We assume the user wants the entire upcoming Saturday and Sunday in
88 | UTC.
89 | - name: Last night logs
90 | inputs:
91 | prompt: Show me the logs from last night.
92 | now: "2024-08-01T09:00:00Z"
93 | expected_output:
94 | min_timestamp_with_offset: "2024-07-31T20:00:00Z"
95 | max_timestamp_with_offset: "2024-08-01T06:00:00Z"
96 | explanation:
97 | We interpreted 'last night' as 8 PM to 6 AM prior to your current
98 | morning time.
99 | evaluators:
100 | - IsInstance: TimeRangeBuilderSuccess
101 | - name: Cross-year boundary
102 | inputs:
103 | prompt: Show me logs from 2024-12-31 23:59 UTC to 2025-01-01 00:15 UTC.
104 | now: "2025-01-01T12:00:00Z"
105 | expected_output:
106 | min_timestamp_with_offset: "2024-12-31T23:59:00Z"
107 | max_timestamp_with_offset: "2025-01-01T00:15:00Z"
108 | explanation:
109 | We selected the precise range you requested, crossing into the new
110 | year.
111 | evaluators:
112 | - IsInstance: TimeRangeBuilderSuccess
113 | evaluators:
114 | - LLMJudge:
115 | Ensure explanation or error_message is in second person. Provide helpful
116 | but concise feedback. Must not conflict with user question. Must not be insulting.
117 | The user is the primary audience.
118 |
--------------------------------------------------------------------------------
/2025-05-odsc/evals/datasets/time_range_v2.yaml:
--------------------------------------------------------------------------------
1 | # yaml-language-server: $schema=time_range_v2_schema.json
2 | cases:
3 | - name: Single time point
4 | inputs:
5 | prompt: I'd like logs from 2 PM on 2024-05-20.
6 | now: '2024-05-20T14:15:00Z'
7 | expected_output:
8 | min_timestamp_with_offset: '2024-05-20T14:00:00Z'
9 | max_timestamp_with_offset: '2024-05-20T14:30:00Z'
10 | explanation: You provided a single time (2 PM), so we selected a short window
11 | around that time.
12 | evaluators:
13 | - IsInstance: TimeRangeBuilderSuccess
14 | - AgentCalledTool:
15 | agent_name: time_range_agent
16 | tool_name: get_current_time
17 | - name: Ambiguous request for tomorrow
18 | inputs:
19 | prompt: Show me what's going on tomorrow.
20 | now: '2024-05-19T09:00:00Z'
21 | expected_output:
22 | min_timestamp_with_offset: '2024-05-20T00:00:00Z'
23 | max_timestamp_with_offset: '2024-05-20T23:59:59Z'
24 | explanation: We interpreted 'tomorrow' as the entire next day in UTC.
25 | evaluators:
26 | - IsInstance: TimeRangeBuilderSuccess
27 | - LLMJudge: We want the entire next day in UTC to cover all events tomorrow.
28 | - name: Future logs not available
29 | inputs:
30 | prompt: I'd like logs from next year.
31 | now: '2025-01-01T00:00:00Z'
32 | expected_output:
33 | error_message: We cannot generate a time range in the future based on the provided
34 | 'now'.
35 | evaluators:
36 | - IsInstance: TimeRangeBuilderError
37 | - name: No time reference at all
38 | inputs:
39 | prompt: Give me the logs.
40 | now: '2024-01-01T00:00:00Z'
41 | expected_output:
42 | error_message: No time references were detected in your request.
43 | evaluators:
44 | - IsInstance: TimeRangeBuilderError
45 | - name: Exact small range
46 | inputs:
47 | prompt: Could I see logs from 3:00 PM to 3:45 PM on Feb 10, 2024?
48 | now: '2024-02-10T15:30:00Z'
49 | expected_output:
50 | min_timestamp_with_offset: '2024-02-10T15:00:00Z'
51 | max_timestamp_with_offset: '2024-02-10T15:45:00Z'
52 | explanation: You specifically requested logs between 3:00 PM and 3:45 PM.
53 | evaluators:
54 | - IsInstance: TimeRangeBuilderSuccess
55 | - name: All-day request
56 | inputs:
57 | prompt: I need logs from October 25th, 2024.
58 | now: '2024-10-24T10:00:00Z'
59 | expected_output:
60 | min_timestamp_with_offset: '2024-10-25T00:00:00Z'
61 | max_timestamp_with_offset: '2024-10-25T23:59:59Z'
62 | explanation: We interpreted the request for October 25, 2024 as the entire day
63 | in UTC.
64 | evaluators:
65 | - IsInstance: TimeRangeBuilderSuccess
66 | - name: Unrecognized date format
67 | inputs:
68 | prompt: Get logs from 13/13/2024.
69 | now: '2024-10-24T10:00:00Z'
70 | expected_output:
71 | error_message: We could not recognize a valid date from your request.
72 | evaluators:
73 | - IsInstance: TimeRangeBuilderError
74 | - name: Ambiguous reference to next weekend
75 | inputs:
76 | prompt: I want logs from next weekend.
77 | now: '2024-05-16T12:00:00Z'
78 | expected_output:
79 | min_timestamp_with_offset: '2024-05-18T00:00:00Z'
80 | max_timestamp_with_offset: '2024-05-19T23:59:59Z'
81 | explanation: We interpreted 'next weekend' as Saturday and Sunday following your
82 | current date.
83 | evaluators:
84 | - IsInstance: TimeRangeBuilderSuccess
85 | - LLMJudge: We assume the user wants the entire upcoming Saturday and Sunday in
86 | UTC.
87 | - name: Last night logs
88 | inputs:
89 | prompt: Show me the logs from last night.
90 | now: '2024-08-01T09:00:00Z'
91 | expected_output:
92 | min_timestamp_with_offset: '2024-07-31T20:00:00Z'
93 | max_timestamp_with_offset: '2024-08-01T06:00:00Z'
94 | explanation: We interpreted 'last night' as 8 PM to 6 AM prior to your current
95 | morning time.
96 | evaluators:
97 | - IsInstance: TimeRangeBuilderSuccess
98 | - name: Cross-year boundary
99 | inputs:
100 | prompt: Show me logs from 2024-12-31 23:59 UTC to 2025-01-01 00:15 UTC.
101 | now: '2025-01-01T12:00:00Z'
102 | expected_output:
103 | min_timestamp_with_offset: '2024-12-31T23:59:00Z'
104 | max_timestamp_with_offset: '2025-01-01T00:15:00Z'
105 | explanation: We selected the precise range you requested, crossing into the new
106 | year.
107 | evaluators:
108 | - IsInstance: TimeRangeBuilderSuccess
109 | evaluators:
110 | - LLMJudge: Ensure explanation or error_message is in second person. Provide helpful
111 | but concise feedback. Must not conflict with user question. Must not be insulting.
112 | The user is the primary audience.
113 | - ValidateTimeRange
114 | - UserMessageIsConcise
115 |
--------------------------------------------------------------------------------
/2025-05-odsc/memory_messages.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import asyncio
4 | from collections.abc import AsyncIterator
5 | from contextlib import asynccontextmanager
6 | from typing import TYPE_CHECKING
7 |
8 | import asyncpg
9 | from pydantic_ai import Agent
10 | from pydantic_ai.messages import ModelMessage, ModelMessagesTypeAdapter
11 |
# hack to get around asyncpg's poor typing support
# (the generic subscript presumably only exists in the type stubs, not at
# runtime — TODO confirm against the installed asyncpg version)
if TYPE_CHECKING:
    DbConn = asyncpg.Connection[asyncpg.Record]
else:
    DbConn = asyncpg.Connection


import logfire

# Instrument both the agent framework and the database driver so agent runs
# and SQL queries appear as spans under the 'mem-msgs' service.
logfire.configure(service_name='mem-msgs')
logfire.instrument_pydantic_ai()
logfire.instrument_asyncpg()
24 |
25 |
@asynccontextmanager
async def db() -> AsyncIterator[DbConn]:
    """Yield a Postgres connection with the `messages` table ensured.

    The connection is closed when the context exits.
    """
    connection = await asyncpg.connect('postgresql://postgres@localhost:5432')
    # Idempotent schema setup so the demo works against a fresh database.
    await connection.execute("""
        create table if not exists messages(
            id serial primary key,
            ts timestamp not null default now(),
            user_id integer not null,
            messages json not null
        )
    """)

    try:
        yield connection
    finally:
        await connection.close()
42 |
43 |
# A minimal agent: memory comes from the persisted message history passed to
# each run, not from tools.
agent = Agent(
    'openai:gpt-4o',
    instructions='You are a helpful assistant.',
)
48 |
49 |
@logfire.instrument
async def run_agent(prompt: str, user_id: int):
    """Run the agent with this user's stored history, then persist the new turns."""
    async with db() as conn:
        with logfire.span('retrieve messages'):
            rows = await conn.fetch(
                'SELECT messages FROM messages WHERE user_id = $1 order by ts', user_id
            )
            # Each row holds one JSON-encoded batch of messages; flatten them
            # in timestamp order to rebuild the conversation.
            messages: list[ModelMessage] = [
                message
                for row in rows
                for message in ModelMessagesTypeAdapter.validate_json(row[0])
            ]

        result = await agent.run(prompt, message_history=messages)
        print(result.output)

        with logfire.span('record messages'):
            serialized = result.new_messages_json().decode()
            await conn.execute(
                'INSERT INTO messages(user_id, messages) VALUES($1, $2)',
                user_id,
                serialized,
            )
68 |
69 |
async def memory_messages():
    """Demo: a fact stated in one run is recalled in a later run via stored history."""
    for prompt in ('My name is Samuel.', 'What is my name?'):
        await run_agent(prompt, 123)
74 |
75 |
# Script entry point: run the two-turn memory demo.
if __name__ == '__main__':
    asyncio.run(memory_messages())
78 |
--------------------------------------------------------------------------------
/2025-05-odsc/memory_tools.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import asyncio
4 | from collections.abc import AsyncIterator
5 | from contextlib import asynccontextmanager
6 | from dataclasses import dataclass
7 | from typing import TYPE_CHECKING
8 |
9 | import asyncpg
10 | from pydantic_ai import Agent, RunContext
11 |
# hack to get around asyncpg's poor typing support
# (the generic subscript presumably only exists in the type stubs, not at
# runtime — TODO confirm against the installed asyncpg version)
if TYPE_CHECKING:
    DbConn = asyncpg.Connection[asyncpg.Record]
else:
    DbConn = asyncpg.Connection


import logfire

# Instrument both the agent framework and the database driver so agent runs
# and SQL queries appear as spans under the 'mem-tool' service.
logfire.configure(service_name='mem-tool')
logfire.instrument_pydantic_ai()
logfire.instrument_asyncpg()
24 |
25 |
@asynccontextmanager
async def db(reset: bool = False) -> AsyncIterator[DbConn]:
    """Yield a Postgres connection with the `memory` table ensured.

    When `reset` is true the table is dropped first, giving the demo a clean
    slate. The connection is closed when the context exits.
    """
    connection = await asyncpg.connect('postgresql://postgres@localhost:5432')
    if reset:
        await connection.execute('drop table if exists memory')
    # Idempotent schema setup; the unique constraint deduplicates memories.
    await connection.execute("""
        create table if not exists memory(
            id serial primary key,
            user_id integer not null,
            value text not null,
            unique(user_id, value)
        )
    """)

    try:
        yield connection
    finally:
        await connection.close()
44 |
45 |
@dataclass
class Deps:
    """Per-run dependencies injected into tools via RunContext."""

    user_id: int  # memories are scoped to this user
    conn: DbConn  # open asyncpg connection, owned by the caller
50 |
51 |
# This agent's long-term memory is implemented with the two tools below,
# which read/write the `memory` table scoped by Deps.user_id.
agent = Agent(
    'openai:gpt-4o',
    deps_type=Deps,
    instructions='You are a helpful assistant.',
)
57 |
58 |
@agent.tool
async def record_memory(ctx: RunContext[Deps], value: str) -> str:
    """Use this tool to store information in memory."""
    # Duplicate (user_id, value) pairs are silently ignored thanks to the
    # table's unique constraint + ON CONFLICT DO NOTHING.
    sql = 'insert into memory(user_id, value) values($1, $2) on conflict do nothing'
    await ctx.deps.conn.execute(sql, ctx.deps.user_id, value)
    return 'Value added to memory.'
68 |
69 |
@agent.tool
async def retrieve_memories(ctx: RunContext[Deps], memory_contains: str) -> str:
    """Search the user's memories, returning those containing the given text."""
    # Fix: the old docstring said "Get all memories about the user", but the
    # query filters with a case-insensitive substring match — the docstring is
    # the tool description the model sees, so it must describe the filter.
    rows = await ctx.deps.conn.fetch(
        'select value from memory where user_id = $1 and value ilike $2',
        ctx.deps.user_id,
        f'%{memory_contains}%',
    )
    return '\n'.join(row[0] for row in rows)
79 |
80 |
async def memory_tools():
    """Demo: the agent stores a fact via a tool, then retrieves it in a fresh run."""
    async with db(True) as conn:
        result = await agent.run('My name is Samuel.', deps=Deps(123, conn))
        print(result.output)

    # time goes by...

    async with db() as conn:
        result = await agent.run('What is my name?', deps=Deps(123, conn))
        print(result.output)
93 |
94 |
# Script entry point: run the tool-based memory demo.
if __name__ == '__main__':
    asyncio.run(memory_tools())
97 |
--------------------------------------------------------------------------------
/2025-05-odsc/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "2025-05-odsc"
3 | version = "0"
4 | description = "An Opinionated Blueprint for the Future of GenAI Applications"
5 | readme = "README.md"
6 | requires-python = ">=3.13"
7 | dependencies = [
8 | "asyncpg>=0.30.0",
9 | "devtools>=0.12.2",
10 | "logfire[asyncpg,httpx]>=3.14.0",
11 | "pydantic-ai>=0.1.3",
12 | "qrcode>=8.1",
13 | ]
14 |
15 | [tool.ruff]
16 | line-length = 88
17 | target-version = "py313"
18 |
19 | [tool.ruff.lint]
20 | extend-select = ["Q", "RUF100", "C90", "UP", "I"]
21 | flake8-quotes = { inline-quotes = "single", multiline-quotes = "double" }
22 | isort = { combine-as-imports = true }
23 | mccabe = { max-complexity = 15 }
24 |
25 | [tool.ruff.lint.pydocstyle]
26 | convention = "google"
27 |
28 | [tool.ruff.format]
29 | # don't format python in docstrings, pytest-examples takes care of it
30 | docstring-code-format = false
31 | quote-style = "single"
32 |
33 | [tool.pyright]
34 | pythonVersion = "3.13"
35 | typeCheckingMode = "strict"
36 | reportUnnecessaryTypeIgnoreComment = true
37 | include = ["**/*.py"]
38 | venvPath = ".venv"
39 |
40 | [dependency-groups]
41 | dev = ["asyncpg-stubs>=0.30.1", "ruff>=0.11.6"]
42 |
--------------------------------------------------------------------------------
/2025-05-pycon-us/.python-version:
--------------------------------------------------------------------------------
1 | 3.13
2 |
--------------------------------------------------------------------------------
/2025-05-pycon-us/README.md:
--------------------------------------------------------------------------------
1 | # Pycon US, Pittsburgh, May 2025
2 |
3 | Slides at
4 |
5 | ## whoami
6 |
7 | **Samuel Colvin** — creator of Pydantic
8 |
9 | Pydantic:
10 | * Python library for data validation
11 | * Created Pydantic in 2017 — long before Gen AI
12 | * Now downloaded ~350M per month
13 | * Used by all of FAANG
14 | * Used by virtually every GenAI Python library — both provider SDKs and Agent Frameworks
15 |
16 | Became a company (Pydantic Labs), backed by Sequoia in 2023, released:
17 | * Pydantic Logfire (developer observability)
18 | * Pydantic AI (agent framework)
19 |
20 | **Come to our booth for Logfire demo, t-shirts, prize draw**
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 | ## what
38 |
39 | **Building AI Applications the Pydantic way**
40 |
41 | * Everything is changing
42 | * ...except when it's not: people still want to build
43 | reliable, scalable applications, and that's still hard.
44 |
45 | In this talk, we'll use **PydanticAI** & **Pydantic Logfire** to demonstrate:
46 | * How to build typesafe agents — important for production,
47 | even more important for development
48 | * The power of MCP for autonomous agents
49 | * How evals fit into the picture
50 |
51 | * The importance of tracing and observability for AI Applications
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 | ## What is an "Agent"?
67 |
68 | This has been widely covered, but still seems to be a subject of dispute, so I'll explain what I mean.
69 |
70 | From [Building effective agents](https://www.anthropic.com/engineering/building-effective-agents).
71 |
72 | 
73 |
74 | From [How We Build Effective Agents: Barry Zhang, Anthropic](https://youtu.be/D7_ipDqhtwk?&t=358)
75 |
76 | **Agents are models using tools in a loop**
77 | ```py
78 | env = Environment()
79 | tools = Tools(env)
80 | system_prompt = "Goals, constraints, and how to act"
81 |
82 | while True:
83 | action = llm.run(system_prompt + env.state)
84 | env.state = tools.run(action)
85 | ```
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
98 |
99 |
100 | ## Enough pseudo code, show me a real example
101 |
102 | ```py {title="simplest_agent.py"}
103 | from datetime import date
104 | from pydantic_ai import Agent
105 | from pydantic import BaseModel
106 |
107 |
108 | class Person(BaseModel):
109 | name: str
110 | dob: date
111 | city: str
112 |
113 |
114 | agent = Agent(
115 | 'openai:gpt-4o',
116 | output_type=Person,
117 | instructions='Extract information about the person',
118 | )
119 | result = agent.run_sync("Samuel lived in London and was born on Jan 28th '87")
120 | print(repr(result.output))
121 | ```
122 |
123 | This doesn't look much like a loop, but what if validation fails...
124 |
125 | ```py title="agent_retry.py"
126 | from datetime import date
127 | from pydantic_ai import Agent
128 | from pydantic import BaseModel, field_validator
129 |
130 | import logfire
131 | logfire.configure(service_name='agent-retry')
132 | logfire.instrument_pydantic_ai()
133 |
134 | class Person(BaseModel):
135 | """Definition of an historic person"""
136 | name: str
137 | dob: date
138 | city: str
139 |
140 | @field_validator('dob')
141 | def validate_dob(cls, v: date) -> date:
142 | if v >= date(1900, 1, 1):
143 | raise ValueError('The person must be born in the 19th century')
144 | return v
145 |
146 |
147 | agent = Agent(
148 | 'google-vertex:gemini-2.0-flash',
149 | output_type=Person,
150 | instructions='Extract information about the person',
151 | )
152 | result = agent.run_sync("Samuel lived in London and was born on Jan 28th '87")
153 | print(repr(result.output))
154 | ```
155 |
156 |
157 |
158 |
159 |
160 |
161 |
162 |
163 |
164 |
165 |
166 |
167 |
168 |
169 | ## But don't Agents need tools?
170 |
171 | Here we demonstrate tools, dependencies and type safety with a tool used to record memories.
172 |
173 | ```py {title="memory_tools.py"}
174 | ...
175 | agent = Agent(
176 | 'openai:gpt-4o',
177 | deps_type=Deps,
178 | instructions='You are a helpful assistant.',
179 | )
180 |
181 |
182 | @agent.tool
183 | async def record_memory(ctx: RunContext[Deps], value: str) -> str:
184 | """Use this tool to store information in memory."""
185 | await ctx.deps.conn.execute(
186 | 'insert into memory(user_id, value) values($1, $2) on conflict do nothing', ctx.deps.user_id, value
187 | )
188 | return 'Value added to memory.'
189 |
190 |
191 | @agent.tool
192 | async def retrieve_memories(ctx: RunContext[Deps]) -> str:
193 | """Get all memories about the user."""
194 | rows = await ctx.deps.conn.fetch('select value from memory where user_id = $1', ctx.deps.user_id)
195 | return '\n'.join(row[0] for row in rows)
196 | ...
197 | ```
198 |
199 | We can also achieve memory by persisting message history:
200 |
201 | ```py {title="memory_messages.py"}
202 | async def run_agent(prompt: str, user_id: int):
203 | async with db() as conn:
204 | with logfire.span('retrieve messages'):
205 | messages: list[ModelMessage] = []
206 | for row in await conn.fetch('SELECT messages FROM messages WHERE user_id = $1 order by ts', user_id):
207 | messages += ModelMessagesTypeAdapter.validate_json(row[0])
208 |
209 | result = await agent.run(prompt, message_history=messages)
210 | print(result.output)
211 |
212 | with logfire.span('record messages'):
213 | msgs = result.new_messages_json().decode()
214 | await conn.execute('INSERT INTO messages(user_id, messages) VALUES($1, $2)', user_id, msgs)
215 | ```
216 |
217 |
218 |
219 |
220 |
221 |
222 |
223 |
224 |
225 |
226 |
227 | ## MCP
228 |
229 | Tools are great. Model Context Protocol gives us lots of tools and LOTS more.
230 |
231 | ```py {title="mcp-run-python"}
232 | import asyncio
233 |
234 | from pydantic_ai import Agent
235 | from pydantic_ai.mcp import MCPServerStdio
236 |
237 | import logfire
238 | logfire.configure(service_name='mcp-run-python')
239 | logfire.instrument_pydantic_ai()
240 | logfire.instrument_mcp()
241 |
242 | server = MCPServerStdio(
243 | 'deno',
244 | args=[
245 | 'run',
246 | '-N',
247 | '-R=node_modules',
248 | '-W=node_modules',
249 | '--node-modules-dir=auto',
250 | 'jsr:@pydantic/mcp-run-python',
251 | 'stdio',
252 | ]
253 | )
254 | agent = Agent('openai:gpt-4o', mcp_servers=[server])
255 |
256 |
257 | async def main():
258 | async with agent.run_mcp_servers():
259 | result = await agent.run('How many days between 2000-01-01 and 2025-03-18?')
260 | print(result.output)
261 |
262 | if __name__ == '__main__':
263 | asyncio.run(main())
264 | ```
265 |
266 | But there are lots of powerful MCP servers, including browser control:
267 |
268 | ```py {title="browser_mcp.py"}
269 | ...
270 |
271 | async def main():
272 | async with agent.run_mcp_servers():
273 | result = await agent.run(
274 | 'get the most recent blog post from pydantic.dev '
275 | 'which should contain multiple announcements, '
276 |             'summarize those announcements as a list.'
277 | )
278 | print(result.output)
279 |
280 | ...
281 | ```
282 |
283 |
284 |
285 |
286 |
287 |
288 |
289 |
290 |
291 |
292 |
293 |
294 |
295 |
296 |
297 |
298 |
299 | ## Evals
300 |
301 | Let's switch to evals, time to dive into some code.
302 |
303 |
304 |
305 |
306 |
307 |
308 |
309 |
310 |
311 |
312 |
313 |
314 |
315 |
316 |
317 |
318 |
319 |
320 |
321 |
322 | ## PydanticAI is unfinished, what's next?
323 |
324 | * Structured outputs without tools
325 | * end an agent run from within a tool
326 | * MCP sampling — client support
327 | * MCP sampling — server support
328 | * allow more control of how MCP tools are registered with the model, and run
329 | * `mcp-run-python` — calling back to the client/host
330 | * Graph changes
331 |
332 | Most importantly:
333 |
334 | * **We need stability — planning to release V1 in June**
335 |
--------------------------------------------------------------------------------
/2025-05-pycon-us/agent-loop.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pydantic/talks/47b2837f290a3955983f534e7563afc174ff172f/2025-05-pycon-us/agent-loop.png
--------------------------------------------------------------------------------
/2025-05-pycon-us/browser_mcp.py:
--------------------------------------------------------------------------------
1 | import logfire
2 | from pydantic_ai import Agent
3 | from pydantic_ai.mcp import MCPServerStdio
4 |
# Scrubbing is disabled so full tool arguments (URLs, page text) remain
# visible in the demo traces.
logfire.configure(scrubbing=False, service_name='browser')
logfire.instrument_mcp()
logfire.instrument_pydantic_ai()

# Playwright MCP server launched via npx. Fix: npx's auto-confirm flag is
# lowercase `-y`; the previous `-Y` is not a valid npx flag and broke the
# server launch.
browser_mcp = MCPServerStdio('npx', args=['-y', '@playwright/mcp@latest'])

agent = Agent(
    'anthropic:claude-3-7-sonnet-latest',
    mcp_servers=[browser_mcp],
)
15 |
16 |
async def main():
    """Ask the browser-equipped agent to summarize the latest Pydantic blog post."""
    async with agent.run_mcp_servers():
        # Fix: prompt typos ('summaries those annoucements') corrected — the
        # prompt is the instruction the model follows.
        result = await agent.run(
            'get the most recent blog post from pydantic.dev '
            'which should contain multiple announcements, '
            'summarize those announcements as a list.'
        )
        print(result.output)
25 |
26 |
# Script entry point; asyncio imported lazily since it is only needed here.
if __name__ == '__main__':
    import asyncio

    asyncio.run(main())
31 |
--------------------------------------------------------------------------------
/2025-05-pycon-us/evals/01_generate_dataset.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from pathlib import Path
3 | from types import NoneType
4 |
5 | from pydantic_evals import Dataset
6 | from pydantic_evals.generation import generate_dataset
7 |
8 | from agent import TimeRangeInputs, TimeRangeResponse
9 |
import logfire

# Trace the model calls made while generating the dataset.
logfire.configure()
logfire.instrument_pydantic_ai()
14 |
15 |
async def main():
    """Generate the v1 eval dataset with an LLM and write it to datasets/."""
    dataset = await generate_dataset(
        dataset_type=Dataset[TimeRangeInputs, TimeRangeResponse, NoneType],
        model='openai:o1',  # Use a smarter model since this is a more complex task that is only run once
        n_examples=10,
        extra_instructions="""
        Generate a dataset of test cases for the time range inference agent.

        Include a variety of inputs that might be given to the agent, including some where the only
        reasonable response is a `TimeRangeBuilderError`, and some where a `TimeRangeBuilderSuccess` is
        expected. Make use of the `IsInstance` evaluator to ensure that the inputs and outputs are of the appropriate
        type.

        When appropriate, use the `LLMJudge` evaluator to provide a more precise description of the time range the
        agent should have inferred. In particular, it's good if the example user inputs are somewhat ambiguous, to
        reflect realistic (difficult-to-handle) user questions, but the LLMJudge evaluator can help ensure that the
        agent's output is still judged based on precisely what the desired behavior is even for somewhat ambiguous
        user questions. You do not need to include LLMJudge evaluations for all cases (in particular, for cases where
        the expected output is unambiguous from the user's question), but you should include at least one or two
        examples that do benefit from an LLMJudge evaluation (and include it).

        To be clear, the LLMJudge rubrics should be concise and reflect only information that is NOT ALREADY PRESENT
        in the user prompt for the example.

        Leave the model and include_input arguments to LLMJudge as their default values (null).

        Also add a dataset-wide LLMJudge evaluator to ensure that the 'explanation' or 'error_message' fields are
        appropriate to be displayed to the user (e.g., written in second person, etc.).
        """,
    )

    # Written as YAML so the generated dataset can be reviewed and hand-edited.
    dataset.to_file(
        Path(__file__).parent / 'datasets' / 'time_range_v1.yaml',
        fmt='yaml',
    )
51 |
52 |
# Script entry point: generate the dataset once.
if __name__ == '__main__':
    asyncio.run(main())
55 |
--------------------------------------------------------------------------------
/2025-05-pycon-us/evals/02_add_custom_evaluators.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from types import NoneType
3 |
4 | from pydantic_evals import Dataset
5 |
6 | from custom_evaluators import (
7 | CUSTOM_EVALUATOR_TYPES,
8 | AgentCalledTool,
9 | UserMessageIsConcise,
10 | ValidateTimeRange,
11 | )
12 | from agent import TimeRangeInputs, TimeRangeResponse
13 |
14 |
def main():
    """Load the v1 dataset, attach the custom evaluators, and save it as v2."""
    datasets_dir = Path(__file__).parent / 'datasets'
    dataset = Dataset[TimeRangeInputs, TimeRangeResponse, NoneType].from_file(
        datasets_dir / 'time_range_v1.yaml'
    )
    # Dataset-wide checks applied to every case.
    dataset.add_evaluator(ValidateTimeRange())
    dataset.add_evaluator(UserMessageIsConcise())
    # This check only makes sense for the one case that implies a tool call.
    dataset.add_evaluator(
        AgentCalledTool('time_range_agent', 'get_current_time'),
        specific_case='Single time point',
    )
    dataset.to_file(
        datasets_dir / 'time_range_v2.yaml',
        custom_evaluator_types=CUSTOM_EVALUATOR_TYPES,
    )
30 |
31 |
# Script entry point: upgrade the dataset file in place.
if __name__ == '__main__':
    main()
34 |
--------------------------------------------------------------------------------
/2025-05-pycon-us/evals/03_unit_testing.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from types import NoneType
3 |
4 | import logfire
5 | from agent import TimeRangeInputs, TimeRangeResponse, infer_time_range
6 | from custom_evaluators import CUSTOM_EVALUATOR_TYPES
7 | from pydantic_evals import Dataset
8 |
# Tag the run so evals are easy to find and compare in Logfire.
logfire.configure(
    environment='development', service_name='evals', service_version='0.0.1'
)
logfire.instrument_pydantic_ai()

# Load the curated v2 dataset, including the custom evaluator classes.
dataset_path = Path(__file__).parent / 'datasets' / 'time_range_v2.yaml'
dataset = Dataset[TimeRangeInputs, TimeRangeResponse, NoneType].from_file(
    dataset_path, custom_evaluator_types=CUSTOM_EVALUATOR_TYPES
)
# Run every case against the real agent and print the report table.
report = dataset.evaluate_sync(infer_time_range)
print(report)

# Fail the script if fewer than 80% of boolean assertions passed.
# NOTE(review): plain asserts are stripped under `python -O`; acceptable for
# a demo/test script, but a CI gate should raise explicitly.
assertion_pass_rate = report.averages().assertions
assert assertion_pass_rate is not None, 'There should be at least one assertion'
assert assertion_pass_rate > 0.8, (
    f'The assertion pass rate was {assertion_pass_rate:.1%}; it should be above 80%.'
)
26 |
--------------------------------------------------------------------------------
/2025-05-pycon-us/evals/04_compare_models.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from types import NoneType
3 |
4 | import logfire
5 | from pydantic_evals import Dataset
6 |
7 | from custom_evaluators import CUSTOM_EVALUATOR_TYPES
8 | from agent import time_range_agent, infer_time_range, TimeRangeInputs, TimeRangeResponse
9 |
# Tag the run so the comparison is easy to find in Logfire.
logfire.configure(environment='development', service_name='evals')
logfire.instrument_pydantic_ai()

dataset_path = Path(__file__).parent / 'datasets' / 'time_range_v2.yaml'
dataset = Dataset[TimeRangeInputs, TimeRangeResponse, NoneType].from_file(
    dataset_path, custom_evaluator_types=CUSTOM_EVALUATOR_TYPES
)

# Evaluate the same dataset once per candidate model, grouped under a single
# parent span; the report name is the model identifier.
with logfire.span('Comparing different models for time_range_agent'):
    for model in ('openai:gpt-4o', 'anthropic:claude-3-7-sonnet-latest'):
        with time_range_agent.override(model=model):
            dataset.evaluate_sync(infer_time_range, name=model)
24 |
--------------------------------------------------------------------------------
/2025-05-pycon-us/evals/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pydantic/talks/47b2837f290a3955983f534e7563afc174ff172f/2025-05-pycon-us/evals/__init__.py
--------------------------------------------------------------------------------
/2025-05-pycon-us/evals/agent.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations as _annotations
2 |
3 | from dataclasses import dataclass, field
4 | from datetime import datetime
5 |
6 | from devtools import debug
7 | from pydantic import AwareDatetime, BaseModel
8 | from pydantic_ai import Agent, RunContext
9 | from typing_extensions import TypedDict
10 |
11 |
# NOTE: with use_attribute_docstrings=True the attribute docstrings below
# become the field descriptions in the JSON schema the model sees — edit them
# with care, they are part of the prompt surface.
class TimeRangeBuilderSuccess(BaseModel, use_attribute_docstrings=True):
    """Response when a time range could be successfully generated."""

    min_timestamp_with_offset: AwareDatetime
    """A datetime in ISO format with timezone offset."""
    max_timestamp_with_offset: AwareDatetime
    """A datetime in ISO format with timezone offset."""
    explanation: str | None
    """
    A brief explanation of the time range that was selected.

    For example, if a user only mentions a specific point in time, you might explain that you selected a 10 minute
    window around that time.
    """

    def __str__(self):
        # Human-friendly rendering (used e.g. when debugging/printing results).
        lines = [
            'TimeRangeBuilderSuccess:',
            f'* min_timestamp_with_offset: {self.min_timestamp_with_offset:%A, %B %d, %Y %H:%M:%S %Z}',
            f'* max_timestamp_with_offset: {self.max_timestamp_with_offset:%A, %B %d, %Y %H:%M:%S %Z}',
        ]
        if self.explanation is not None:
            lines.append(f'* explanation: {self.explanation}')
        return '\n'.join(lines)
36 |
37 |
class TimeRangeBuilderError(BaseModel):
    """Response when a time range cannot be generated."""

    # Fix: docstring previously read "cannot not be generated" — a double
    # negative that inverted the meaning; this text feeds the output schema
    # description shown to the model.
    error_message: str

    def __str__(self):
        return f'TimeRangeBuilderError:\n* {self.error_message}'
45 |
46 |
# The agent returns either a successful range or a structured error.
TimeRangeResponse = TimeRangeBuilderSuccess | TimeRangeBuilderError


class TimeRangeInputs(TypedDict):
    """The inputs for the time range inference agent."""

    prompt: str  # the user's natural-language request
    now: AwareDatetime  # the user's current, timezone-aware time
55 |
56 |
@dataclass
class TimeRangeDeps:
    """Agent dependencies: the reference 'now' used to resolve relative dates."""

    now: datetime = field(default_factory=lambda: datetime.now().astimezone())
60 |
61 |
# Agent that converts natural-language requests into structured time ranges.
# Generic parameters are spelled explicitly because output_type is a union
# (see the type: ignore below).
time_range_agent = Agent[TimeRangeDeps, TimeRangeResponse](
    'openai:gpt-4o',
    output_type=TimeRangeResponse,  # type: ignore # we can't yet annotate something as receiving a TypeForm
    deps_type=TimeRangeDeps,
    system_prompt='Convert the user request into a structured time range.',
    retries=1,  # allow one retry when output validation fails
    instrument=True,  # emit tracing spans for each run
)
70 |
71 |
@time_range_agent.tool
def get_current_time(ctx: RunContext[TimeRangeDeps]) -> str:
    """Get the user's current time and timezone in the format 'Friday, November 22, 2024 11:15:14 PST'."""
    # The return value is shown to the model verbatim, so it is phrased as a
    # complete, human-readable sentence.
    return f"The user's current time is {ctx.deps.now:%A, %B %d, %Y %H:%M:%S %Z}."
76 |
77 |
async def infer_time_range(inputs: TimeRangeInputs) -> TimeRangeResponse:
    """Infer a time range from a user prompt."""
    # Task function used by the eval datasets: run the agent with the case's
    # 'now' as the reference time and unwrap the structured output.
    result = await time_range_agent.run(
        inputs['prompt'], deps=TimeRangeDeps(now=inputs['now'])
    )
    return result.output
82 |
83 |
# Quick manual smoke test: infer a range for "2pm yesterday" relative to now.
if __name__ == '__main__':
    import asyncio

    response = asyncio.run(
        infer_time_range(
            {'prompt': '2pm yesterday', 'now': datetime.now().astimezone()}
        )
    )

    debug(response)
94 |
--------------------------------------------------------------------------------
/2025-05-pycon-us/evals/custom_evaluators.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass
2 | from datetime import timedelta
3 |
4 | from agent import TimeRangeBuilderSuccess, TimeRangeInputs, TimeRangeResponse
5 | from pydantic_evals.evaluators import Evaluator, EvaluatorContext, EvaluatorOutput
6 | from pydantic_evals.otel import SpanQuery
7 |
8 |
@dataclass
class ValidateTimeRange(Evaluator[TimeRangeInputs, TimeRangeResponse]):
    """Sanity-check successful ranges: bounded length and not in the future."""

    def evaluate(
        self, ctx: EvaluatorContext[TimeRangeInputs, TimeRangeResponse]
    ) -> EvaluatorOutput:
        if not isinstance(ctx.output, TimeRangeBuilderSuccess):
            return {}  # No evaluation needed for errors

        start = ctx.output.min_timestamp_with_offset
        end = ctx.output.max_timestamp_with_offset
        return {
            'window_is_not_too_long': end - start <= timedelta(days=30),
            'window_is_not_in_the_future': end <= ctx.inputs['now'],
        }
23 |
24 |
@dataclass
class UserMessageIsConcise(Evaluator[TimeRangeInputs, TimeRangeResponse]):
    """Check the user-facing text (explanation or error) is under 50 words."""

    async def evaluate(
        self, ctx: EvaluatorContext[TimeRangeInputs, TimeRangeResponse]
    ) -> EvaluatorOutput:
        if isinstance(ctx.output, TimeRangeBuilderSuccess):
            message = ctx.output.explanation
        else:
            message = ctx.output.error_message

        if message is None:
            return {}  # nothing to judge when no message was produced
        return len(message.split()) < 50
39 |
40 |
@dataclass
class AgentCalledTool(Evaluator):
    """Pass when the named agent's run contains a call to the named tool."""

    agent_name: str
    tool_name: str

    def evaluate(self, ctx: EvaluatorContext) -> bool:
        tool_query = SpanQuery(
            name_equals='running tool',
            has_attributes={'gen_ai.tool.name': self.tool_name},
        )
        # Match this agent's own run span only, without recursing into
        # nested agent runs, and require the tool span somewhere below it.
        agent_query = SpanQuery(
            name_equals='agent run',
            has_attributes={'agent_name': self.agent_name},
            stop_recursing_when=SpanQuery(name_equals='agent run'),
            some_descendant_has=tool_query,
        )
        return ctx.span_tree.any(agent_query)
58 |
59 |
# All custom evaluator classes defined in this module, exported as one tuple
# so callers can register them together (e.g. when loading datasets that
# reference them by name).
CUSTOM_EVALUATOR_TYPES = ValidateTimeRange, UserMessageIsConcise, AgentCalledTool
61 |
--------------------------------------------------------------------------------
/2025-05-pycon-us/evals/datasets/time_range_v1.yaml:
--------------------------------------------------------------------------------
1 | # yaml-language-server: $schema=time_range_v1_schema.json
2 | cases:
3 | - name: Single time point
4 | inputs:
5 | prompt: I'd like logs from 2 PM on 2024-05-20.
6 | now: "2024-05-20T14:15:00Z"
7 | expected_output:
8 | min_timestamp_with_offset: "2024-05-20T14:00:00Z"
9 | max_timestamp_with_offset: "2024-05-20T14:30:00Z"
10 | explanation:
11 | You provided a single time (2 PM), so we selected a short window
12 | around that time.
13 | evaluators:
14 | - IsInstance: TimeRangeBuilderSuccess
15 | - name: Ambiguous request for tomorrow
16 | inputs:
17 | prompt: Show me what's going on tomorrow.
18 | now: "2024-05-19T09:00:00Z"
19 | expected_output:
20 | min_timestamp_with_offset: "2024-05-20T00:00:00Z"
21 | max_timestamp_with_offset: "2024-05-20T23:59:59Z"
22 | explanation: We interpreted 'tomorrow' as the entire next day in UTC.
23 | evaluators:
24 | - IsInstance: TimeRangeBuilderSuccess
25 | - LLMJudge: We want the entire next day in UTC to cover all events tomorrow.
26 | - name: Future logs not available
27 | inputs:
28 | prompt: I'd like logs from next year.
29 | now: "2025-01-01T00:00:00Z"
30 | expected_output:
31 | error_message:
32 | We cannot generate a time range in the future based on the provided
33 | 'now'.
34 | evaluators:
35 | - IsInstance: TimeRangeBuilderError
36 | - name: No time reference at all
37 | inputs:
38 | prompt: Give me the logs.
39 | now: "2024-01-01T00:00:00Z"
40 | expected_output:
41 | error_message: No time references were detected in your request.
42 | evaluators:
43 | - IsInstance: TimeRangeBuilderError
44 | - name: Exact small range
45 | inputs:
46 | prompt: Could I see logs from 3:00 PM to 3:45 PM on Feb 10, 2024?
47 | now: "2024-02-10T15:30:00Z"
48 | expected_output:
49 | min_timestamp_with_offset: "2024-02-10T15:00:00Z"
50 | max_timestamp_with_offset: "2024-02-10T15:45:00Z"
51 | explanation: You specifically requested logs between 3:00 PM and 3:45 PM.
52 | evaluators:
53 | - IsInstance: TimeRangeBuilderSuccess
54 | - name: All-day request
55 | inputs:
56 | prompt: I need logs from October 25th, 2024.
57 | now: "2024-10-24T10:00:00Z"
58 | expected_output:
59 | min_timestamp_with_offset: "2024-10-25T00:00:00Z"
60 | max_timestamp_with_offset: "2024-10-25T23:59:59Z"
61 | explanation:
62 | We interpreted the request for October 25, 2024 as the entire day
63 | in UTC.
64 | evaluators:
65 | - IsInstance: TimeRangeBuilderSuccess
66 | - name: Unrecognized date format
67 | inputs:
68 | prompt: Get logs from 13/13/2024.
69 | now: "2024-10-24T10:00:00Z"
70 | expected_output:
71 | error_message: We could not recognize a valid date from your request.
72 | evaluators:
73 | - IsInstance: TimeRangeBuilderError
74 | - name: Ambiguous reference to next weekend
75 | inputs:
76 | prompt: I want logs from next weekend.
77 | now: "2024-05-16T12:00:00Z"
78 | expected_output:
79 | min_timestamp_with_offset: "2024-05-18T00:00:00Z"
80 | max_timestamp_with_offset: "2024-05-19T23:59:59Z"
81 | explanation:
82 | We interpreted 'next weekend' as Saturday and Sunday following your
83 | current date.
84 | evaluators:
85 | - IsInstance: TimeRangeBuilderSuccess
86 | - LLMJudge:
87 | We assume the user wants the entire upcoming Saturday and Sunday in
88 | UTC.
89 | - name: Last night logs
90 | inputs:
91 | prompt: Show me the logs from last night.
92 | now: "2024-08-01T09:00:00Z"
93 | expected_output:
94 | min_timestamp_with_offset: "2024-07-31T20:00:00Z"
95 | max_timestamp_with_offset: "2024-08-01T06:00:00Z"
96 | explanation:
97 | We interpreted 'last night' as 8 PM to 6 AM prior to your current
98 | morning time.
99 | evaluators:
100 | - IsInstance: TimeRangeBuilderSuccess
101 | - name: Cross-year boundary
102 | inputs:
103 | prompt: Show me logs from 2024-12-31 23:59 UTC to 2025-01-01 00:15 UTC.
104 | now: "2025-01-01T12:00:00Z"
105 | expected_output:
106 | min_timestamp_with_offset: "2024-12-31T23:59:00Z"
107 | max_timestamp_with_offset: "2025-01-01T00:15:00Z"
108 | explanation:
109 | We selected the precise range you requested, crossing into the new
110 | year.
111 | evaluators:
112 | - IsInstance: TimeRangeBuilderSuccess
113 | evaluators:
114 | - LLMJudge:
115 | Ensure explanation or error_message is in second person. Provide helpful
116 | but concise feedback. Must not conflict with user question. Must not be insulting.
117 | The user is the primary audience.
118 |
--------------------------------------------------------------------------------
/2025-05-pycon-us/evals/datasets/time_range_v2.yaml:
--------------------------------------------------------------------------------
1 | # yaml-language-server: $schema=time_range_v2_schema.json
2 | cases:
3 | - name: Single time point
4 | inputs:
5 | prompt: I'd like logs from 2 PM on 2024-05-20.
6 | now: '2024-05-20T14:15:00Z'
7 | expected_output:
8 | min_timestamp_with_offset: '2024-05-20T14:00:00Z'
9 | max_timestamp_with_offset: '2024-05-20T14:30:00Z'
10 | explanation: You provided a single time (2 PM), so we selected a short window
11 | around that time.
12 | evaluators:
13 | - IsInstance: TimeRangeBuilderSuccess
14 | - AgentCalledTool:
15 | agent_name: time_range_agent
16 | tool_name: get_current_time
17 | - name: Ambiguous request for tomorrow
18 | inputs:
19 | prompt: Show me what's going on tomorrow.
20 | now: '2024-05-19T09:00:00Z'
21 | expected_output:
22 | min_timestamp_with_offset: '2024-05-20T00:00:00Z'
23 | max_timestamp_with_offset: '2024-05-20T23:59:59Z'
24 | explanation: We interpreted 'tomorrow' as the entire next day in UTC.
25 | evaluators:
26 | - IsInstance: TimeRangeBuilderSuccess
27 | - LLMJudge: We want the entire next day in UTC to cover all events tomorrow.
28 | - name: Future logs not available
29 | inputs:
30 | prompt: I'd like logs from next year.
31 | now: '2025-01-01T00:00:00Z'
32 | expected_output:
33 | error_message: We cannot generate a time range in the future based on the provided
34 | 'now'.
35 | evaluators:
36 | - IsInstance: TimeRangeBuilderError
37 | - name: No time reference at all
38 | inputs:
39 | prompt: Give me the logs.
40 | now: '2024-01-01T00:00:00Z'
41 | expected_output:
42 | error_message: No time references were detected in your request.
43 | evaluators:
44 | - IsInstance: TimeRangeBuilderError
45 | - name: Exact small range
46 | inputs:
47 | prompt: Could I see logs from 3:00 PM to 3:45 PM on Feb 10, 2024?
48 | now: '2024-02-10T15:30:00Z'
49 | expected_output:
50 | min_timestamp_with_offset: '2024-02-10T15:00:00Z'
51 | max_timestamp_with_offset: '2024-02-10T15:45:00Z'
52 | explanation: You specifically requested logs between 3:00 PM and 3:45 PM.
53 | evaluators:
54 | - IsInstance: TimeRangeBuilderSuccess
55 | - name: All-day request
56 | inputs:
57 | prompt: I need logs from October 25th, 2024.
58 | now: '2024-10-24T10:00:00Z'
59 | expected_output:
60 | min_timestamp_with_offset: '2024-10-25T00:00:00Z'
61 | max_timestamp_with_offset: '2024-10-25T23:59:59Z'
62 | explanation: We interpreted the request for October 25, 2024 as the entire day
63 | in UTC.
64 | evaluators:
65 | - IsInstance: TimeRangeBuilderSuccess
66 | - name: Unrecognized date format
67 | inputs:
68 | prompt: Get logs from 13/13/2024.
69 | now: '2024-10-24T10:00:00Z'
70 | expected_output:
71 | error_message: We could not recognize a valid date from your request.
72 | evaluators:
73 | - IsInstance: TimeRangeBuilderError
74 | - name: Ambiguous reference to next weekend
75 | inputs:
76 | prompt: I want logs from next weekend.
77 | now: '2024-05-16T12:00:00Z'
78 | expected_output:
79 | min_timestamp_with_offset: '2024-05-18T00:00:00Z'
80 | max_timestamp_with_offset: '2024-05-19T23:59:59Z'
81 | explanation: We interpreted 'next weekend' as Saturday and Sunday following your
82 | current date.
83 | evaluators:
84 | - IsInstance: TimeRangeBuilderSuccess
85 | - LLMJudge: We assume the user wants the entire upcoming Saturday and Sunday in
86 | UTC.
87 | - name: Last night logs
88 | inputs:
89 | prompt: Show me the logs from last night.
90 | now: '2024-08-01T09:00:00Z'
91 | expected_output:
92 | min_timestamp_with_offset: '2024-07-31T20:00:00Z'
93 | max_timestamp_with_offset: '2024-08-01T06:00:00Z'
94 | explanation: We interpreted 'last night' as 8 PM to 6 AM prior to your current
95 | morning time.
96 | evaluators:
97 | - IsInstance: TimeRangeBuilderSuccess
98 | - name: Cross-year boundary
99 | inputs:
100 | prompt: Show me logs from 2024-12-31 23:59 UTC to 2025-01-01 00:15 UTC.
101 | now: '2025-01-01T12:00:00Z'
102 | expected_output:
103 | min_timestamp_with_offset: '2024-12-31T23:59:00Z'
104 | max_timestamp_with_offset: '2025-01-01T00:15:00Z'
105 | explanation: We selected the precise range you requested, crossing into the new
106 | year.
107 | evaluators:
108 | - IsInstance: TimeRangeBuilderSuccess
109 | evaluators:
110 | - LLMJudge: Ensure explanation or error_message is in second person. Provide helpful
111 | but concise feedback. Must not conflict with user question. Must not be insulting.
112 | The user is the primary audience.
113 | - ValidateTimeRange
114 | - UserMessageIsConcise
115 |
--------------------------------------------------------------------------------
/2025-05-pycon-us/memory_messages.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import asyncio
4 | from collections.abc import AsyncIterator
5 | from contextlib import asynccontextmanager
6 | from typing import TYPE_CHECKING
7 |
8 | import asyncpg
9 | from pydantic_ai import Agent
10 | from pydantic_ai.messages import ModelMessage, ModelMessagesTypeAdapter
11 |
12 | # hack to get around asyncpg's poor typing support
13 | if TYPE_CHECKING:
14 | DbConn = asyncpg.Connection[asyncpg.Record]
15 | else:
16 | DbConn = asyncpg.Connection
17 |
18 |
import logfire

# Report under a distinct service name, and instrument both the agent
# framework and the asyncpg driver so agent runs and SQL queries are recorded.
logfire.configure(service_name='mem-msgs')
logfire.instrument_pydantic_ai()
logfire.instrument_asyncpg()
24 |
25 |
@asynccontextmanager
async def db() -> AsyncIterator[DbConn]:
    """Open a Postgres connection and ensure the messages table exists.

    The connection is always closed when the context exits.
    """
    connection = await asyncpg.connect('postgresql://postgres@localhost:5432')
    await connection.execute("""
        create table if not exists messages(
            id serial primary key,
            ts timestamp not null default now(),
            user_id integer not null,
            messages json not null
        )
    """)

    try:
        yield connection
    finally:
        await connection.close()
42 |
43 |
# Plain chat agent; conversational memory comes from the message history
# loaded from Postgres in run_agent below, not from the agent itself.
agent = Agent(
    'openai:gpt-4o',
    instructions='You are a helpful assistant.',
)
48 |
49 |
@logfire.instrument
async def run_agent(prompt: str, user_id: int):
    """Run the agent with this user's stored history, then persist the new messages."""
    async with db() as conn:
        with logfire.span('retrieve messages'):
            history: list[ModelMessage] = []
            rows = await conn.fetch(
                'SELECT messages FROM messages WHERE user_id = $1 order by ts', user_id
            )
            for row in rows:
                history += ModelMessagesTypeAdapter.validate_json(row[0])

        result = await agent.run(prompt, message_history=history)
        print(result.output)

        with logfire.span('record messages'):
            new_msgs = result.new_messages_json().decode()
            await conn.execute(
                'INSERT INTO messages(user_id, messages) VALUES($1, $2)',
                user_id,
                new_msgs,
            )
68 |
69 |
async def memory_messages():
    """Demo: state a fact in one run, then recall it in a second run."""
    user_id = 123
    await run_agent('My name is Samuel.', user_id)
    await run_agent('What is my name?', user_id)
74 |
75 |
# Script entry point: run the two-turn memory demo.
if __name__ == '__main__':
    asyncio.run(memory_messages())
78 |
--------------------------------------------------------------------------------
/2025-05-pycon-us/memory_tools.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import asyncio
4 | from collections.abc import AsyncIterator
5 | from contextlib import asynccontextmanager
6 | from dataclasses import dataclass
7 | from typing import TYPE_CHECKING
8 |
9 | import asyncpg
10 | from pydantic_ai import Agent, RunContext
11 |
12 | # hack to get around asyncpg's poor typing support
13 | if TYPE_CHECKING:
14 | DbConn = asyncpg.Connection[asyncpg.Record]
15 | else:
16 | DbConn = asyncpg.Connection
17 |
18 |
import logfire

# Report under a distinct service name, and instrument both the agent
# framework and the asyncpg driver so agent runs and SQL queries are recorded.
logfire.configure(service_name='mem-tool')
logfire.instrument_pydantic_ai()
logfire.instrument_asyncpg()
24 |
25 |
@asynccontextmanager
async def db(reset: bool = False) -> AsyncIterator[DbConn]:
    """Open a Postgres connection and ensure the memory table exists.

    When ``reset`` is true, the table is dropped first for a clean slate.
    The connection is always closed when the context exits.
    """
    connection = await asyncpg.connect('postgresql://postgres@localhost:5432')
    if reset:
        await connection.execute('drop table if exists memory')
    await connection.execute("""
        create table if not exists memory(
            id serial primary key,
            user_id integer not null,
            value text not null,
            unique(user_id, value)
        )
    """)

    try:
        yield connection
    finally:
        await connection.close()
44 |
45 |
@dataclass
class Deps:
    """Per-run dependencies injected into the agent's tools via RunContext."""

    user_id: int  # which user's memories the tools read and write
    conn: DbConn  # open database connection (from db())
50 |
51 |
# Agent whose long-term memory is provided by the two database-backed tools below.
agent = Agent(
    'openai:gpt-4o',
    deps_type=Deps,
    instructions='You are a helpful assistant.',
)
57 |
58 |
@agent.tool
async def record_memory(ctx: RunContext[Deps], value: str) -> str:
    """Use this tool to store information in memory."""
    # NOTE: the docstring above is model-facing (the tool's description).
    # Duplicate values for the same user are ignored via the unique constraint.
    deps = ctx.deps
    await deps.conn.execute(
        'insert into memory(user_id, value) values($1, $2) on conflict do nothing',
        deps.user_id,
        value,
    )
    return 'Value added to memory.'
68 |
69 |
@agent.tool
async def retrieve_memories(ctx: RunContext[Deps], memory_contains: str) -> str:
    """Get all memories about the user."""
    # Case-insensitive substring match over this user's stored values.
    deps = ctx.deps
    pattern = f'%{memory_contains}%'
    rows = await deps.conn.fetch(
        'select value from memory where user_id = $1 and value ilike $2',
        deps.user_id,
        pattern,
    )
    return '\n'.join(row[0] for row in rows)
79 |
80 |
async def memory_tools():
    """Demo: store a fact in one session, then recall it in a fresh one."""
    async with db(True) as conn:
        result = await agent.run('My name is Samuel.', deps=Deps(123, conn))
        print(result.output)

    # time goes by...

    async with db() as conn:
        result = await agent.run('What is my name?', deps=Deps(123, conn))
        print(result.output)
93 |
94 |
# Script entry point: run the tool-based memory demo.
if __name__ == '__main__':
    asyncio.run(memory_tools())
97 |
--------------------------------------------------------------------------------
/2025-05-pycon-us/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
name = "2025-05-pycon-us"
3 | version = "0"
4 | description = "An Opinionated Blueprint for the Future of GenAI Applications"
5 | readme = "README.md"
6 | requires-python = ">=3.13"
7 | dependencies = [
8 | "asyncpg>=0.30.0",
9 | "devtools>=0.12.2",
10 | "logfire[asyncpg,httpx]>=3.14.0",
11 | "pydantic-ai>=0.1.3",
12 | "qrcode>=8.1",
13 | ]
14 |
15 | [tool.ruff]
16 | line-length = 88
17 | target-version = "py313"
18 |
19 | [tool.ruff.lint]
20 | extend-select = ["Q", "RUF100", "C90", "UP", "I"]
21 | flake8-quotes = { inline-quotes = "single", multiline-quotes = "double" }
22 | isort = { combine-as-imports = true }
23 | mccabe = { max-complexity = 15 }
24 |
25 | [tool.ruff.lint.pydocstyle]
26 | convention = "google"
27 |
28 | [tool.ruff.format]
29 | # don't format python in docstrings, pytest-examples takes care of it
30 | docstring-code-format = false
31 | quote-style = "single"
32 |
33 | [tool.pyright]
34 | pythonVersion = "3.13"
35 | typeCheckingMode = "strict"
36 | reportUnnecessaryTypeIgnoreComment = true
37 | include = ["**/*.py"]
38 | venvPath = ".venv"
39 |
40 | [dependency-groups]
41 | dev = ["asyncpg-stubs>=0.30.1", "ruff>=0.11.6"]
42 |
--------------------------------------------------------------------------------
/2025-06-ai-engineer-mcp/.gitignore:
--------------------------------------------------------------------------------
1 | slides.html
2 |
--------------------------------------------------------------------------------
/2025-06-ai-engineer-mcp/.python-version:
--------------------------------------------------------------------------------
1 | 3.13
2 |
--------------------------------------------------------------------------------
/2025-06-ai-engineer-mcp/README.md:
--------------------------------------------------------------------------------
1 | # MCP is all you need
2 |
### AI Engineer, San Francisco, June 2025
4 |
5 | Slides at [github.com/pydantic/talks](https://github.com/pydantic/talks).
6 |
7 | ---
8 |
9 | ## whoami
10 |
11 | **Samuel Colvin** — creator of Pydantic
12 |
13 | Pydantic:
14 | * Python library for data validation
15 | * Created Pydantic in 2017 — long before Gen AI
16 | * Now downloaded ~350M per month
17 | * Used by all of FAANG
18 | * Used by virtually every GenAI Python library — both provider SDKs and Agent Frameworks
19 |
20 | Became a company (Pydantic Labs), backed by Sequoia in 2023, released:
21 | * Pydantic Logfire (developer observability)
22 | * Pydantic AI (agent framework)
23 |
24 | **Come to our booth for Logfire demo, t-shirts, etc.**
25 |
26 | _(I'm also a co-maintainer of the MCP python SDK)_
27 |
28 | ---
29 |
30 | # what
31 |
32 | **MCP is all you need**
33 |
34 | * Inspired by Jason Liu's talk "Pydantic is all you need" and "Pydantic is still all you need"
35 | * Same idea: that lots of people are over complicating things
36 | * Same unrealistic title — no one is seriously claiming MCP can do everything
37 |
38 | What I am saying is:
39 |
40 | **MCP can do a lot of multi-agent communications - specifically autonomous agents.**
41 |
42 | ---
43 |
44 | # how
45 |
46 | MCP was not (primarily) designed for multi-agent communication.
47 |
So two of its primitives (prompts and resources) probably aren't necessary.

But tool calling (the third primitive) absolutely is!
51 |
And tools are a lot more complex than you might at first assume:
53 | * dynamic tools
54 | * logging
55 | * sampling
56 | * tracing
57 |
58 | ---
59 |
60 | # MCP for multi-agent communication
61 |
62 | The way most people describe it:
63 |
64 | 
65 |
66 | ---
67 |
68 | # MCP for multi-agent communication
69 |
70 | But what if servers can be clients:
71 |
72 | 
73 |
74 | ... but there's a problem.
75 |
76 | ---
77 |
78 | # sampling
79 |
80 | Give MCP servers the ability to make requests to LLMs via the client.
81 |
82 | (Powerful feature of MCP, but not widely supported*)
83 |
84 |
85 | ```mermaid
86 | sequenceDiagram
87 | participant LLM
88 | participant MCP_Client as MCP client
89 | participant MCP_Server as MCP server
90 |
91 | MCP_Client->>LLM: LLM call
92 | LLM->>MCP_Client: LLM tool call response
93 |
94 | MCP_Client->>MCP_Server: tool call
95 | MCP_Server->>MCP_Client: sampling "create message"
96 |
97 | MCP_Client->>LLM: LLM call
98 | LLM->>MCP_Client: LLM text response
99 |
100 | MCP_Client->>MCP_Server: sampling response
101 | MCP_Server->>MCP_Client: tool call response
102 | ```
103 |
104 | ---
105 |
106 | # Example
107 |
108 | Library research tool.
109 | * connects to pypi MCP server with natural language query
110 | * MCP server uses sampling to convert query into SQL, runs sql.
111 |
112 | ```py
113 | from pydantic_ai import Agent
114 | from pydantic_ai.mcp import MCPServerStdio
115 |
116 | ...
117 |
118 | server = MCPServerStdio(command='uv', args=['run', 'pypi_mcp_server.py'])
119 | libs_agent = Agent(
120 | 'openai:gpt-4o',
121 | mcp_servers=[server],
122 | instructions='your job is to help the user research software libraries and packages using the tools provided',
123 | )
124 |
125 | async def main():
126 | async with libs_agent.run_mcp_servers():
127 | result = await libs_agent.run('How many times has pydantic been downloaded this year')
128 | print(result.output)
129 | ```
130 |
131 | ---
132 |
133 | # Thank you
134 |
135 | Slides at [github.com/pydantic/talks](https://github.com/pydantic/talks).
136 |
I'm at the Pydantic booth, if you have any questions, come and say hi.
138 |
--------------------------------------------------------------------------------
/2025-06-ai-engineer-mcp/libs_mcp_client.py:
--------------------------------------------------------------------------------
1 | from datetime import date
2 |
3 | import logfire
4 | from mcp.types import LoggingMessageNotificationParams
5 | from pydantic_ai import Agent
6 | from pydantic_ai.mcp import MCPServerStdio
7 |
8 | logfire.configure(service_name='mcp-client')
9 |
10 | logfire.instrument_pydantic_ai()
11 | logfire.instrument_mcp()
12 |
13 |
async def log_handler(params: LoggingMessageNotificationParams):
    """Print MCP server log notifications as they arrive."""
    print(f'{params.level}: {params.data}')
16 |
17 |
# Launch the PyPI MCP server as a subprocess over stdio; its log
# notifications are routed to log_handler above.
server = MCPServerStdio(command='uv', args=['run', 'pypi_mcp_server.py'], log_handler=log_handler)
libs_agent = Agent(
    'openai:gpt-4o',
    mcp_servers=[server],
    instructions='your job is to help the user research software libraries and packages using the tools provided',
)
24 |
25 |
@libs_agent.system_prompt
def add_date():
    """Add today's date to the system prompt so relative phrases like 'this year' resolve."""
    today = date.today()
    return f'Today is {today:%Y-%m-%d}'
29 |
30 |
async def main():
    """Start the MCP server subprocess, ask one question, and print the answer."""
    async with libs_agent.run_mcp_servers():
        run_result = await libs_agent.run('How many times has pydantic been downloaded this year')
        print(run_result.output)
35 |
36 |
# Script entry point: run the demo query once.
if __name__ == '__main__':
    import asyncio

    asyncio.run(main())
41 |
--------------------------------------------------------------------------------
/2025-06-ai-engineer-mcp/make_slides.py:
--------------------------------------------------------------------------------
import json
import re
from pathlib import Path

# Split the README into slides on the markdown horizontal-rule separator.
readme = Path('README.md').read_text()
slides = readme.split('---')

template_html = Path('slides.template.html').read_text()

# The first markdown H1 in the README becomes the page title.
title_m = re.search(r'# (.*)', readme)
assert title_m is not None, 'Title not found in README.md'
title = title_m.group(1)

# Use callable replacements so backslashes and group references (e.g. '\g<0>')
# in the substituted text are inserted literally instead of being interpreted
# by re — the slides JSON in particular routinely contains backslashes.
slides_html, count = re.subn('{{ *title *}}', lambda m: title, template_html)
assert count == 1, f'Title found {count} times in slides.template.html'
slides_html, count = re.subn('{{ *slides *}}', lambda m: json.dumps(slides), slides_html)
assert count == 1, f'Slides found {count} times in slides.template.html'

Path('slides.html').write_text(slides_html)
20 |
--------------------------------------------------------------------------------
/2025-06-ai-engineer-mcp/pypi_mcp_server.py:
--------------------------------------------------------------------------------
1 | """MCP server to get information about python package downloads."""
2 |
3 | import re
4 | from dataclasses import dataclass
5 |
6 | import logfire
7 | from google.api_core.exceptions import BadRequest
8 | from google.cloud import bigquery
9 | from mcp import ServerSession
10 | from mcp.server.fastmcp import Context, FastMCP
11 | from pydantic_ai import Agent, ModelRetry, RunContext, format_as_xml
12 | from pydantic_ai.models.mcp_sampling import MCPSamplingModel
13 |
# Configure once with an explicit service name so client and server spans are
# distinguishable (the original called configure() twice; the first, bare call
# was immediately superseded by this one), then instrument MCP traffic and
# agent runs.
logfire.configure(service_name='mcp-server')
logfire.instrument_mcp()
logfire.instrument_pydantic_ai()
18 |
19 |
# Public BigQuery dataset recording every PyPI download event.
table_name = 'bigquery-public-data.pypi.file_downloads'
# NOTE(review): relies on ambient Google credentials — confirm deployment env.
client = bigquery.Client()
22 |
23 |
@dataclass
class Deps:
    """Dependencies for pypi_agent runs."""

    # FastMCP request context; used to send log notifications back to the client.
    mcp_context: Context[ServerSession, None]
27 |
28 |
# Agent that converts natural-language questions about PyPI downloads into
# BigQuery SQL. No model is configured here: at runtime it is invoked with
# MCPSamplingModel (see pypi_downloads below), so its LLM calls are proxied
# back through the MCP client via sampling.
pypi_agent = Agent(
    retries=2,  # lets run_query reject bad SQL via ModelRetry and try again
    deps_type=Deps,
    system_prompt=f"""
Your job is to help users analyze downloads of python packages.

Convert the user's query into a BigQuery SQL query against the `{table_name}`
table which has the following schema:

```sql
CREATE TABLE {table_name} (
  timestamp TIMESTAMP,
  country_code STRING, -- two letter ISO country code
  url STRING,
  project STRING,
  file STRUCT<
    filename STRING,
    project STRING,
    version STRING,
    type STRING
  >,
  details STRUCT<
    installer STRUCT<
      name STRING,
      version STRING
    >,
    python STRING,
    implementation STRUCT<
      name STRING,
      version STRING
    >,
    distro STRUCT<
      name STRING,
      version STRING,
      id STRING,
      libc STRUCT<
        lib STRING,
        version STRING
      >
    >,
    system STRUCT<
      name STRING,
      release STRING
    >,
    cpu STRING,
    openssl_version STRING,
    setuptools_version STRING,
    rustc_version STRING,
    ci BOOLEAN
  >,
  tls_protocol STRING,
  tls_cipher STRING
);
```

Where possible apply a lower bound constraint to the `timestamp` column to avoid scanning to many partitions.

For example, if the user asked for an example download of the pydantic package, you could use the following query:

```sql
SELECT *
FROM `bigquery-public-data.pypi.file_downloads`
WHERE
  file.project = 'pydantic'
  AND DATE(timestamp) = current_date()
LIMIT 1
```

If the user asked for "number of downloads of pydantic broken down by month, python version, operating system,
CPU architecture, and libc version for this year and last year", you could use the following query:

```sql
SELECT
  COUNT(*) AS num_downloads,
  DATE_TRUNC(DATE(timestamp), MONTH) AS `month`,
  REGEXP_EXTRACT(details.python, r"[0-9]+\\.[0-9]+") AS python_version,
  details.system.name AS os,
  details.cpu AS cpu,
  details.distro.libc.lib AS libc
FROM `bigquery-public-data.pypi.file_downloads`
WHERE
  file.project = 'pydantic'
  AND DATE_TRUNC(DATE(timestamp), YEAR) = DATE_TRUNC(date_sub(current_date(), interval 1 YEAR), YEAR)
GROUP BY `month`, `python_version`, `os`, `cpu`, `libc`
ORDER BY `month` DESC, `num_downloads` DESC
```
""",
)
117 |
118 |
@pypi_agent.output_validator
async def run_query(ctx: RunContext[Deps], sql: str) -> str:
    """Execute the model's SQL against BigQuery and return the rows as XML.

    Raises ModelRetry — prompting another model attempt — when the query
    targets the wrong table or BigQuery rejects it.
    """
    # The model sometimes wraps the query in a fenced code block; unwrap it.
    fenced = re.search(r'```\w*\n(.*?)```', sql, flags=re.S)
    if fenced:
        sql = fenced.group(1).strip()

    logfire.info('running {sql}', sql=sql)
    await ctx.deps.mcp_context.log('info', 'running query')
    if f'from `{table_name}`' not in sql.lower():
        raise ModelRetry(f'Query must be against the `{table_name}` table')
    try:
        rows = client.query(sql).result()
    except BadRequest as e:
        await ctx.deps.mcp_context.log('warning', 'query error retrying')
        raise ModelRetry(f'Invalid query: {e}') from e
    await ctx.deps.mcp_context.log('info', 'query successful')
    data = [dict(row) for row in rows]  # type: ignore
    return format_as_xml(data, item_tag='row', include_root_tag=False)
139 |
140 |
# FastMCP server; log_level='WARNING' keeps its own log output quiet.
mcp = FastMCP('PyPI query', log_level='WARNING')
142 |
143 |
@mcp.tool()
async def pypi_downloads(question: str, ctx: Context[ServerSession, None]) -> str:
    """Analyze downloads of packages from the Python package index PyPI to answer questions about package downloads."""
    # Route the agent's LLM calls back through the MCP client via sampling.
    sampling_model = MCPSamplingModel(session=ctx.session)
    result = await pypi_agent.run(question, model=sampling_model, deps=Deps(ctx))
    return result.output
149 |
150 |
if __name__ == '__main__':
    # Start the MCP server. The commented-out lines below allow quick local
    # testing of the agent directly, without an MCP client attached.
    mcp.run()
    # from devtools import debug
    # result = pypi_agent.run_sync('How many times has pydantic been downloaded this year?', model='openai:gpt-4o')
    # debug(result.output)
156 |
--------------------------------------------------------------------------------
/2025-06-ai-engineer-mcp/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "2025-06-ai-engineer-mcp"
3 | version = "0.1.0"
description = "MCP is all you need - AI Engineer, June 2025"
5 | readme = "README.md"
6 | requires-python = ">=3.13"
7 | dependencies = [
8 | "google-cloud-bigquery>=3.34.0",
9 | "logfire[httpx]>=3.17.0",
10 | "pydantic-ai",
11 | ]
12 |
13 | [dependency-groups]
14 | dev = ["devtools>=0.12.2", "ruff>=0.11.12"]
15 |
16 | [tool.ruff]
17 | line-length = 120
18 | target-version = 'py313'
19 |
20 | [tool.ruff.lint]
21 | extend-select = ['Q', 'RUF100', 'C90', 'UP', 'I']
22 | flake8-quotes = { inline-quotes = 'single', multiline-quotes = 'double' }
23 | isort = { combine-as-imports = true }
24 | mccabe = { max-complexity = 15 }
25 |
26 | [tool.ruff.lint.pydocstyle]
27 | convention = 'google'
28 |
29 | [tool.ruff.format]
30 | # don't format python in docstrings, pytest-examples takes care of it
31 | docstring-code-format = false
32 | quote-style = 'single'
33 |
34 | [tool.uv.sources]
35 | pydantic-ai = { git = "https://github.com/pydantic/pydantic-ai.git", rev = "edbd3c689df49487858e0dbe13508936333b805c" }
36 |
--------------------------------------------------------------------------------
/2025-06-ai-engineer-mcp/static/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pydantic/talks/47b2837f290a3955983f534e7563afc174ff172f/2025-06-ai-engineer-mcp/static/favicon.ico
--------------------------------------------------------------------------------
/2025-06-ai-engineer-mcp/static/fonts/ibm-plex-mono-italic-400.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pydantic/talks/47b2837f290a3955983f534e7563afc174ff172f/2025-06-ai-engineer-mcp/static/fonts/ibm-plex-mono-italic-400.ttf
--------------------------------------------------------------------------------
/2025-06-ai-engineer-mcp/static/fonts/ibm-plex-mono-normal-400.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pydantic/talks/47b2837f290a3955983f534e7563afc174ff172f/2025-06-ai-engineer-mcp/static/fonts/ibm-plex-mono-normal-400.ttf
--------------------------------------------------------------------------------
/2025-06-ai-engineer-mcp/static/fonts/ibm-plex-mono-normal-500.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pydantic/talks/47b2837f290a3955983f534e7563afc174ff172f/2025-06-ai-engineer-mcp/static/fonts/ibm-plex-mono-normal-500.ttf
--------------------------------------------------------------------------------
/2025-06-ai-engineer-mcp/static/fonts/ibm-plex-mono-normal-600.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pydantic/talks/47b2837f290a3955983f534e7563afc174ff172f/2025-06-ai-engineer-mcp/static/fonts/ibm-plex-mono-normal-600.ttf
--------------------------------------------------------------------------------
/2025-06-ai-engineer-mcp/static/google_fonts_ibm_plex.css:
--------------------------------------------------------------------------------
/* Self-hosted IBM Plex Mono @font-face declarations in four variants:
   italic 400 and normal 400/500/600, each served from ./fonts/. */
@font-face {
  font-family: 'IBM Plex Mono';
  font-style: italic;
  font-weight: 400;
  font-display: swap;
  src: url('./fonts/ibm-plex-mono-italic-400.ttf') format('truetype');
}

@font-face {
  font-family: 'IBM Plex Mono';
  font-style: normal;
  font-weight: 400;
  font-display: swap;
  src: url('./fonts/ibm-plex-mono-normal-400.ttf') format('truetype');
}

@font-face {
  font-family: 'IBM Plex Mono';
  font-style: normal;
  font-weight: 500;
  font-display: swap;
  src: url('./fonts/ibm-plex-mono-normal-500.ttf') format('truetype');
}

@font-face {
  font-family: 'IBM Plex Mono';
  font-style: normal;
  font-weight: 600;
  font-display: swap;
  src: url('./fonts/ibm-plex-mono-normal-600.ttf') format('truetype');
}
--------------------------------------------------------------------------------
/2025-06-ai-engineer-mcp/static/highlight.min.css:
--------------------------------------------------------------------------------
1 | pre code.hljs{display:block;overflow-x:auto;padding:1em}code.hljs{padding:3px 5px}/*!
2 | Theme: GitHub
3 | Description: Light theme as seen on github.com
4 | Author: github.com
5 | Maintainer: @Hirse
6 | Updated: 2021-05-15
7 |
8 | Outdated base version: https://github.com/primer/github-syntax-light
9 | Current colors taken from GitHub's CSS
10 | */.hljs{color:#24292e;background:#fff}.hljs-doctag,.hljs-keyword,.hljs-meta .hljs-keyword,.hljs-template-tag,.hljs-template-variable,.hljs-type,.hljs-variable.language_{color:#d73a49}.hljs-title,.hljs-title.class_,.hljs-title.class_.inherited__,.hljs-title.function_{color:#6f42c1}.hljs-attr,.hljs-attribute,.hljs-literal,.hljs-meta,.hljs-number,.hljs-operator,.hljs-selector-attr,.hljs-selector-class,.hljs-selector-id,.hljs-variable{color:#005cc5}.hljs-meta .hljs-string,.hljs-regexp,.hljs-string{color:#032f62}.hljs-built_in,.hljs-symbol{color:#e36209}.hljs-code,.hljs-comment,.hljs-formula{color:#6a737d}.hljs-name,.hljs-quote,.hljs-selector-pseudo,.hljs-selector-tag{color:#22863a}.hljs-subst{color:#24292e}.hljs-section{color:#005cc5;font-weight:700}.hljs-bullet{color:#735c0f}.hljs-emphasis{color:#24292e;font-style:italic}.hljs-strong{color:#24292e;font-weight:700}.hljs-addition{color:#22863a;background-color:#f0fff4}.hljs-deletion{color:#b31d28;background-color:#ffeef0}
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2024 - present Pydantic Services Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Talks about Pydantic, PydanticAI and Logfire
2 |
3 | Single repo for talks about Pydantic, PydanticAI and Logfire, mostly by Samuel Colvin.
4 |
5 | Each talk is in a subdirectory of this repo, some content may be duplicated as each talk is designed to be self-contained.
6 |
--------------------------------------------------------------------------------