├── nbs
│   ├── CNAME
│   ├── samples
│   │   ├── puppy.jpg
│   │   └── solveit.pdf
│   ├── sidebar.yml
│   ├── nbdev.yml
│   ├── _quarto.yml
│   ├── styles.css
│   ├── 01_usage.ipynb
│   └── index.ipynb
├── lisette
│   ├── __init__.py
│   ├── usage.py
│   ├── _modidx.py
│   └── core.py
├── MANIFEST.in
├── index_files
│   └── figure-commonmark
│       └── cell-8-output-1.jpeg
├── .github
│   └── workflows
│       ├── deploy.yaml
│       └── test.yaml.off
├── pyproject.toml
├── settings.ini
├── .gitignore
├── setup.py
├── CHANGELOG.md
├── LICENSE
└── README.md
/nbs/CNAME:
--------------------------------------------------------------------------------
1 | lisette.answer.ai
2 |
--------------------------------------------------------------------------------
/lisette/__init__.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.0.22"
2 | from .core import *
3 |
--------------------------------------------------------------------------------
/nbs/samples/puppy.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AnswerDotAI/lisette/HEAD/nbs/samples/puppy.jpg
--------------------------------------------------------------------------------
/nbs/samples/solveit.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AnswerDotAI/lisette/HEAD/nbs/samples/solveit.pdf
--------------------------------------------------------------------------------
/nbs/sidebar.yml:
--------------------------------------------------------------------------------
1 | website:
2 | sidebar:
3 | contents:
4 | - index.ipynb
5 | - 00_core.ipynb
6 | - 01_usage.ipynb
7 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include settings.ini
2 | include LICENSE
3 | include CONTRIBUTING.md
4 | include README.md
5 | recursive-exclude * __pycache__
6 |
--------------------------------------------------------------------------------
/index_files/figure-commonmark/cell-8-output-1.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AnswerDotAI/lisette/HEAD/index_files/figure-commonmark/cell-8-output-1.jpeg
--------------------------------------------------------------------------------
/nbs/nbdev.yml:
--------------------------------------------------------------------------------
1 | project:
2 | output-dir: _docs
3 |
4 | website:
5 | title: "lisette"
6 | site-url: "https://lisette.answer.ai/"
7 | description: "litellm helper"
8 | repo-branch: main
9 | repo-url: "https://github.com/AnswerDotAI/lisette"
10 |
--------------------------------------------------------------------------------
/.github/workflows/deploy.yaml:
--------------------------------------------------------------------------------
1 | name: Deploy to GitHub Pages
2 |
3 | permissions:
4 | contents: write
5 | pages: write
6 |
7 | on:
8 | push:
9 | branches: [ "main", "master" ]
10 | workflow_dispatch:
11 | jobs:
12 | deploy:
13 | runs-on: ubuntu-latest
14 | steps: [uses: fastai/workflows/quarto-ghp@master]
15 |
--------------------------------------------------------------------------------
/.github/workflows/test.yaml.off:
--------------------------------------------------------------------------------
1 | name: CI
2 | on: [workflow_dispatch, pull_request, push]
3 |
4 | jobs:
5 | test:
6 | runs-on: ubuntu-latest
7 | env:
8 | ANTHROPIC_API_KEY: "sk-xxx"
9 | GEMINI_API_KEY: "sk-xxx"
10 | OPENAI_API_KEY: "sk-xxx"
11 | steps: [uses: fastai/workflows/nbdev-ci@master]
12 |
--------------------------------------------------------------------------------
/nbs/_quarto.yml:
--------------------------------------------------------------------------------
1 | project:
2 | type: website
3 |
4 | format:
5 | html:
6 | theme: cosmo
7 | css: styles.css
8 | toc: true
9 | keep-md: true
10 | commonmark: default
11 |
12 | website:
13 | twitter-card: true
14 | open-graph: true
15 | repo-actions: [issue]
16 | navbar:
17 | background: primary
18 | search: true
19 | sidebar:
20 | style: floating
21 |
22 | metadata-files: [nbdev.yml, sidebar.yml]
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools>=64.0"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | [project]
6 | name="lisette"
7 | requires-python=">=3.9"
8 | dynamic = [ "keywords", "description", "version", "dependencies", "optional-dependencies", "readme", "license", "authors", "classifiers", "entry-points", "scripts", "urls"]
9 |
10 | [tool.uv]
11 | cache-keys = [{ file = "pyproject.toml" }, { file = "settings.ini" }, { file = "setup.py" }]
12 |
--------------------------------------------------------------------------------
/nbs/styles.css:
--------------------------------------------------------------------------------
1 | .cell {
2 | margin-bottom: 1rem;
3 | }
4 |
5 | .cell > .sourceCode {
6 | margin-bottom: 0;
7 | }
8 |
9 | .cell-output > pre {
10 | margin-bottom: 0;
11 | }
12 |
13 | .cell-output > pre, .cell-output > .sourceCode > pre, .cell-output-stdout > pre {
14 | margin-left: 0.8rem;
15 | margin-top: 0;
16 | background: none;
17 | border-left: 2px solid lightsalmon;
18 | border-top-left-radius: 0;
19 | border-top-right-radius: 0;
20 | }
21 |
22 | .cell-output > .sourceCode {
23 | border: none;
24 | }
25 |
26 | .cell-output > .sourceCode {
27 | background: none;
28 | margin-top: 0;
29 | }
30 |
31 | div.description {
32 | padding-left: 2px;
33 | padding-top: 5px;
34 | font-style: italic;
35 | font-size: 135%;
36 | opacity: 70%;
37 | }
38 |
--------------------------------------------------------------------------------
/settings.ini:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | repo = lisette
3 | lib_name = lisette
4 | version = 0.0.22
5 | min_python = 3.9
6 | license = apache2
7 | black_formatting = False
8 | doc_path = _docs
9 | lib_path = lisette
10 | nbs_path = nbs
11 | recursive = True
12 | tst_flags = notest
13 | put_version_in_init = True
14 | update_pyproject = True
15 | branch = main
16 | custom_sidebar = False
17 | doc_host = https://lisette.answer.ai
18 | doc_baseurl = /
19 | git_url = https://github.com/AnswerDotAI/lisette
20 | title = lisette
21 | audience = Developers
22 | author = AnswerDotAI
23 | author_email = support@answer.ai
24 | copyright = 2025 onwards, AnswerDotAI
25 | description = litellm helper
26 | keywords = nbdev jupyter notebook python
27 | language = English
28 | status = 3
29 | user = AnswerDotAI
30 | requirements = litellm==1.80.10 numpydoc toolslm fastcore>=1.9.2
31 | dev_requirements = ipython pycachy>=0.0.4 fastlite
32 | cell_number = False
33 | readme_nb = index.ipynb
34 | allowed_metadata_keys =
35 | allowed_cell_metadata_keys =
36 | jupyter_hooks = False
37 | clean_ids = True
38 | clear_all = False
39 | skip_procs =
40 |
41 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | _docs/
2 | _proc/
3 |
4 | *.bak
5 | .gitattributes
6 | .last_checked
7 | .gitconfig
8 | *.bak
9 | *.log
10 | *~
11 | ~*
12 | _tmp*
13 | tmp*
14 | tags
15 | *.pkg
16 |
17 | # Byte-compiled / optimized / DLL files
18 | __pycache__/
19 | *.py[cod]
20 | *$py.class
21 |
22 | # C extensions
23 | *.so
24 |
25 | # Distribution / packaging
26 | .Python
27 | env/
28 | build/
29 | conda/
30 | develop-eggs/
31 | dist/
32 | downloads/
33 | eggs/
34 | .eggs/
35 | lib/
36 | lib64/
37 | parts/
38 | sdist/
39 | var/
40 | wheels/
41 | *.egg-info/
42 | .installed.cfg
43 | *.egg
44 |
45 | # PyInstaller
46 | # Usually these files are written by a python script from a template
47 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
48 | *.manifest
49 | *.spec
50 |
51 | # Installer logs
52 | pip-log.txt
53 | pip-delete-this-directory.txt
54 |
55 | # Unit test / coverage reports
56 | htmlcov/
57 | .tox/
58 | .coverage
59 | .coverage.*
60 | .cache
61 | nosetests.xml
62 | coverage.xml
63 | *.cover
64 | .hypothesis/
65 |
66 | # Translations
67 | *.mo
68 | *.pot
69 |
70 | # Django stuff:
71 | *.log
72 | local_settings.py
73 |
74 | # Flask stuff:
75 | instance/
76 | .webassets-cache
77 |
78 | # Scrapy stuff:
79 | .scrapy
80 |
81 | # Sphinx documentation
82 | docs/_build/
83 |
84 | # PyBuilder
85 | target/
86 |
87 | # Jupyter Notebook
88 | .ipynb_checkpoints
89 |
90 | # pyenv
91 | .python-version
92 |
93 | # celery beat schedule file
94 | celerybeat-schedule
95 |
96 | # SageMath parsed files
97 | *.sage.py
98 |
99 | # dotenv
100 | .env
101 |
102 | # virtualenv
103 | .venv
104 | venv/
105 | ENV/
106 |
107 | # Spyder project settings
108 | .spyderproject
109 | .spyproject
110 |
111 | # Rope project settings
112 | .ropeproject
113 |
114 | # mkdocs documentation
115 | /site
116 |
117 | # mypy
118 | .mypy_cache/
119 |
120 | .vscode
121 | *.swp
122 |
123 | # osx generated files
124 | .DS_Store
125 | .DS_Store?
126 | .Trashes
127 | ehthumbs.db
128 | Thumbs.db
129 | .idea
130 |
131 | # pytest
132 | .pytest_cache
133 |
134 | # tools/trust-doc-nbs
135 | docs_src/.last_checked
136 |
137 | # symlinks to fastai
138 | docs_src/fastai
139 | tools/fastai
140 |
141 | # link checker
142 | checklink/cookies.txt
143 |
144 | # .gitconfig is now autogenerated
145 | .gitconfig
146 |
147 | # Quarto installer
148 | .deb
149 | .pkg
150 |
151 | # Quarto
152 | .quarto
153 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from pkg_resources import parse_version
2 | from configparser import ConfigParser
3 | import setuptools, shlex
4 | assert parse_version(setuptools.__version__)>=parse_version('36.2')
5 |
6 | # note: all settings are in settings.ini; edit there, not here
7 | config = ConfigParser(delimiters=['='])
8 | config.read('settings.ini', encoding='utf-8')
9 | cfg = config['DEFAULT']
10 |
11 | cfg_keys = 'version description keywords author author_email'.split()
12 | expected = cfg_keys + "lib_name user branch license status min_python audience language".split()
13 | for o in expected: assert o in cfg, "missing expected setting: {}".format(o)
14 | setup_cfg = {o:cfg[o] for o in cfg_keys}
15 |
16 | licenses = {
17 | 'apache2': ('Apache Software License 2.0','OSI Approved :: Apache Software License'),
18 | 'mit': ('MIT License', 'OSI Approved :: MIT License'),
19 | 'gpl2': ('GNU General Public License v2', 'OSI Approved :: GNU General Public License v2 (GPLv2)'),
20 | 'gpl3': ('GNU General Public License v3', 'OSI Approved :: GNU General Public License v3 (GPLv3)'),
21 | 'bsd3': ('BSD License', 'OSI Approved :: BSD License'),
22 | }
23 | statuses = [ '1 - Planning', '2 - Pre-Alpha', '3 - Alpha',
24 | '4 - Beta', '5 - Production/Stable', '6 - Mature', '7 - Inactive' ]
25 | py_versions = '3.6 3.7 3.8 3.9 3.10 3.11 3.12'.split()
26 |
27 | requirements = shlex.split(cfg.get('requirements', ''))
28 | if cfg.get('pip_requirements'): requirements += shlex.split(cfg.get('pip_requirements', ''))
29 | min_python = cfg['min_python']
30 | lic = licenses.get(cfg['license'].lower(), (cfg['license'], None))
31 | dev_requirements = (cfg.get('dev_requirements') or '').split()
32 |
33 | package_data = dict()
34 | pkg_data = cfg.get('package_data', None)
35 | if pkg_data:
36 | package_data[cfg['lib_name']] = pkg_data.split() # split as multiple files might be listed
37 | # Add package data to setup_cfg for setuptools.setup(..., **setup_cfg)
38 | setup_cfg['package_data'] = package_data
39 |
40 | setuptools.setup(
41 | name = cfg['lib_name'],
42 | license = lic[0],
43 | classifiers = [
44 | 'Development Status :: ' + statuses[int(cfg['status'])],
45 | 'Intended Audience :: ' + cfg['audience'].title(),
46 | 'Natural Language :: ' + cfg['language'].title(),
47 | ] + ['Programming Language :: Python :: '+o for o in py_versions[py_versions.index(min_python):]] + (['License :: ' + lic[1] ] if lic[1] else []),
48 | url = cfg['git_url'],
49 | packages = setuptools.find_packages(),
50 | include_package_data = True,
51 | install_requires = requirements,
52 | extras_require={ 'dev': dev_requirements },
53 | dependency_links = cfg.get('dep_links','').split(),
54 | python_requires = '>=' + cfg['min_python'],
55 | long_description = open('README.md', encoding='utf-8').read(),
56 | long_description_content_type = 'text/markdown',
57 | zip_safe = False,
58 | entry_points = {
59 | 'console_scripts': cfg.get('console_scripts','').split(),
60 | 'nbdev': [f'{cfg.get("lib_path")}={cfg.get("lib_path")}._modidx:d']
61 | },
62 | **setup_cfg)
63 |
64 |
65 |
--------------------------------------------------------------------------------
/lisette/usage.py:
--------------------------------------------------------------------------------
1 | """Lisette usage and cost monitoring"""
2 |
3 | # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/01_usage.ipynb.
4 |
5 | # %% auto 0
6 | __all__ = ['Usage', 'search_count', 'LisetteUsageLogger']
7 |
8 | # %% ../nbs/01_usage.ipynb
9 | from litellm.integrations.custom_logger import CustomLogger
10 | from fastcore.utils import *
11 | import time
12 | try: from fastlite import *
13 | except ImportError: raise ImportError("Please install `fastlite` to use sqlite based lisette usage logging.")
14 |
15 | # %% ../nbs/01_usage.ipynb
16 | class Usage: id:int; timestamp:float; model:str; user_id:str; prompt_tokens:int; completion_tokens:int; total_tokens:int; cached_tokens:int; cache_creation_tokens:int; cache_read_tokens:int; web_search_requests:int; response_cost:int
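# NB: fastlite's `Database.create` (used in `LisetteUsageLogger.__init__` below) maps this
# annotated class to a sqlite table, with the `id` field as the primary key by default.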
17 |
18 | # %% ../nbs/01_usage.ipynb
19 | def search_count(r):
20 | if cnt := nested_idx(r.usage, 'server_tool_use', 'web_search_requests'): return cnt # Anthropic
21 | if meta := getattr(r, 'vertex_ai_grounding_metadata', None): # Gemini
22 | if meta and (queries := meta[0].get('webSearchQueries')): return len(queries)
23 | if cnt := nested_idx(r.usage, 'prompt_tokens_details', 'web_search_requests'): return cnt # streaming with `include_usage`
24 | return 0
25 |
26 | # %% ../nbs/01_usage.ipynb
27 | class LisetteUsageLogger(CustomLogger):
28 | def __init__(self, db_path):
29 | self.db = Database(db_path)
30 | self.usage = self.db.create(Usage)
31 |
32 | async def async_log_success_event(self, kwargs, response_obj, start_time, end_time): self._log_usage(response_obj, kwargs['response_cost'], start_time, end_time)
33 | def log_success_event(self, kwargs, response_obj, start_time, end_time): self._log_usage(response_obj, kwargs['response_cost'], start_time, end_time)
34 | def _log_usage(self, response_obj, response_cost, start_time, end_time):
35 | usage = response_obj.usage
36 | ptd = usage.prompt_tokens_details
37 | self.usage.insert(Usage(timestamp=time.time(),
38 | model=response_obj.model,
39 | user_id=self.user_id_fn(),
40 | prompt_tokens=usage.prompt_tokens,
41 | completion_tokens=usage.completion_tokens,
42 | total_tokens=usage.total_tokens,
43 | cached_tokens=ptd.cached_tokens if ptd else 0, # used by gemini (read tokens)
44 | cache_creation_tokens=nested_idx(usage, 'cache_creation_input_tokens'),
45 | cache_read_tokens=nested_idx(usage, 'cache_read_input_tokens'), # used by anthropic
46 | web_search_requests=search_count(response_obj),
47 | response_cost=response_cost))
48 |
49 |     def user_id_fn(self): raise NotImplementedError('Please implement `LisetteUsageLogger.user_id_fn` before initializing, e.g. using fastcore.patch.')
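# A minimal wiring sketch (assumptions: fastcore's `@patch` decorator, litellm's
# `litellm.callbacks` list for registering CustomLogger instances, and a hypothetical
# db path 'usage.db'):
#
#   import litellm
#   @patch
#   def user_id_fn(self:LisetteUsageLogger): return 'local-user'  # replace with your own lookup
#   litellm.callbacks = [LisetteUsageLogger('usage.db')]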
50 |
51 | # %% ../nbs/01_usage.ipynb
52 | @patch
53 | def total_cost(self:Usage, sc=0.01): return self.response_cost + sc * ifnone(self.web_search_requests, 0)
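# E.g. response_cost=0.003 with web_search_requests=2 at the default sc=0.01
# gives 0.003 + 0.01*2 = 0.023.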
54 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Release notes
2 |
3 |
4 |
5 | ## 0.0.21
6 |
7 | ### New Features
8 |
9 | - update litellm 1.80.10 ([#69](https://github.com/AnswerDotAI/lisette/pull/69)), thanks to [@KeremTurgutlu](https://github.com/KeremTurgutlu)
10 |
11 |
12 | ## 0.0.20
13 |
14 | ### New Features
15 |
16 | - Automatically handle context length issues in tool loop ([#68](https://github.com/AnswerDotAI/lisette/issues/68))
17 | - Gemini multimodal support ([#53](https://github.com/AnswerDotAI/lisette/pull/53)), thanks to [@KeremTurgutlu](https://github.com/KeremTurgutlu)
18 |
19 |
20 | ## 0.0.19
21 |
22 | ### New Features
23 |
24 | - Truncate long final tool tokens ([#67](https://github.com/AnswerDotAI/lisette/issues/67))
25 |
26 |
27 | ## 0.0.18
28 |
29 |
30 | ### Bugs Squashed
31 |
32 | - Fix missing usage import ([#63](https://github.com/AnswerDotAI/lisette/pull/63)), thanks to [@KeremTurgutlu](https://github.com/KeremTurgutlu)
33 |
34 |
35 | ## 0.0.17
36 |
37 | ### Bugs Squashed
38 |
39 | - Patch not defined in usage.py ([#61](https://github.com/AnswerDotAI/lisette/issues/61))
40 |
41 |
42 | ## 0.0.16
43 |
44 | ### New Features
45 |
46 | - Opus 4.5 ([#56](https://github.com/AnswerDotAI/lisette/issues/56))
47 |
48 |
49 | ## 0.0.15
50 |
51 | ### New Features
52 |
53 | - added contents helper ([#51](https://github.com/AnswerDotAI/lisette/pull/51)), thanks to [@bitcloud2](https://github.com/bitcloud2)
54 |
55 | ### Bugs Squashed
56 |
57 | - hotfix for streaming claude search error ([#54](https://github.com/AnswerDotAI/lisette/pull/54)), thanks to [@RensDimmendaal](https://github.com/RensDimmendaal)
58 |
59 |
60 | ## 0.0.14
61 |
62 | ### New Features
63 |
64 | - add sqlite usage and cost monitoring ([#47](https://github.com/AnswerDotAI/lisette/pull/47)), thanks to [@KeremTurgutlu](https://github.com/KeremTurgutlu)
65 |
66 |
67 | ## 0.0.13
68 |
69 | ### New Features
70 |
71 | - support toolcalls to return toolresponses ([#44](https://github.com/AnswerDotAI/lisette/pull/44)), thanks to [@ncoop57](https://github.com/ncoop57)
72 |
73 |
74 | ## 0.0.12
75 |
76 | ### New Features
77 |
78 | - Custom anthropic `cache_idxs` ([#36](https://github.com/AnswerDotAI/lisette/pull/36)), thanks to [@KeremTurgutlu](https://github.com/KeremTurgutlu)
79 |
80 | ### Bugs Squashed
81 |
82 | - Toolloop: `A maximum of 4 blocks with cache_control may be provided. Found 5.` ([#34](https://github.com/AnswerDotAI/lisette/issues/34))
83 |
84 |
85 | ## 0.0.11
86 |
87 | ### Bugs Squashed
88 |
89 | - Tool call serialization fails if no final text ([#33](https://github.com/AnswerDotAI/lisette/issues/33))
90 |
91 |
92 | ## 0.0.10
93 |
94 | ### New Features
95 |
96 | - reconstruct tool calls ([#30](https://github.com/AnswerDotAI/lisette/pull/30)), thanks to [@comhar](https://github.com/comhar)
97 |
98 |
99 | ## 0.0.7
100 |
101 | ### New Features
102 |
103 | - make tool call ids deterministic ([#27](https://github.com/AnswerDotAI/lisette/pull/27)), thanks to [@comhar](https://github.com/comhar)
104 |
105 |
106 | ## 0.0.6
107 |
108 | ### New Features
109 |
110 | - Add pdf support ([#25](https://github.com/AnswerDotAI/lisette/pull/25)), thanks to [@RensDimmendaal](https://github.com/RensDimmendaal)
111 |
112 |
113 | ## 0.0.5
114 |
115 | ### Bugs Squashed
116 |
117 | - Fix cache control ([#23](https://github.com/AnswerDotAI/lisette/pull/23)), thanks to [@KeremTurgutlu](https://github.com/KeremTurgutlu)
118 |
119 |
120 | ## 0.0.4
121 |
122 | ### New Features
123 |
124 | - Add Sonnet 4.5 ([#21](https://github.com/AnswerDotAI/lisette/issues/21))
125 |
126 |
127 | ## 0.0.3
128 |
129 | ### Bugs Squashed
130 |
131 | - Need to pin to older litellm for now ([#17](https://github.com/AnswerDotAI/lisette/issues/17))
132 |
133 |
134 | ## 0.0.2
135 |
136 | - param updates
137 |
138 |
139 | ## 0.0.1
140 |
141 | - init release
142 |
143 |
--------------------------------------------------------------------------------
/lisette/_modidx.py:
--------------------------------------------------------------------------------
1 | # Autogenerated by nbdev
2 |
3 | d = { 'settings': { 'branch': 'main',
4 | 'doc_baseurl': '/',
5 | 'doc_host': 'https://lisette.answer.ai',
6 | 'git_url': 'https://github.com/AnswerDotAI/lisette',
7 | 'lib_path': 'lisette'},
8 | 'syms': { 'lisette.core': { 'lisette.core.AsyncChat': ('core.html#asyncchat', 'lisette/core.py'),
9 | 'lisette.core.AsyncChat.__call__': ('core.html#asyncchat.__call__', 'lisette/core.py'),
10 | 'lisette.core.AsyncChat._call': ('core.html#asyncchat._call', 'lisette/core.py'),
11 | 'lisette.core.AsyncStreamFormatter': ('core.html#asyncstreamformatter', 'lisette/core.py'),
12 | 'lisette.core.AsyncStreamFormatter.__init__': ('core.html#asyncstreamformatter.__init__', 'lisette/core.py'),
13 | 'lisette.core.AsyncStreamFormatter.format_item': ( 'core.html#asyncstreamformatter.format_item',
14 | 'lisette/core.py'),
15 | 'lisette.core.AsyncStreamFormatter.format_stream': ( 'core.html#asyncstreamformatter.format_stream',
16 | 'lisette/core.py'),
17 | 'lisette.core.Chat': ('core.html#chat', 'lisette/core.py'),
18 | 'lisette.core.Chat.__call__': ('core.html#chat.__call__', 'lisette/core.py'),
19 | 'lisette.core.Chat.__init__': ('core.html#chat.__init__', 'lisette/core.py'),
20 | 'lisette.core.Chat._call': ('core.html#chat._call', 'lisette/core.py'),
21 | 'lisette.core.Chat._prep_msg': ('core.html#chat._prep_msg', 'lisette/core.py'),
22 | 'lisette.core.Chat.print_hist': ('core.html#chat.print_hist', 'lisette/core.py'),
23 | 'lisette.core.ToolResponse': ('core.html#toolresponse', 'lisette/core.py'),
24 | 'lisette.core._add_cache_control': ('core.html#_add_cache_control', 'lisette/core.py'),
25 | 'lisette.core._alite_call_func': ('core.html#_alite_call_func', 'lisette/core.py'),
26 | 'lisette.core._apply_cache_idxs': ('core.html#_apply_cache_idxs', 'lisette/core.py'),
27 | 'lisette.core._bytes2content': ('core.html#_bytes2content', 'lisette/core.py'),
28 | 'lisette.core._extract_tool': ('core.html#_extract_tool', 'lisette/core.py'),
29 | 'lisette.core._filter_srvtools': ('core.html#_filter_srvtools', 'lisette/core.py'),
30 | 'lisette.core._has_cache': ('core.html#_has_cache', 'lisette/core.py'),
31 | 'lisette.core._has_search': ('core.html#_has_search', 'lisette/core.py'),
32 | 'lisette.core._lite_call_func': ('core.html#_lite_call_func', 'lisette/core.py'),
33 | 'lisette.core._mk_content': ('core.html#_mk_content', 'lisette/core.py'),
34 | 'lisette.core._mk_prefill': ('core.html#_mk_prefill', 'lisette/core.py'),
35 | 'lisette.core._trunc_str': ('core.html#_trunc_str', 'lisette/core.py'),
36 | 'lisette.core.adisplay_stream': ('core.html#adisplay_stream', 'lisette/core.py'),
37 | 'lisette.core.astream_with_complete': ('core.html#astream_with_complete', 'lisette/core.py'),
38 | 'lisette.core.cite_footnote': ('core.html#cite_footnote', 'lisette/core.py'),
39 | 'lisette.core.cite_footnotes': ('core.html#cite_footnotes', 'lisette/core.py'),
40 | 'lisette.core.contents': ('core.html#contents', 'lisette/core.py'),
41 | 'lisette.core.fmt2hist': ('core.html#fmt2hist', 'lisette/core.py'),
42 | 'lisette.core.lite_mk_func': ('core.html#lite_mk_func', 'lisette/core.py'),
43 | 'lisette.core.litellm.ModelResponse._repr_markdown_': ( 'core.html#litellm.modelresponse._repr_markdown_',
44 | 'lisette/core.py'),
45 | 'lisette.core.mk_msg': ('core.html#mk_msg', 'lisette/core.py'),
46 | 'lisette.core.mk_msgs': ('core.html#mk_msgs', 'lisette/core.py'),
47 | 'lisette.core.mk_tc': ('core.html#mk_tc', 'lisette/core.py'),
48 | 'lisette.core.mk_tc_req': ('core.html#mk_tc_req', 'lisette/core.py'),
49 | 'lisette.core.mk_tc_result': ('core.html#mk_tc_result', 'lisette/core.py'),
50 | 'lisette.core.mk_tc_results': ('core.html#mk_tc_results', 'lisette/core.py'),
51 | 'lisette.core.mk_tr_details': ('core.html#mk_tr_details', 'lisette/core.py'),
52 | 'lisette.core.patch_litellm': ('core.html#patch_litellm', 'lisette/core.py'),
53 | 'lisette.core.random_tool_id': ('core.html#random_tool_id', 'lisette/core.py'),
54 | 'lisette.core.remove_cache_ckpts': ('core.html#remove_cache_ckpts', 'lisette/core.py'),
55 | 'lisette.core.stream_with_complete': ('core.html#stream_with_complete', 'lisette/core.py'),
56 | 'lisette.core.structured': ('core.html#structured', 'lisette/core.py')},
57 | 'lisette.usage': { 'lisette.usage.LisetteUsageLogger': ('usage.html#lisetteusagelogger', 'lisette/usage.py'),
58 | 'lisette.usage.LisetteUsageLogger.__init__': ('usage.html#lisetteusagelogger.__init__', 'lisette/usage.py'),
59 | 'lisette.usage.LisetteUsageLogger._log_usage': ( 'usage.html#lisetteusagelogger._log_usage',
60 | 'lisette/usage.py'),
61 | 'lisette.usage.LisetteUsageLogger.async_log_success_event': ( 'usage.html#lisetteusagelogger.async_log_success_event',
62 | 'lisette/usage.py'),
63 | 'lisette.usage.LisetteUsageLogger.log_success_event': ( 'usage.html#lisetteusagelogger.log_success_event',
64 | 'lisette/usage.py'),
65 | 'lisette.usage.LisetteUsageLogger.user_id_fn': ( 'usage.html#lisetteusagelogger.user_id_fn',
66 | 'lisette/usage.py'),
67 | 'lisette.usage.Usage': ('usage.html#usage', 'lisette/usage.py'),
68 | 'lisette.usage.Usage.total_cost': ('usage.html#usage.total_cost', 'lisette/usage.py'),
69 | 'lisette.usage.search_count': ('usage.html#search_count', 'lisette/usage.py')}}}
70 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2022, fastai
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Lisette
2 |
3 |
4 |
5 |
6 | > **NB**: If you are reading this in GitHub’s readme, we recommend you
7 | > instead read the much more nicely formatted [documentation
8 | > version](https://lisette.answer.ai/) of this tutorial.
9 |
10 | *Lisette* is a wrapper for the [LiteLLM Python
11 | SDK](https://docs.litellm.ai/), which provides unified access to 100+
12 | LLM providers using the OpenAI API format.
13 |
14 | LiteLLM provides a unified interface to access multiple LLMs, but it’s
15 | quite low level: it leaves the developer to do a lot of stuff manually.
16 | Lisette automates pretty much everything that can be automated, whilst
17 | providing full control. Amongst the features provided:
18 |
19 | - A [`Chat`](https://lisette.answer.ai/core.html#chat) class that
20 | creates stateful dialogs across any LiteLLM-supported model
21 | - Convenient message creation utilities for text, images, and mixed
22 | content
23 | - Simple and convenient support for tool calling with automatic
24 | execution
25 | - Built-in support for web search capabilities (including citations for
26 | supporting models)
27 | - Streaming responses with formatting
28 | - Full async support with
29 | [`AsyncChat`](https://lisette.answer.ai/core.html#asyncchat)
30 | - Prompt caching (for supporting models)
31 |
32 | To use Lisette, you’ll need to set the appropriate API keys as
33 | environment variables for whichever LLM providers you want to use.
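
For example, a minimal sketch (the key values below are placeholders; the
variable names are the ones expected for the providers used in this
tutorial):

``` python
import os

# Placeholder values -- substitute your real provider keys.
os.environ['ANTHROPIC_API_KEY'] = 'sk-...'
os.environ['GEMINI_API_KEY'] = '...'
os.environ['OPENAI_API_KEY'] = 'sk-...'
```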
34 |
35 | ## Get started
36 |
37 | LiteLLM will automatically be installed with Lisette, if you don’t
38 | already have it.
39 |
40 | ``` python
41 | !pip install lisette -qq
42 | ```
43 |
44 | Lisette only exports the symbols that are needed to use the library, so
45 | you can use import \* to import them. Here’s a quick example showing how
46 | easy it is to switch between different LLM providers:
47 |
48 | ``` python
49 | from lisette import *
50 | ```
51 |
52 | ## Chat
53 |
54 | ``` python
55 | models = ['claude-sonnet-4-20250514', 'gemini/gemini-2.5-flash', 'openai/gpt-4o']
56 |
57 | for model in models:
58 | chat = Chat(model)
59 | res = chat("Please tell me about yourself in one brief sentence.")
60 | display(res)
61 | ```
62 |
63 | I’m Claude, an AI assistant created by Anthropic to be helpful,
64 | harmless, and honest in conversations and tasks.
65 |
66 |
67 |
68 | - id: `chatcmpl-xxx`
69 | - model: `claude-sonnet-4-20250514`
70 | - finish_reason: `stop`
71 | - usage:
72 | `Usage(completion_tokens=29, prompt_tokens=17, total_tokens=46, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0)`
73 |
74 |
75 |
76 | I am a large language model, trained by Google, designed to assist with
77 | information and generate text.
78 |
79 |
80 |
81 | - id: `chatcmpl-xxx`
82 | - model: `gemini-2.5-flash`
83 | - finish_reason: `stop`
84 | - usage:
85 | `Usage(completion_tokens=603, prompt_tokens=11, total_tokens=614, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=583, rejected_prediction_tokens=None, text_tokens=20), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=11, image_tokens=None))`
86 |
87 |
88 |
89 | I’m an AI language model created by OpenAI, designed to assist with a
90 | wide range of questions and tasks by providing information and
91 | generating text-based responses.
92 |
93 |
94 |
95 | - id: `chatcmpl-xxx`
96 | - model: `gpt-4o-2024-08-06`
97 | - finish_reason: `stop`
98 | - usage:
99 | `Usage(completion_tokens=30, prompt_tokens=17, total_tokens=47, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0, text_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=0, cached_tokens=0, text_tokens=None, image_tokens=None))`
100 |
101 |
102 |
103 | That’s it! Lisette handles all the provider-specific details
104 | automatically. Each model will respond in its own style, but the
105 | interface remains the same.
106 |
107 | ## Message formatting
108 |
109 | ### Multiple messages
110 |
111 | Lisette accepts multiple messages in one go:
112 |
113 | ``` python
114 | chat = Chat(models[0])
115 | res = chat(['Hi! My favorite drink is coffee.', 'Hello!', "What's my favorite drink?"])
116 | display(res)
117 | ```
118 |
119 | Hello! Based on what you just told me, your favorite drink is coffee! ☕
120 |
121 |
122 |
123 | - id: `chatcmpl-xxx`
124 | - model: `claude-sonnet-4-20250514`
125 | - finish_reason: `stop`
126 | - usage:
127 | `Usage(completion_tokens=22, prompt_tokens=23, total_tokens=45, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0)`
128 |
129 |
130 |
131 | If you have a pre-existing message history, you can also pass it when
132 | you create the [`Chat`](https://lisette.answer.ai/core.html#chat)
133 | object:
134 |
135 | ``` python
136 | chat = Chat(models[0],hist=['Hi! My favorite drink is coffee.', 'Hello!'])
137 | res = chat("What's my favorite drink?")
138 | display(res)
139 | ```
140 |
141 | Your favorite drink is coffee! You just mentioned that in your previous
142 | message.
143 |
144 |
145 |
146 | - id: `chatcmpl-xxx`
147 | - model: `claude-sonnet-4-20250514`
148 | - finish_reason: `stop`
149 | - usage:
150 | `Usage(completion_tokens=18, prompt_tokens=30, total_tokens=48, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0)`
151 |
152 |
153 |
154 | ### Images
155 |
156 | Lisette also makes it easy to include images in your prompts:
157 |
158 | ``` python
159 | from pathlib import Path
160 | from IPython.display import Image
161 | ```
162 |
163 | ``` python
164 | fn = Path('samples/puppy.jpg')
165 | img = fn.read_bytes()
166 | Image(img)
167 | ```
168 |
169 | 
170 |
171 | All you have to do is read it in as bytes:
172 |
173 | ``` python
174 | img[:20]
175 | ```
176 |
177 | b'\xff\xd8\xff\xe0\x00\x10JFIF\x00\x01\x01\x00\x00\x01\x00\x01\x00\x00'
178 |
179 | And you can pass it inside a
180 | [`Chat`](https://lisette.answer.ai/core.html#chat) object:
181 |
182 | ``` python
183 | chat = Chat(models[0])
184 | chat([img, "What's in this image? Be brief."])
185 | ```
186 |
187 | A cute puppy with brown and white fur lying on grass next to purple
188 | flowers.
189 |
190 |
191 |
192 | - id: `chatcmpl-xxx`
193 | - model: `claude-sonnet-4-20250514`
194 | - finish_reason: `stop`
195 | - usage:
196 | `Usage(completion_tokens=20, prompt_tokens=108, total_tokens=128, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0)`
197 |
198 |
199 |
200 | ### Prefill
201 |
202 | Some providers (e.g. Anthropic) support `prefill`, allowing you to
203 | specify how the assistant’s response should begin:
204 |
205 | ``` python
206 | chat = Chat(models[0])
207 | chat("Concisely, what's the meaning of life?", prefill="According to Douglas Adams,")
208 | ```
209 |
210 | According to Douglas Adams,it’s 42.
211 |
212 | More seriously, there’s no universal answer. Common perspectives include:
213 |
214 | - Creating meaning through relationships, growth, and contribution
215 | - Fulfilling a divine purpose or spiritual calling
216 | - Maximizing well-being and minimizing suffering
217 | - Leaving a positive legacy
218 | - Simply experiencing and appreciating existence itself
217 |
218 | The meaning might be something you create rather than discover.
219 |
220 |
221 |
222 | - id: `chatcmpl-xxx`
223 | - model: `claude-sonnet-4-20250514`
224 | - finish_reason: `stop`
225 | - usage:
226 | `Usage(completion_tokens=84, prompt_tokens=24, total_tokens=108, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0)`
227 |
228 |
229 |
230 | ## Tools
231 |
232 | Lisette makes it easy to give LLMs access to Python functions. Just
233 | define a function with type hints and a docstring:
234 |
235 | ``` python
236 | def add_numbers(
237 | a: int, # First number to add
238 | b: int # Second number to add
239 | ) -> int:
240 | "Add two numbers together"
241 | return a + b
242 | ```
243 |
244 | Now pass the function to
245 | [`Chat`](https://lisette.answer.ai/core.html#chat) and the model can use
246 | it automatically:
247 |
248 | ``` python
249 | chat = Chat(models[0], tools=[add_numbers])
250 | res = chat("What's 47 + 23? Use the tool.")
251 | res
252 | ```
253 |
254 | The result of 47 + 23 is 70.
255 |
256 |
257 |
258 | - id: `chatcmpl-xxx`
259 | - model: `claude-sonnet-4-20250514`
260 | - finish_reason: `stop`
261 | - usage:
262 | `Usage(completion_tokens=18, prompt_tokens=573, total_tokens=591, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0)`
263 |
264 |
265 |
266 | If you want to see all intermediate messages and outputs, you can use
267 | the `return_all=True` feature.
268 |
269 | ``` python
270 | chat = Chat(models[0], tools=[add_numbers])
271 | res = chat("What's 47 + 23 + 59? Use the tool.",max_steps=3,return_all=True)
272 | display(*res)
273 | ```
274 |
275 | I’ll help you calculate 47 + 23 + 59 using the add_numbers tool. Since
276 | the tool can only add two numbers at a time, I’ll need to do this in two
277 | steps.
278 |
279 | 🔧 add_numbers({"a": 47, "b": 23})
280 |
281 |
282 |
283 | - id: `chatcmpl-xxx`
284 | - model: `claude-sonnet-4-20250514`
285 | - finish_reason: `tool_calls`
286 | - usage:
287 | `Usage(completion_tokens=116, prompt_tokens=433, total_tokens=549, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0)`
288 |
289 |
290 |
291 | {'tool_call_id': 'toolu_01F9oakoP8ANHkTMD1DyQDi7',
292 | 'role': 'tool',
293 | 'name': 'add_numbers',
294 | 'content': '70'}
295 |
296 | Now I’ll add the result (70) to the third number (59):
297 |
298 | 🔧 add_numbers({"a": 70, "b": 59})
299 |
300 |
301 |
302 | - id: `chatcmpl-xxx`
303 | - model: `claude-sonnet-4-20250514`
304 | - finish_reason: `tool_calls`
305 | - usage:
306 | `Usage(completion_tokens=87, prompt_tokens=562, total_tokens=649, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0)`
307 |
308 |
309 |
310 | {'tool_call_id': 'toolu_01Cdf3FHJdbx64F8H8ooE1Db',
311 | 'role': 'tool',
312 | 'name': 'add_numbers',
313 | 'content': '129'}
314 |
315 | The answer is **129**.
316 |
317 | I calculated this by first adding 47 + 23 = 70, then adding 70 + 59 =
318 | 129.
319 |
320 |
321 |
322 | - id: `chatcmpl-xxx`
323 | - model: `claude-sonnet-4-20250514`
324 | - finish_reason: `stop`
325 | - usage:
326 | `Usage(completion_tokens=41, prompt_tokens=702, total_tokens=743, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0)`
327 |
328 |
329 |
330 | It shows the intermediate tool calls and the tool results!
331 |
332 | ## Web search
333 |
334 | Some models support web search capabilities. Lisette makes this easy to
335 | use:
336 |
337 | ``` python
338 | chat = Chat(models[0], search='l') # 'l'ow, 'm'edium, or 'h'igh search context
339 | res = chat("Please tell me one fun fact about otters. Keep it brief")
340 | res
341 | ```
342 |
343 | Here’s a fun fact about otters: Sea otters allow themselves to get
344 | entangled in kelp forests - this creates a tether so they don’t drift
345 | away on sleep currents as they sleep. They essentially use kelp as a
346 | natural anchor to stay in place while floating and resting on the
347 | water’s surface!
348 |
349 |
350 |
351 | - id: `chatcmpl-xxx`
352 | - model: `claude-sonnet-4-20250514`
353 | - finish_reason: `stop`
354 | - usage:
355 | `Usage(completion_tokens=143, prompt_tokens=15626, total_tokens=15769, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), server_tool_use=ServerToolUse(web_search_requests=1), cache_creation_input_tokens=0, cache_read_input_tokens=0)`
356 |
357 |
358 |
359 | > [!TIP]
360 | >
361 | > Some providers (like Anthropic) provide citations for their search
362 | > results.
363 |
364 | ``` python
365 | res.choices[0].message.provider_specific_fields
366 | ```
367 |
368 | {'citations': [[{'type': 'web_search_result_location',
369 | 'cited_text': 'Sea Otters allow themselves to get entangled in kelp forests this creates a tether so they don’t drift away on sleep currents as they sleep. ',
370 | 'url': 'https://www.mygreenworld.org/blog/facts-about-otters',
371 | 'title': 'Five Fast Facts about Otters — My Green World',
372 | 'encrypted_index': 'EpABCioIBxgCIiQ4ODk4YTFkYy0yMTNkLTRhNmYtOTljYi03ZTBlNTUzZDc0NWISDCMi/kxdYrQXVUX+ZxoMVvW3BHE29cyMhwAFIjBZEBw3PaH+XAslsXWMNucD7FqSwe5Fnnsfh2RzTX9x/q9XQ1Mm1Ke6JOreehNzVI0qFDkJYT4NCX8U4CjHHwoyLKtY66vhGAQ='}]],
373 | 'thinking_blocks': None}
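
Lisette also exports `cite_footnote` and `cite_footnotes` helpers from
`lisette.core`, which (as the names suggest) are for turning these
citation blocks into footnotes.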
374 |
375 | ## Streaming
376 |
377 | For real-time responses, use `stream=True` to get chunks as they’re
378 | generated rather than waiting for the complete response:
379 |
380 | ``` python
381 | chat = Chat(models[0])
382 | res_gen = chat("Concisely, what are the top 10 biggest animals?", stream=True)
383 | res_gen
384 | ```
385 |
386 |
387 |
388 | ``` python
389 | from litellm import ModelResponse, ModelResponseStream
390 | ```
391 |
392 | You can loop over the generator to get the partial responses:
393 |
394 | ``` python
395 | for chunk in res_gen:
396 | if isinstance(chunk,ModelResponseStream): print(chunk.choices[0].delta.content,end='')
397 | ```
398 |
399 | Here are the top 10 biggest animals by size/weight:
400 |
401 | 1. **Blue whale** - largest animal ever, up to 100 feet long
402 | 2. **Fin whale** - second-largest whale, up to 85 feet
403 | 3. **Bowhead whale** - up to 65 feet, very heavy build
404 | 4. **Right whale** - up to 60 feet, extremely bulky
405 | 5. **Sperm whale** - up to 67 feet, largest toothed whale
406 | 6. **Gray whale** - up to 50 feet
407 | 7. **Humpback whale** - up to 52 feet
408 | 8. **African elephant** - largest land animal, up to 13 feet tall
409 | 9. **Colossal squid** - up to 46 feet long (largest invertebrate)
410 | 10. **Giraffe** - tallest animal, up to 18 feet tall
411 |
412 | *Note: Various whale species dominate due to the ocean's ability to support massive body sizes.*None
413 |
414 | And the final chunk is the complete `ModelResponse`:
415 |
416 | ``` python
417 | chunk
418 | ```
419 |
420 | Here are the top 10 biggest animals by size/weight:
421 |
422 | 1. **Blue whale** - largest animal ever, up to 100 feet long
423 | 2. **Fin whale** - second-largest whale, up to 85 feet
424 | 3. **Bowhead whale** - up to 65 feet, very heavy build
425 | 4. **Right whale** - up to 60 feet, extremely bulky
426 | 5. **Sperm whale** - up to 67 feet, largest toothed whale
427 | 6. **Gray whale** - up to 50 feet
428 | 7. **Humpback whale** - up to 52 feet
429 | 8. **African elephant** - largest land animal, up to 13 feet tall
430 | 9. **Colossal squid** - up to 46 feet long (largest invertebrate)
431 | 10. **Giraffe** - tallest animal, up to 18 feet tall
432 |
433 | *Note: Various whale species dominate due to the ocean’s ability to
434 | support massive body sizes.*
435 |
436 |
437 |
438 | - id: `chatcmpl-xxx`
439 | - model: `claude-sonnet-4-20250514`
440 | - finish_reason: `stop`
441 | - usage:
442 | `Usage(completion_tokens=233, prompt_tokens=22, total_tokens=255, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None), prompt_tokens_details=None)`
443 |
444 |
445 |
446 | ## Async
447 |
448 | For web applications and concurrent operations, like in
449 | [FastHTML](https://fastht.ml), we recommend using
450 | [`AsyncChat`](https://lisette.answer.ai/core.html#asyncchat):
451 |
452 | ``` python
453 | chat = AsyncChat(models[0])
454 | await chat("Hi there")
455 | ```
456 |
457 | Hello! How are you doing today? Is there anything I can help you with?
458 |
459 |
460 |
461 | - id: `chatcmpl-xxx`
462 | - model: `claude-sonnet-4-20250514`
463 | - finish_reason: `stop`
464 | - usage:
465 | `Usage(completion_tokens=20, prompt_tokens=9, total_tokens=29, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0)`
466 |
467 |
468 |
469 | To wrap up, we’ll show an example of async + streaming + tool calling +
470 | search:
471 |
472 | ``` python
473 | chat = AsyncChat(models[0], search='l', tools=[add_numbers])
474 | res = await chat("""\
475 | Search the web for the avg weight, in kgs, of male African and Asian elephants. Then add the two.
476 | Keep your replies ultra concise! Dont search the web more than once please.
477 | """, max_steps=4, stream=True)
478 | await adisplay_stream(res) # this is a convenience function to make async streaming look great in notebooks!
479 | ```
480 |
481 | Based on the search results:
482 |
483 | **Male African elephants**:
484 | [\*](https://www.africa-safaris.com/How-Much-Does-An-Elephant-Weigh "How Much Does An Elephant Weigh")
485 | [\*](https://www.quora.com/What-is-the-average-weight-of-an-adult-African-elephant-in-pounds-and-tons "What is the average weight of an adult African elephant in pounds and tons? - Quora")
486 | Average weight is 5,000 kg (11,000 pounds)
487 |
488 | **Male Asian elephants**:
489 | [\*](https://www.ifaw.org/international/journal/difference-african-asian-elephants "African Elephants vs. Asian Elephants | IFAW")
490 | [\*](https://www.ifaw.org/international/journal/difference-african-asian-elephants "African Elephants vs. Asian Elephants | IFAW")
491 | Average weight is 3,600 kg (7,900 pounds)
492 |
493 |
494 | `add_numbers({"a": 5000, "b": 3600})` - `8600`
495 |
496 |
497 |
498 | **Total**: 8,600 kg
499 |
500 | ## Next steps
501 |
502 | Ready to dive deeper?
503 |
504 | - Check out the rest of the
505 | [documentation](https://lisette.answer.ai/core.html).
506 | - Visit the [GitHub repository](https://github.com/answerdotai/lisette)
507 | to contribute or report issues.
508 | - Join our [Discord community](https://discord.gg/y7cDEX7r)!
509 |
--------------------------------------------------------------------------------
/lisette/core.py:
--------------------------------------------------------------------------------
1 | """Lisette Core"""
2 |
3 | # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/00_core.ipynb.
4 |
5 | # %% auto 0
6 | __all__ = ['sonn45', 'opus45', 'detls_tag', 're_tools', 'effort', 'patch_litellm', 'remove_cache_ckpts', 'contents', 'mk_msg',
7 | 'fmt2hist', 'mk_msgs', 'stream_with_complete', 'lite_mk_func', 'ToolResponse', 'structured', 'cite_footnote',
8 | 'cite_footnotes', 'Chat', 'random_tool_id', 'mk_tc', 'mk_tc_req', 'mk_tc_result', 'mk_tc_results',
9 | 'astream_with_complete', 'AsyncChat', 'mk_tr_details', 'AsyncStreamFormatter', 'adisplay_stream']
10 |
11 | # %% ../nbs/00_core.ipynb
12 | import asyncio, base64, json, litellm, mimetypes, random, string
13 | from typing import Optional,Callable
14 | from html import escape
15 | from litellm import (acompletion, completion, stream_chunk_builder, Message,
16 | ModelResponse, ModelResponseStream, get_model_info, register_model, Usage)
17 | from litellm.utils import function_to_dict, StreamingChoices, Delta, ChatCompletionMessageToolCall, Function, Choices
18 | from toolslm.funccall import mk_ns, call_func, call_func_async, get_schema
19 | from fastcore.utils import *
20 | from fastcore.meta import delegates
21 | from fastcore import imghdr
22 | from dataclasses import dataclass
23 | from litellm.exceptions import ContextWindowExceededError
24 |
25 | # %% ../nbs/00_core.ipynb
26 | def patch_litellm(seed=0):
27 | "Patch litellm.ModelResponseBase such that `id` and `created` are fixed."
28 | from litellm.types.utils import ModelResponseBase
29 | @patch
30 | def __init__(self: ModelResponseBase, id=None, created=None, *args, **kwargs):
31 | self._orig___init__(id='chatcmpl-xxx', created=1000000000, *args, **kwargs)
32 |
33 | @patch
34 | def __setattr__(self: ModelResponseBase, name, value):
35 | if name == 'id': value = 'chatcmpl-xxx'
36 | elif name == 'created': value = 1000000000
37 | self._orig___setattr__(name, value)
38 |
39 | if seed is not None: random.seed(seed) # ensures random ids like tool call ids are deterministic
40 |
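# A usage sketch: call once up front so recorded responses are reproducible, e.g.
#   patch_litellm()
#   completion(...)  # responses now carry id='chatcmpl-xxx' and created=1000000000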
41 | # %% ../nbs/00_core.ipynb
42 | @patch
43 | def _repr_markdown_(self: litellm.ModelResponse):
44 | message = self.choices[0].message
45 | content = ''
46 | if mc:=message.content: content += mc[0]['text'] if isinstance(mc,list) else mc
47 | if message.tool_calls:
48 | tool_calls = [f"\n\n🔧 {nested_idx(tc,'function','name')}({nested_idx(tc,'function','arguments')})\n" for tc in message.tool_calls]
49 | content += "\n".join(tool_calls)
50 | if not content: content = str(message)
51 | details = [
52 | f"id: `{self.id}`",
53 | f"model: `{self.model}`",
54 | f"finish_reason: `{self.choices[0].finish_reason}`"
55 | ]
56 | if hasattr(self, 'usage') and self.usage: details.append(f"usage: `{self.usage}`")
57 | det_str = '\n- '.join(details)
58 |
59 | return f"""{content}
60 |
61 | <details>
62 |
63 | - {det_str}
64 |
65 | </details>"""
66 |
67 | # %% ../nbs/00_core.ipynb
68 | register_model({
69 | "claude-opus-4-5": {
70 | "litellm_provider": "anthropic", "mode": "chat",
71 | "max_tokens": 64000, "max_input_tokens": 200000, "max_output_tokens": 64000,
72 | "input_cost_per_token": 0.000005, "output_cost_per_token": 0.000025,
73 | "cache_creation_input_token_cost": 0.000005*1.25, "cache_read_input_token_cost": 0.000005*0.1,
74 | "supports_function_calling": True, "supports_parallel_function_calling": True,
75 | "supports_vision": True, "supports_prompt_caching": True, "supports_response_schema": True,
76 | "supports_system_messages": True, "supports_reasoning": True, "supports_assistant_prefill": True,
77 | "supports_tool_choice": True, "supports_computer_use": True, "supports_web_search": True
78 | }
79 | });
80 | sonn45 = "claude-sonnet-4-5"
81 | opus45 = "claude-opus-4-5"
82 |
83 | # %% ../nbs/00_core.ipynb
84 | def _bytes2content(data):
85 | "Convert bytes to litellm content dict (image, pdf, audio, video)"
86 | mtype = detect_mime(data)
87 | if not mtype: raise ValueError(f'Data must be a supported file type, got {data[:10]}')
88 | encoded = base64.b64encode(data).decode("utf-8")
89 | if mtype.startswith('image/'): return {'type': 'image_url', 'image_url': f'data:{mtype};base64,{encoded}'}
90 | return {'type': 'file', 'file': {'file_data': f'data:{mtype};base64,{encoded}'}}
91 |
92 | # %% ../nbs/00_core.ipynb
93 | def _add_cache_control(msg, # LiteLLM formatted msg
94 | ttl=None): # Cache TTL: '5m' (default) or '1h'
95 | "cache `msg` with default time-to-live (ttl) of 5minutes ('5m'), but can be set to '1h'."
96 | if isinstance(msg["content"], str):
97 | msg["content"] = [{"type": "text", "text": msg["content"]}]
98 | cache_control = {"type": "ephemeral"}
99 | if ttl is not None: cache_control["ttl"] = ttl
100 | if isinstance(msg["content"], list) and msg["content"]:
101 | msg["content"][-1]["cache_control"] = cache_control
102 | return msg
103 |
104 | def _has_cache(msg):
105 | return msg["content"] and isinstance(msg["content"], list) and ('cache_control' in msg["content"][-1])
106 |
107 | def remove_cache_ckpts(msg):
108 | "remove cache checkpoints and return msg."
109 | if _has_cache(msg): msg["content"][-1].pop('cache_control', None)
110 | return msg
111 |
112 | def _mk_content(o):
113 | if isinstance(o, str): return {'type':'text','text':o.strip() or '.'}
114 | elif isinstance(o,bytes): return _bytes2content(o)
115 | return o
116 |
117 | def contents(r):
118 | "Get message object from response `r`."
119 | return r.choices[0].message
120 |
121 | # %% ../nbs/00_core.ipynb
122 | def mk_msg(
123 | content, # Content: str, bytes (image), list of mixed content, or dict w 'role' and 'content' fields
124 | role="user", # Message role if content isn't already a dict/Message
125 | cache=False, # Enable Anthropic caching
126 | ttl=None # Cache TTL: '5m' (default) or '1h'
127 | ):
128 | "Create a LiteLLM compatible message."
129 | if isinstance(content, dict) or isinstance(content, Message): return content
130 | if isinstance(content, ModelResponse): return contents(content)
131 | if isinstance(content, list) and len(content) == 1 and isinstance(content[0], str): c = content[0]
132 | elif isinstance(content, list): c = [_mk_content(o) for o in content]
133 | else: c = content
134 | msg = {"role": role, "content": c}
135 | return _add_cache_control(msg, ttl=ttl) if cache else msg
136 |
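# Usage sketches (illustrative; `img_bytes` stands for some image file's bytes):
#   mk_msg("hi")              -> {'role': 'user', 'content': 'hi'}
#   mk_msg([img_bytes, "hi"]) -> content list mixing an image part and a text part
#   mk_msg("hi", cache=True)  -> content list whose last part carries `cache_control`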
137 | # %% ../nbs/00_core.ipynb
138 | detls_tag = ""
139 | re_tools = re.compile(fr"^({detls_tag}\n+```json\n+(.*?)\n+```\n+ )", flags=re.DOTALL|re.MULTILINE)
140 |
141 | # %% ../nbs/00_core.ipynb
142 | def _extract_tool(text:str)->tuple[dict,dict]:
143 | "Extract tool call and results from block"
144 | d = json.loads(text.strip())
145 | call = d['call']
146 | func = call['function']
147 | tc = ChatCompletionMessageToolCall(Function(dumps(call['arguments']),func), d['id'])
148 | tr = {'role': 'tool','tool_call_id': d['id'],'name': func, 'content': d['result']}
149 | return tc,tr
150 |
151 | def fmt2hist(outp:str)->list:
152 | "Transform a formatted output into a LiteLLM compatible history"
153 | lm,hist = Message(),[]
154 | spt = re_tools.split(outp)
155 | for txt,_,tooljson in chunked(spt, 3, pad=True):
156 | txt = txt.strip() if tooljson or txt.strip() else '.'
157 | hist.append(lm:=Message(txt))
158 | if tooljson:
159 | tcr = _extract_tool(tooljson)
160 | if not hist: hist.append(lm) # if LLM calls a tool without talking
161 | lm.tool_calls = lm.tool_calls+[tcr[0]] if lm.tool_calls else [tcr[0]]
162 | hist.append(tcr[1])
163 | return hist
164 |
165 | # %% ../nbs/00_core.ipynb
166 | def _apply_cache_idxs(msgs, cache_idxs=[-1], ttl=None):
167 | 'Add cache control to idxs after filtering tools'
168 | ms = L(msgs).filter(lambda m: not (m.get('tool_calls', []) or m['role'] == 'tool'))
169 | for i in cache_idxs:
170 | try: _add_cache_control(ms[i], ttl)
171 | except IndexError: continue
172 |
173 | # %% ../nbs/00_core.ipynb
174 | def mk_msgs(
175 | msgs, # List of messages (each: str, bytes, list, or dict w 'role' and 'content' fields)
176 | cache=False, # Enable Anthropic caching
177 | cache_idxs=[-1], # Cache breakpoint idxs
178 | ttl=None, # Cache TTL: '5m' (default) or '1h'
179 | ):
180 | "Create a list of LiteLLM compatible messages."
181 | if not msgs: return []
182 | if not isinstance(msgs, list): msgs = [msgs]
183 | res,role = [],'user'
184 | msgs = L(msgs).map(lambda m: fmt2hist(m) if isinstance(m,str) and detls_tag in m else [m]).concat()  # only str msgs can embed formatted tool-call blocks
185 | for m in msgs:
186 | res.append(msg:=remove_cache_ckpts(mk_msg(m, role=role)))
187 | role = 'assistant' if msg['role'] in ('user','function', 'tool') else 'user'
188 | if cache: _apply_cache_idxs(res, cache_idxs, ttl)
189 | return res
190 |
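# Plain strings alternate roles automatically, e.g. (illustrative):
#   mk_msgs(["hi", "hello!", "bye"]) -> user / assistant / user messages, in that order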
191 | # %% ../nbs/00_core.ipynb
192 | def stream_with_complete(gen, postproc=noop):
193 | "Extend streaming response chunks with the complete response"
194 | chunks = []
195 | for chunk in gen:
196 | chunks.append(chunk)
197 | yield chunk
198 | postproc(chunks)
199 | return stream_chunk_builder(chunks)
200 |
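# The complete response is the generator's *return* value, so callers capture it with
# `res = yield from stream_with_complete(gen)` (as `Chat._call` does below), or read
# `StopIteration.value` when driving the generator by hand.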
201 | # %% ../nbs/00_core.ipynb
202 | def lite_mk_func(f):
203 | if isinstance(f, dict): return f
204 | return {'type':'function', 'function':get_schema(f, pname='parameters')}
205 |
206 | # %% ../nbs/00_core.ipynb
207 | @dataclass
208 | class ToolResponse:
209 | content: list  # content parts (str/dict) passed back to the model as the tool result
210 |
211 | # %% ../nbs/00_core.ipynb
212 | def _lite_call_func(tc,ns,raise_on_err=True):
213 | try: fargs = json.loads(tc.function.arguments)
214 | except Exception as e: raise ValueError(f"Failed to parse function arguments: {tc.function.arguments}") from e
215 | res = call_func(tc.function.name, fargs,ns=ns)
216 | if isinstance(res, ToolResponse): res = res.content
217 | else: res = str(res)
218 | return {"tool_call_id": tc.id, "role": "tool", "name": tc.function.name, "content": res}
219 |
220 | # %% ../nbs/00_core.ipynb
221 | @delegates(completion)
222 | def structured(
223 | m:str, # LiteLLM model string
224 | msgs:list, # List of messages
225 | tool:Callable, # Tool to be used for creating the structured output (class, dataclass or Pydantic, function, etc)
226 | **kwargs):
227 | "Return the value of the tool call (generally used for structured outputs)"
228 | t = lite_mk_func(tool)
229 | r = completion(m, msgs, tools=[t], tool_choice=t, **kwargs)
230 | args = json.loads(r.choices[0].message.tool_calls[0].function.arguments)
231 | return tool(**args)
232 |
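# A sketch of structured output (assumes a simple user-defined dataclass):
#   @dataclass
#   class Person:
#       "A person's details"
#       name:str; age:int
#   structured(sonn45, [mk_msg("John is 42")], Person)  # -> Person(name='John', age=42)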
233 | # %% ../nbs/00_core.ipynb
234 | def _has_search(m):
235 | i = get_model_info(m)
236 | return bool(i.get('search_context_cost_per_query') or i.get('supports_web_search'))
237 |
238 | # %% ../nbs/00_core.ipynb
239 | def cite_footnote(msg):
240 | if not (delta:=nested_idx(msg, 'choices', 0, 'delta')): return
241 | if citation:= nested_idx(delta, 'provider_specific_fields', 'citation'):
242 | title = citation['title'].replace('"', '\\"')
243 | delta.content = f'[*]({citation["url"]} "{title}") '
244 |
245 | def cite_footnotes(stream_list):
246 | "Add markdown footnote citations to stream deltas"
247 | for msg in stream_list: cite_footnote(msg)
248 |
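# e.g. a delta carrying provider_specific_fields={'citation': {'url': 'https://example.com', 'title': 'Example'}}
# (illustrative values) has its content rewritten to the markdown footnote `[*](https://example.com "Example") `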
249 | # %% ../nbs/00_core.ipynb
250 | effort = AttrDict({o[0]:o for o in ('low','medium','high')})  # {'l':'low', 'm':'medium', 'h':'high'}
251 |
252 | # %% ../nbs/00_core.ipynb
253 | def _mk_prefill(pf): return ModelResponseStream([StreamingChoices(delta=Delta(content=pf,role='assistant'))])
254 |
255 | # %% ../nbs/00_core.ipynb
256 | def _trunc_str(s, mx=2000, replace=""):
257 | "Truncate `s` to `mx` chars max, adding `replace` if truncated"
258 | s = str(s).strip()
259 | if len(s)<=mx: return s
260 | s = s[:mx]
261 | ss = s.split(' ')
262 | if len(ss[-1])>50: ss[-1] = ss[-1][:5]
263 | s = ' '.join(ss)
264 | return s+replace
265 |
266 | # %% ../nbs/00_core.ipynb
267 | _final_prompt = dict(role="user", content="You have no more tool uses. Please summarize your findings. If you did not complete your goal please tell the user what further work needs to be done so they can choose how best to proceed.")
268 |
269 | _cwe_msg = "ContextWindowExceededError: Do no more tool calls and complete your response now. Inform user that you ran out of context and explain what the cause was. This is the response to this tool call, truncated if needed: "
270 |
271 | # %% ../nbs/00_core.ipynb
272 | class Chat:
273 | def __init__(
274 | self,
275 | model:str, # LiteLLM compatible model name
276 | sp='', # System prompt
277 | temp=0, # Temperature
278 | search=False, # Search (l,m,h), if model supports it
279 | tools:list=None, # Add tools
280 | hist:list=None, # Chat history
281 | ns:Optional[dict]=None, # Custom namespace for tool calling
282 | cache=False, # Anthropic prompt caching
283 | cache_idxs:list=[-1], # Anthropic cache breakpoint idxs, use `0` for sys prompt if provided
284 | ttl=None, # Anthropic prompt caching ttl
285 | api_base=None, # API base URL for custom providers
286 | api_key=None, # API key for custom providers
287 | ):
288 | "LiteLLM chat client."
289 | self.model = model
290 | hist,tools = mk_msgs(hist,cache,cache_idxs,ttl),listify(tools)
291 | if ns is None and tools: ns = mk_ns(tools)
292 | elif ns is None: ns = globals()
293 | self.tool_schemas = [lite_mk_func(t) for t in tools] if tools else None
294 | store_attr()
295 |
296 | def _prep_msg(self, msg=None, prefill=None):
297 | "Prepare the messages list for the API call"
298 | sp = [{"role": "system", "content": self.sp}] if self.sp else []
299 | if sp:
300 | if 0 in self.cache_idxs: sp[0] = _add_cache_control(sp[0])
301 | cache_idxs = L(self.cache_idxs).filter().map(lambda o: o-1 if o>0 else o)  # idx 0 (sys prompt) handled above; drop it and shift positive idxs down by one
302 | else:
303 | cache_idxs = self.cache_idxs
304 | if msg: self.hist = mk_msgs(self.hist+[msg], self.cache and 'claude' in self.model, cache_idxs, self.ttl)
305 | pf = [{"role":"assistant","content":prefill}] if prefill else []
306 | return sp + self.hist + pf
307 |
308 | # %% ../nbs/00_core.ipynb
309 | def _filter_srvtools(tcs): return L(tcs).filter(lambda o: not o.id.startswith('srvtoolu_')) if tcs else None  # server-executed tools (e.g. Anthropic web search) need no local call
310 |
311 | # %% ../nbs/00_core.ipynb
312 | @patch
313 | def _call(self:Chat, msg=None, prefill=None, temp=None, think=None, search=None, stream=False, max_steps=2, step=1, final_prompt=None, tool_choice=None, **kwargs):
314 | "Internal method that always yields responses"
315 | if step>max_steps: return
316 | try:
317 | model_info = get_model_info(self.model)
318 | except Exception:
319 | register_model({self.model: {}})
320 | model_info = get_model_info(self.model)
321 | if not model_info.get("supports_assistant_prefill"): prefill=None
322 | if _has_search(self.model) and (s:=ifnone(search,self.search)): kwargs['web_search_options'] = {"search_context_size": effort[s]}
323 | else: _=kwargs.pop('web_search_options',None)
324 | if self.api_base: kwargs['api_base'] = self.api_base
325 | if self.api_key: kwargs['api_key'] = self.api_key
326 | res = completion(
327 | model=self.model, messages=self._prep_msg(msg, prefill), stream=stream,
328 | tools=self.tool_schemas, reasoning_effort = effort.get(think), tool_choice=tool_choice,
329 | # temperature is not supported when reasoning
330 | temperature=None if think else ifnone(temp,self.temp),
331 | caching=self.cache and 'claude' not in self.model,
332 | **kwargs)
333 | if stream:
334 | if prefill: yield _mk_prefill(prefill)
335 | res = yield from stream_with_complete(res,postproc=cite_footnotes)
336 | m = contents(res)
337 | if prefill: m.content = prefill + m.content
338 | self.hist.append(m)
339 | yield res
340 |
341 | if tcs := _filter_srvtools(m.tool_calls):
342 | tool_results=[_lite_call_func(tc, ns=self.ns) for tc in tcs]
343 | self.hist+=tool_results
344 | for r in tool_results: yield r
345 | if step>=max_steps-1: prompt,tool_choice,search = final_prompt,'none',False
346 | else: prompt = None
347 | try: yield from self._call(
348 | prompt, prefill, temp, think, search, stream, max_steps, step+1,
349 | final_prompt, tool_choice, **kwargs)
350 | except ContextWindowExceededError:
351 | for t in tool_results:
352 | if len(t['content'])>1000: t['content'] = _cwe_msg + _trunc_str(t['content'], mx=1000)
353 | yield from self._call(None, prefill, temp, think, search, stream, max_steps, max_steps, final_prompt, 'none', **kwargs)
354 |
355 | # %% ../nbs/00_core.ipynb
356 | @patch
357 | @delegates(Chat._call)
358 | def __call__(self:Chat,
359 | msg=None, # Message str, or list of multiple message parts
360 | prefill=None, # Prefill AI response if model supports it
361 | temp=None, # Override temp set on chat initialization
362 | think=None, # Thinking (l,m,h)
363 | search=None, # Override search set on chat initialization (l,m,h)
364 | stream=False, # Stream results
365 | max_steps=2, # Maximum number of tool calls
366 | final_prompt=_final_prompt, # Final prompt when tool calls have run out
367 | return_all=False, # Returns all intermediate ModelResponses if not streaming and has tool calls
368 | **kwargs):
369 | "Main call method - handles streaming vs non-streaming"
370 | result_gen = self._call(msg, prefill, temp, think, search, stream, max_steps, 1, final_prompt, **kwargs)
371 | if stream: return result_gen # streaming
372 | elif return_all: return list(result_gen) # toolloop behavior
373 | else: return last(result_gen) # normal chat behavior
374 |
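# Typical call patterns (illustrative):
#   chat = Chat(sonn45, sp="Be terse.")
#   r = chat("Hi")                                   # returns the final ModelResponse
#   for chunk in chat("Hi again", stream=True): ...  # yields deltas, then the full response
#   rs = chat("Use a tool", return_all=True)         # all intermediate responses + tool results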
375 | # %% ../nbs/00_core.ipynb
376 | @patch
377 | def print_hist(self:Chat):
378 | "Print each message on a different line"
379 | for r in self.hist: print(r, end='\n\n')
380 |
381 | # %% ../nbs/00_core.ipynb
382 | def random_tool_id():
383 | "Generate a random tool ID with 'toolu_' prefix"
384 | random_part = ''.join(random.choices(string.ascii_letters + string.digits, k=25))
385 | return f'toolu_{random_part}'
386 |
387 | # %% ../nbs/00_core.ipynb
388 | def mk_tc(func, args, tcid=None, idx=1):
389 | if not tcid: tcid = random_tool_id()
390 | return {'index': idx, 'function': {'arguments': args, 'name': func}, 'id': tcid, 'type': 'function'}
391 |
392 | # %% ../nbs/00_core.ipynb
393 | def mk_tc_req(content, tcs):
394 | msg = Message(content=content, role='assistant', tool_calls=tcs, function_call=None)
395 | msg.tool_calls = [{**dict(tc), 'function': dict(tc['function'])} for tc in msg.tool_calls]
396 | return msg
397 |
398 | # %% ../nbs/00_core.ipynb
399 | def mk_tc_result(tc, result): return {'tool_call_id': tc['id'], 'role': 'tool', 'name': tc['function']['name'], 'content': result}
400 |
401 | # %% ../nbs/00_core.ipynb
402 | def mk_tc_results(tcq, results): return [mk_tc_result(a,b) for a,b in zip(tcq.tool_calls, results)]
403 |
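# Together these fabricate a synthetic tool-call exchange for seeding history, e.g. (illustrative):
#   tc   = mk_tc('add_numbers', '{"a": 1, "b": 2}')
#   req  = mk_tc_req('Let me add those.', [tc])
#   hist = ['What is 1+2?', req, *mk_tc_results(req, ['3'])]
#   chat = Chat(sonn45, hist=hist)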
404 | # %% ../nbs/00_core.ipynb
405 | async def _alite_call_func(tc, ns, raise_on_err=True):
406 | try: fargs = json.loads(tc.function.arguments)
407 | except Exception as e: raise ValueError(f"Failed to parse function arguments: {tc.function.arguments}") from e
408 | res = await call_func_async(tc.function.name, fargs, ns=ns)
409 | if isinstance(res, ToolResponse): res = res.content
410 | else: res = str(res)
411 | return {"tool_call_id": tc.id, "role": "tool", "name": tc.function.name, "content": res}
412 |
413 | # %% ../nbs/00_core.ipynb
414 | @asave_iter
415 | async def astream_with_complete(self, agen, postproc=noop):
416 | chunks = []
417 | async for chunk in agen:
418 | chunks.append(chunk)
419 | postproc(chunk)
420 | yield chunk
421 | self.value = stream_chunk_builder(chunks)
422 |
423 | # %% ../nbs/00_core.ipynb
424 | class AsyncChat(Chat):
425 | async def _call(self, msg=None, prefill=None, temp=None, think=None, search=None, stream=False, max_steps=2, step=1, final_prompt=None, tool_choice=None, **kwargs):
426 | if step>max_steps+1: return
427 | if not get_model_info(self.model).get("supports_assistant_prefill"): prefill=None
428 | if _has_search(self.model) and (s:=ifnone(search,self.search)): kwargs['web_search_options'] = {"search_context_size": effort[s]}
429 | else: _=kwargs.pop('web_search_options',None)
430 | res = await acompletion(model=self.model, messages=self._prep_msg(msg, prefill), stream=stream,
431 | tools=self.tool_schemas, reasoning_effort=effort.get(think), tool_choice=tool_choice,
432 | # temperature is not supported when reasoning
433 | temperature=None if think else ifnone(temp,self.temp),
434 | caching=self.cache and 'claude' not in self.model,
435 | **kwargs)
436 | if stream:
437 | if prefill: yield _mk_prefill(prefill)
438 | res = astream_with_complete(res,postproc=cite_footnote)
439 | async for chunk in res: yield chunk
440 | res = res.value
441 | m=contents(res)
442 | if prefill: m.content = prefill + m.content
443 | yield res
444 | self.hist.append(m)
445 |
446 | if tcs := _filter_srvtools(m.tool_calls):
447 | tool_results = []
448 | for tc in tcs:
449 | result = await _alite_call_func(tc, ns=self.ns)
450 | tool_results.append(result)
451 | yield result
452 | self.hist+=tool_results
453 | if step>=max_steps-1: prompt,tool_choice,search = final_prompt,'none',False
454 | else: prompt = None
455 | try:
456 | async for result in self._call(
457 | prompt, prefill, temp, think, search, stream, max_steps, step+1,
458 | final_prompt, tool_choice=tool_choice, **kwargs): yield result
459 | except ContextWindowExceededError:
460 | for t in tool_results:
461 | if len(t['content'])>1000: t['content'] = _cwe_msg + _trunc_str(t['content'], mx=1000)
462 | async for result in self._call(
463 | prompt, prefill, temp, think, search, stream, max_steps, step+1,
464 | final_prompt, tool_choice='none', **kwargs): yield result
465 |
466 | # %% ../nbs/00_core.ipynb
467 | @patch
468 | @delegates(Chat._call)
469 | async def __call__(
470 | self:AsyncChat,
471 | msg=None, # Message str, or list of multiple message parts
472 | prefill=None, # Prefill AI response if model supports it
473 | temp=None, # Override temp set on chat initialization
474 | think=None, # Thinking (l,m,h)
475 | search=None, # Override search set on chat initialization (l,m,h)
476 | stream=False, # Stream results
477 | max_steps=2, # Maximum number of tool calls
478 | final_prompt=_final_prompt, # Final prompt when tool calls have run out
479 | return_all=False, # Returns all intermediate ModelResponses if not streaming and has tool calls
480 | **kwargs
481 | ):
482 | result_gen = self._call(msg, prefill, temp, think, search, stream, max_steps, 1, final_prompt, **kwargs)
483 | if stream or return_all: return result_gen
484 | async for res in result_gen: pass
485 | return res # normal chat behavior only return last msg
486 |
487 | # %% ../nbs/00_core.ipynb
488 | def mk_tr_details(tr, tc, mx=2000):
489 | "Create block for tool call as JSON"
490 | args = {k:_trunc_str(v, mx=mx) for k,v in json.loads(tc.function.arguments).items()}
491 | res = {'id':tr['tool_call_id'],
492 | 'call':{'function': tc.function.name, 'arguments': args},
493 | 'result':_trunc_str(tr.get('content'), mx=mx),}
494 | return f"\n\n{detls_tag}\n\n```json\n{dumps(res, indent=2)}\n```\n\n</details>\n\n"
495 |
496 | # %% ../nbs/00_core.ipynb
497 | class AsyncStreamFormatter:
498 | def __init__(self, include_usage=False, mx=2000):
499 | self.outp,self.tcs,self.include_usage,self.mx = '',{},include_usage,mx
500 |
501 | def format_item(self, o):
502 | "Format a single item from the response stream."
503 | res = ''
504 | if isinstance(o, ModelResponseStream):
505 | d = o.choices[0].delta
506 | if nested_idx(d, 'reasoning_content') and d['reasoning_content']!='{"text": ""}':
507 | res+= '🧠' if not self.outp or self.outp[-1]=='🧠' else '\n\n🧠' # gemini can interleave reasoning
508 | elif self.outp and self.outp[-1] == '🧠': res+= '\n\n'
509 | if c:=d.content: # gemini has text content in last reasoning chunk
510 | res+=f"\n\n{c}" if res and res[-1] == '🧠' else c
511 | elif isinstance(o, ModelResponse):
512 | if self.include_usage: res += f"\nUsage: {o.usage}"
513 | if c:=getattr(contents(o),'tool_calls',None):
514 | self.tcs = {tc.id:tc for tc in c}
515 | elif isinstance(o, dict) and 'tool_call_id' in o:
516 | res += mk_tr_details(o, self.tcs.pop(o['tool_call_id']), mx=self.mx)
517 | self.outp+=res
518 | return res
519 |
520 | async def format_stream(self, rs):
521 | "Format the response stream for markdown display."
522 | async for o in rs: yield self.format_item(o)
523 |
524 | # %% ../nbs/00_core.ipynb
525 | async def adisplay_stream(rs):
526 | "Use IPython.display to markdown display the response stream."
527 | try: from IPython.display import display, Markdown
528 | except ModuleNotFoundError: raise ModuleNotFoundError("This function requires ipython. Please run `pip install ipython` to use.")
529 | fmt = AsyncStreamFormatter()
530 | md = ''
531 | async for o in fmt.format_stream(rs):
532 | md+=o
533 | display(Markdown(md),clear=True)
534 | return fmt
535 |
--------------------------------------------------------------------------------
/nbs/01_usage.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "3bd3dbfa",
6 | "metadata": {},
7 | "source": [
8 | "# Usage\n",
9 | "\n",
10 | "> Lisette usage and cost monitoring "
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": null,
16 | "id": "c2c9427c",
17 | "metadata": {},
18 | "outputs": [],
19 | "source": [
20 | "#| default_exp usage"
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": null,
26 | "id": "5b6856c6",
27 | "metadata": {},
28 | "outputs": [],
29 | "source": [
30 | "#| export\n",
31 | "from litellm.integrations.custom_logger import CustomLogger\n",
32 | "from fastcore.utils import *\n",
33 | "import time\n",
34 | "try: from fastlite import *\n",
35 | "except ImportError: raise ImportError(\"Please install `fastlite` to use sqlite based lisette usage logging.\")"
36 | ]
37 | },
38 | {
39 | "cell_type": "code",
40 | "execution_count": null,
41 | "id": "743eedcf",
42 | "metadata": {},
43 | "outputs": [],
44 | "source": [
45 | "import litellm, importlib, httpx\n",
46 | "from lisette.core import Chat, AsyncChat, patch_litellm\n",
47 | "from cachy import enable_cachy, disable_cachy\n",
48 | "from fastcore.test import *"
49 | ]
50 | },
51 | {
52 | "cell_type": "code",
53 | "execution_count": null,
54 | "id": "69144ce3",
55 | "metadata": {},
56 | "outputs": [],
57 | "source": [
58 | "#| hide\n",
59 | "enable_cachy()"
60 | ]
61 | },
62 | {
63 | "cell_type": "markdown",
64 | "id": "2cff8990",
65 | "metadata": {},
66 | "source": [
67 | "## Lisette Usage Logger"
68 | ]
69 | },
70 | {
71 | "cell_type": "code",
72 | "execution_count": null,
73 | "id": "c9acabfc",
74 | "metadata": {},
75 | "outputs": [],
76 | "source": [
77 | "_ = importlib.reload(litellm) # to re-run the notebook without kernel restart"
78 | ]
79 | },
80 | {
81 | "cell_type": "code",
82 | "execution_count": null,
83 | "id": "f85329c0",
84 | "metadata": {},
85 | "outputs": [],
86 | "source": [
87 | "# litellm._turn_on_debug()"
88 | ]
89 | },
90 | {
91 | "cell_type": "code",
92 | "execution_count": null,
93 | "id": "7beb5064",
94 | "metadata": {},
95 | "outputs": [],
96 | "source": [
97 | "patch_litellm()"
98 | ]
99 | },
100 | {
101 | "cell_type": "code",
102 | "execution_count": null,
103 | "id": "aed71558",
104 | "metadata": {},
105 | "outputs": [],
106 | "source": [
107 | "#| export\n",
108 | "class Usage: id:int; timestamp:float; model:str; user_id:str; prompt_tokens:int; completion_tokens:int; total_tokens:int; cached_tokens:int; cache_creation_tokens:int; cache_read_tokens:int; web_search_requests:int; response_cost:int"
109 | ]
110 | },
111 | {
112 | "cell_type": "markdown",
113 | "id": "bc4301de",
114 | "metadata": {},
115 | "source": [
116 | "Anthropic provides web search request counts directly via `usage.server_tool_use.web_search_requests`, billed at $10 per 1,000 searches ([pricing](https://docs.claude.com/en/docs/about-claude/pricing)). Gemini returns queries in `groundingMetadata.webSearchQueries`—each query counts as a separate billable use—with 5,000 free prompts per month, then $14 per 1,000 search queries (coming soon) ([pricing](https://ai.google.dev/gemini-api/docs/pricing), [grounding docs](https://ai.google.dev/gemini-api/docs/google-search))."
117 | ]
118 | },
119 | {
120 | "cell_type": "code",
121 | "execution_count": null,
122 | "id": "4323da97",
123 | "metadata": {},
124 | "outputs": [],
125 | "source": [
126 | "#| export\n",
127 | "def search_count(r):\n",
128 | " if cnt := nested_idx(r.usage, 'server_tool_use', 'web_search_requests'): return cnt # Anthropic\n",
129 | " if meta := getattr(r, 'vertex_ai_grounding_metadata', None): # Gemini\n",
130 | " if meta and (queries := meta[0].get('webSearchQueries')): return len(queries)\n",
131 | " if cnt := nested_idx(r.usage, 'prompt_tokens_details', 'web_search_requests'): return cnt # streaming with `include_usage`\n",
132 | " return 0"
133 | ]
134 | },
135 | {
136 | "cell_type": "markdown",
137 | "id": "c9bf5fc1",
138 | "metadata": {},
139 | "source": [
140 | "The precomputed response cost provided is available in `kwargs['response_cost']` according to the [litellm docs](https://docs.litellm.ai/docs/observability/custom_callback#whats-available-in-kwargs):"
141 | ]
142 | },
143 | {
144 | "cell_type": "code",
145 | "execution_count": null,
146 | "id": "0ad2e088",
147 | "metadata": {},
148 | "outputs": [],
149 | "source": [
150 | "#| export\n",
151 | "class LisetteUsageLogger(CustomLogger):\n",
152 | " def __init__(self, db_path): \n",
153 | " self.db = Database(db_path)\n",
154 | " self.usage = self.db.create(Usage)\n",
155 | " \n",
156 | " async def async_log_success_event(self, kwargs, response_obj, start_time, end_time): self._log_usage(response_obj, kwargs['response_cost'], start_time, end_time)\n",
157 | " def log_success_event(self, kwargs, response_obj, start_time, end_time): self._log_usage(response_obj, kwargs['response_cost'], start_time, end_time)\n",
158 | " def _log_usage(self, response_obj, response_cost, start_time, end_time):\n",
159 | " usage = response_obj.usage\n",
160 | " ptd = usage.prompt_tokens_details\n",
161 | " self.usage.insert(Usage(timestamp=time.time(),\n",
162 | " model=response_obj.model,\n",
163 | " user_id=self.user_id_fn(),\n",
164 | " prompt_tokens=usage.prompt_tokens,\n",
165 | " completion_tokens=usage.completion_tokens,\n",
166 | " total_tokens=usage.total_tokens,\n",
167 | " cached_tokens=ptd.cached_tokens if ptd else 0, # used by gemini (read tokens)\n",
168 | " cache_creation_tokens=nested_idx(usage, 'cache_creation_input_tokens'),\n",
169 | " cache_read_tokens=nested_idx(usage, 'cache_read_input_tokens'), # used by anthropic\n",
170 | " web_search_requests=search_count(response_obj),\n",
171 | " response_cost=response_cost))\n",
172 | " \n",
173 | " def user_id_fn(self): raise NotImplementedError('Please implement `LisetteUsageLogger.user_id_fn` before initializing, e.g using fastcore.patch.')"
174 | ]
175 | },
176 | {
177 | "cell_type": "markdown",
178 | "id": "3bdfd5ca",
179 | "metadata": {},
180 | "source": [
181 | "## Cost Utils"
182 | ]
183 | },
184 | {
185 | "cell_type": "code",
186 | "execution_count": null,
187 | "id": "5ce652ed",
188 | "metadata": {},
189 | "outputs": [],
190 | "source": [
191 | "class PrefixDict(dict):\n",
192 | " def __getitem__(self, key):\n",
193 | " if key in self.keys(): return super().__getitem__(key)\n",
194 | " for k in self.keys(): \n",
195 | " if key.startswith(k): return super().__getitem__(k)\n",
196 | " raise KeyError(key)"
197 | ]
198 | },
199 | {
200 | "cell_type": "code",
201 | "execution_count": null,
202 | "id": "847758d3",
203 | "metadata": {},
204 | "outputs": [],
205 | "source": [
206 | "model_prices = PrefixDict({\n",
207 | " 'claude-sonnet-4-5': dict(input_prc = 3/1e6, cache_write_prc = 3.75/1e6, cache_read_prc = 0.3/1e6, output_prc = 15/1e6, web_search_prc = 10/1e3)\n",
208 | "})"
209 | ]
210 | },
211 | {
212 | "cell_type": "markdown",
213 | "id": "42be909f",
214 | "metadata": {},
215 | "source": [
216 | "Simplified cost utils to demonstrate total cost calculation (use `Usage.response_cost` in prod):"
217 | ]
218 | },
219 | {
220 | "cell_type": "code",
221 | "execution_count": null,
222 | "id": "6624d659",
223 | "metadata": {},
224 | "outputs": [],
225 | "source": [
226 | "@patch(as_prop=True)\n",
227 | "def inp_cost(self:Usage): return model_prices[self.model]['input_prc'] * (self.prompt_tokens - self.cache_read_tokens)\n",
228 | "@patch(as_prop=True)\n",
229 | "def cache_write_cost(self:Usage): return model_prices[self.model]['cache_write_prc'] * self.cache_creation_tokens\n",
230 | "@patch(as_prop=True)\n",
231 | "def cache_read_cost(self:Usage): return model_prices[self.model]['cache_read_prc'] * self.cache_read_tokens\n",
232 | "@patch(as_prop=True)\n",
233 | "def out_cost(self:Usage): return model_prices[self.model]['output_prc'] * self.completion_tokens\n",
234 | "@patch(as_prop=True)\n",
235 | "def web_cost(self:Usage): return model_prices[self.model]['web_search_prc'] * ifnone(self.web_search_requests, 0)\n",
236 | "@patch(as_prop=True)\n",
237 | "def cost(self:Usage): return self.inp_cost + self.cache_write_cost + self.cache_read_cost + self.out_cost + self.web_cost\n"
238 | ]
239 | },
240 | {
241 | "cell_type": "markdown",
242 | "id": "432ef6d0",
243 | "metadata": {},
244 | "source": [
245 | "A mapping of model pricing is also available in litellm, which is used to calculate the `response_cost`"
246 | ]
247 | },
248 | {
249 | "cell_type": "code",
250 | "execution_count": null,
251 | "id": "b90af6ed",
252 | "metadata": {},
253 | "outputs": [],
254 | "source": [
255 | "model_pricing = dict2obj(httpx.get(litellm.model_cost_map_url).json())"
256 | ]
257 | },
258 | {
259 | "cell_type": "code",
260 | "execution_count": null,
261 | "id": "35cc0ba6",
262 | "metadata": {},
263 | "outputs": [],
264 | "source": [
265 | "# model_pricing['claude-sonnet-4-5']"
266 | ]
267 | },
268 | {
269 | "cell_type": "code",
270 | "execution_count": null,
271 | "id": "19ff68bd",
272 | "metadata": {},
273 | "outputs": [],
274 | "source": [
275 | "# model_pricing['gemini-3-pro-preview']"
276 | ]
277 | },
278 | {
279 | "cell_type": "markdown",
280 | "id": "0fd2987b",
281 | "metadata": {},
282 | "source": [
283 | "## Examples"
284 | ]
285 | },
286 | {
287 | "cell_type": "code",
288 | "execution_count": null,
289 | "id": "a045f396",
290 | "metadata": {},
291 | "outputs": [],
292 | "source": [
293 | "from tempfile import NamedTemporaryFile\n",
294 | "tf =NamedTemporaryFile(suffix='.db')"
295 | ]
296 | },
297 | {
298 | "cell_type": "code",
299 | "execution_count": null,
300 | "id": "1e4a50ae",
301 | "metadata": {},
302 | "outputs": [],
303 | "source": [
304 | "@patch\n",
305 | "def user_id_fn(self:LisetteUsageLogger): return 'user-123'\n",
306 | "tf=NamedTemporaryFile(suffix='.db')\n",
307 | "logger = LisetteUsageLogger(tf.name)\n",
308 | "litellm.callbacks = [logger]"
309 | ]
310 | },
311 | {
312 | "cell_type": "code",
313 | "execution_count": null,
314 | "id": "5842bb0f",
315 | "metadata": {},
316 | "outputs": [],
317 | "source": [
318 | "slc = ','.join('id model user_id prompt_tokens completion_tokens total_tokens cached_tokens cache_creation_tokens cache_read_tokens web_search_requests response_cost'.split())"
319 | ]
320 | },
321 | {
322 | "cell_type": "code",
323 | "execution_count": null,
324 | "id": "ac32ac47",
325 | "metadata": {},
326 | "outputs": [],
327 | "source": [
328 | "# litellm.set_verbose = True"
329 | ]
330 | },
331 | {
332 | "cell_type": "markdown",
333 | "id": "1d0af81a",
334 | "metadata": {},
335 | "source": [
336 | "A simple example:"
337 | ]
338 | },
339 | {
340 | "cell_type": "code",
341 | "execution_count": null,
342 | "id": "a9215558",
343 | "metadata": {},
344 | "outputs": [],
345 | "source": [
346 | "chat = Chat('claude-sonnet-4-5-20250929')\n",
347 | "r = chat(\"What is 2+2?\")"
348 | ]
349 | },
350 | {
351 | "cell_type": "code",
352 | "execution_count": null,
353 | "id": "c4b82ed4",
354 | "metadata": {},
355 | "outputs": [
356 | {
357 | "data": {
358 | "text/plain": [
359 | "Usage(id=1, timestamp=UNSET, model='claude-sonnet-4-5-20250929', user_id='user-123', prompt_tokens=14, completion_tokens=11, total_tokens=25, cached_tokens=0, cache_creation_tokens=0, cache_read_tokens=0, web_search_requests=0, response_cost=0.000207)"
360 | ]
361 | },
362 | "execution_count": null,
363 | "metadata": {},
364 | "output_type": "execute_result"
365 | }
366 | ],
367 | "source": [
368 | "time.sleep(0.3) # wait for callback db write\n",
369 | "u = logger.usage(select=slc)[-1]; u"
370 | ]
371 | },
372 | {
373 | "cell_type": "markdown",
374 | "id": "39087125",
375 | "metadata": {},
376 | "source": [
377 | "Our calculated cost matches litellm's `response_cost`. In some cases it might be better to use the custom calculation as we'll see in the remaining of this notebook:"
378 | ]
379 | },
380 | {
381 | "cell_type": "code",
382 | "execution_count": null,
383 | "id": "367cb32f",
384 | "metadata": {},
385 | "outputs": [],
386 | "source": [
387 | "test_eq(u.cost, u.response_cost)"
388 | ]
389 | },
390 | {
391 | "cell_type": "markdown",
392 | "id": "55735017",
393 | "metadata": {},
394 | "source": [
395 | "Now, let's test with streaming:"
396 | ]
397 | },
398 | {
399 | "cell_type": "code",
400 | "execution_count": null,
401 | "id": "58b8cde6",
402 | "metadata": {},
403 | "outputs": [],
404 | "source": [
405 | "chat = Chat('claude-sonnet-4-5')\n",
406 | "res = chat(\"Count from 1 to 5\", stream=True)\n",
407 | "for o in res: pass"
408 | ]
409 | },
410 | {
411 | "cell_type": "code",
412 | "execution_count": null,
413 | "id": "72e8eb30",
414 | "metadata": {},
415 | "outputs": [
416 | {
417 | "data": {
418 | "text/plain": [
419 | "Usage(id=2, timestamp=UNSET, model='claude-sonnet-4-5', user_id='user-123', prompt_tokens=15, completion_tokens=17, total_tokens=32, cached_tokens=0, cache_creation_tokens=0, cache_read_tokens=0, web_search_requests=0, response_cost=0.00030000000000000003)"
420 | ]
421 | },
422 | "execution_count": null,
423 | "metadata": {},
424 | "output_type": "execute_result"
425 | }
426 | ],
427 | "source": [
428 | "time.sleep(0.3)\n",
429 | "u = logger.usage(select=slc)[-1]; u\n"
430 | ]
431 | },
432 | {
433 | "cell_type": "code",
434 | "execution_count": null,
435 | "id": "13fadb74",
436 | "metadata": {},
437 | "outputs": [],
438 | "source": [
439 | "test_eq(u.cost, u.response_cost)"
440 | ]
441 | },
442 | {
443 | "cell_type": "markdown",
444 | "id": "eab9bb4b",
445 | "metadata": {},
446 | "source": [
447 | "Streaming logged successfully. Let's also verify async chat calls are logged properly."
448 | ]
449 | },
450 | {
451 | "cell_type": "code",
452 | "execution_count": null,
453 | "id": "5270a8f1",
454 | "metadata": {},
455 | "outputs": [
456 | {
457 | "data": {
458 | "text/markdown": [
459 | "3 + 3 = 6\n",
460 | "\n",
461 | "\n",
462 | "\n",
463 | "- id: `chatcmpl-xxx`\n",
464 | "- model: `claude-sonnet-4-5-20250929`\n",
465 | "- finish_reason: `stop`\n",
466 | "- usage: `Usage(completion_tokens=13, prompt_tokens=14, total_tokens=27, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n",
467 | "\n",
468 | " "
469 | ],
470 | "text/plain": [
471 | "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='3 + 3 = 6', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=13, prompt_tokens=14, total_tokens=27, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0))"
472 | ]
473 | },
474 | "execution_count": null,
475 | "metadata": {},
476 | "output_type": "execute_result"
477 | }
478 | ],
479 | "source": [
480 | "chat_async = AsyncChat('claude-sonnet-4-5-20250929')\n",
481 | "await chat_async(\"What is 3+3?\")"
482 | ]
483 | },
484 | {
485 | "cell_type": "code",
486 | "execution_count": null,
487 | "id": "e7a75d42",
488 | "metadata": {},
489 | "outputs": [
490 | {
491 | "data": {
492 | "text/plain": [
493 | "Usage(id=2, timestamp=UNSET, model='claude-sonnet-4-5', user_id='user-123', prompt_tokens=15, completion_tokens=17, total_tokens=32, cached_tokens=0, cache_creation_tokens=0, cache_read_tokens=0, web_search_requests=0, response_cost=0.00030000000000000003)"
494 | ]
495 | },
496 | "execution_count": null,
497 | "metadata": {},
498 | "output_type": "execute_result"
499 | }
500 | ],
501 | "source": [
502 | "time.sleep(0.3)\n",
503 | "u = logger.usage(select=slc)[-1]; u"
504 | ]
505 | },
506 | {
507 | "cell_type": "code",
508 | "execution_count": null,
509 | "id": "1916085a",
510 | "metadata": {},
511 | "outputs": [],
512 | "source": [
513 | "test_eq(u.cost, u.response_cost)"
514 | ]
515 | },
516 | {
517 | "cell_type": "markdown",
518 | "id": "3f82d440",
519 | "metadata": {},
520 | "source": [
521 | "Finally, let's test async streaming to ensure all API patterns are covered."
522 | ]
523 | },
524 | {
525 | "cell_type": "code",
526 | "execution_count": null,
527 | "id": "d7791bff",
528 | "metadata": {},
529 | "outputs": [
530 | {
531 | "name": "stdout",
532 | "output_type": "stream",
533 | "text": [
534 | "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='10, 11, 12, 13, 14, 15', role='assistant', tool_calls=None, function_call=None, provider_specific_fields=None))], usage=Usage(completion_tokens=20, prompt_tokens=38, total_tokens=58, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None))\n"
535 | ]
536 | }
537 | ],
538 | "source": [
539 | "res = await chat_async(\"Count from 10 to 15\", stream=True)\n",
540 | "async for o in res: pass\n",
541 | "print(o)"
542 | ]
543 | },
544 | {
545 | "cell_type": "code",
546 | "execution_count": null,
547 | "id": "abd6b744",
548 | "metadata": {},
549 | "outputs": [
550 | {
551 | "data": {
552 | "text/plain": [
553 | "Usage(id=4, timestamp=UNSET, model='claude-sonnet-4-5-20250929', user_id='user-123', prompt_tokens=38, completion_tokens=20, total_tokens=58, cached_tokens=0, cache_creation_tokens=0, cache_read_tokens=0, web_search_requests=0, response_cost=0.00041400000000000003)"
554 | ]
555 | },
556 | "execution_count": null,
557 | "metadata": {},
558 | "output_type": "execute_result"
559 | }
560 | ],
561 | "source": [
562 | "time.sleep(0.3)\n",
563 | "u = logger.usage(select=slc)[-1]; u"
564 | ]
565 | },
566 | {
567 | "cell_type": "code",
568 | "execution_count": null,
569 | "id": "85ea9299",
570 | "metadata": {},
571 | "outputs": [],
572 | "source": [
573 | "test_eq(u.cost, u.response_cost)"
574 | ]
575 | },
576 | {
577 | "cell_type": "markdown",
578 | "id": "050cf3a5",
579 | "metadata": {},
580 | "source": [
581 | "### Search"
582 | ]
583 | },
584 | {
585 | "cell_type": "markdown",
586 | "id": "bc60ec86",
587 | "metadata": {},
588 | "source": [
589 | "Now let's run a prompt with web search:"
590 | ]
591 | },
592 | {
593 | "cell_type": "code",
594 | "execution_count": null,
595 | "id": "c79df374",
596 | "metadata": {},
597 | "outputs": [
598 | {
599 | "data": {
600 | "text/markdown": [
601 | "In New York City, as of Monday, December 15, 2025, it is mostly sunny with a temperature of 24°F (-4°C), feeling like 16°F (-9°C). The humidity is around 52%.\n",
602 | "\n",
603 | "The forecast for today, Monday, December 15, includes light snow during the day and partly cloudy skies at night, with a 20% chance of snow throughout the day and night. Temperatures are expected to range between 20°F (-7°C) and 28°F (-2°C), with humidity around 57%.\n",
604 | "\n",
605 | "\n",
606 | "\n",
607 | "- id: `chatcmpl-xxx`\n",
608 | "- model: `gemini-2.5-flash`\n",
609 | "- finish_reason: `stop`\n",
610 | "- usage: `Usage(completion_tokens=295, prompt_tokens=12, total_tokens=395, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=148, rejected_prediction_tokens=None, text_tokens=147, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=12, image_tokens=None))`\n",
611 | "\n",
612 | " "
613 | ],
614 | "text/plain": [
615 | "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-flash', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='In New York City, as of Monday, December 15, 2025, it is mostly sunny with a temperature of 24°F (-4°C), feeling like 16°F (-9°C). The humidity is around 52%.\\n\\nThe forecast for today, Monday, December 15, includes light snow during the day and partly cloudy skies at night, with a 20% chance of snow throughout the day and night. Temperatures are expected to range between 20°F (-7°C) and 28°F (-2°C), with humidity around 57%.', role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None, annotations=[{'type': 'url_citation', 'url_citation': {'end_index': 162, 'start_index': 135, 'title': 'Weather information for New York, NY, US', 'url': 'https://www.google.com/search?q=weather+in+New York, NY,+US'}}, {'type': 'url_citation', 'url_citation': {'end_index': 435, 'start_index': 334, 'title': 'Weather information for New York, NY, US', 'url': 'https://www.google.com/search?q=weather+in+New York, NY,+US'}}]))], usage=Usage(completion_tokens=295, prompt_tokens=12, total_tokens=395, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=148, rejected_prediction_tokens=None, text_tokens=147, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=12, image_tokens=None)), vertex_ai_grounding_metadata=[{'searchEntryPoint': {'renderedContent': '\\n\\n
\\n
\\n
\\n
\\n
\\n
\\n
\\n'}, 'groundingChunks': [{'web': {'uri': 'https://www.google.com/search?q=weather+in+New York, NY,+US', 'title': 'Weather information for New York, NY, US'}}], 'groundingSupports': [{'segment': {'startIndex': 135, 'endIndex': 162, 'text': 'The humidity is around 52%.'}, 'groundingChunkIndices': [0]}, {'segment': {'startIndex': 334, 'endIndex': 435, 'text': 'Temperatures are expected to range between 20°F (-7°C) and 28°F (-2°C), with humidity around 57%.'}, 'groundingChunkIndices': [0]}], 'webSearchQueries': ['weather in NYC']}], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])"
616 | ]
617 | },
618 | "execution_count": null,
619 | "metadata": {},
620 | "output_type": "execute_result"
621 | }
622 | ],
623 | "source": [
624 | "chat = Chat('gemini/gemini-2.5-flash')\n",
625 | "chat(\"What is the weather like in NYC? Search web.\", search=\"m\")"
626 | ]
627 | },
628 | {
629 | "cell_type": "code",
630 | "execution_count": null,
631 | "id": "430a280f",
632 | "metadata": {},
633 | "outputs": [
634 | {
635 | "data": {
636 | "text/plain": [
637 | "Usage(id=5, timestamp=UNSET, model='gemini-2.5-flash', user_id='user-123', prompt_tokens=12, completion_tokens=295, total_tokens=395, cached_tokens=None, cache_creation_tokens=None, cache_read_tokens=None, web_search_requests=1, response_cost=0.0007411000000000001)"
638 | ]
639 | },
640 | "execution_count": null,
641 | "metadata": {},
642 | "output_type": "execute_result"
643 | }
644 | ],
645 | "source": [
646 | "time.sleep(0.3)\n",
647 | "u = logger.usage(select=slc)[-1]; u"
648 | ]
649 | },
650 | {
651 | "cell_type": "code",
652 | "execution_count": null,
653 | "id": "0b597375",
654 | "metadata": {},
655 | "outputs": [],
656 | "source": [
657 | "test_eq(u.web_search_requests,1)"
658 | ]
659 | },
660 | {
661 | "cell_type": "code",
662 | "execution_count": null,
663 | "id": "d76d3c46",
664 | "metadata": {},
665 | "outputs": [],
666 | "source": [
667 | "chat = Chat('claude-sonnet-4-5-20250929')\n",
668 | "r = chat(\"What is the weather like in NYC? Search web.\", search=\"m\")"
669 | ]
670 | },
671 | {
672 | "cell_type": "code",
673 | "execution_count": null,
674 | "id": "fb86f247",
675 | "metadata": {},
676 | "outputs": [
677 | {
678 | "data": {
679 | "text/plain": [
680 | "Usage(id=6, timestamp=UNSET, model='claude-sonnet-4-5-20250929', user_id='user-123', prompt_tokens=10532, completion_tokens=318, total_tokens=10850, cached_tokens=0, cache_creation_tokens=0, cache_read_tokens=0, web_search_requests=1, response_cost=0.036365999999999996)"
681 | ]
682 | },
683 | "execution_count": null,
684 | "metadata": {},
685 | "output_type": "execute_result"
686 | }
687 | ],
688 | "source": [
689 | "time.sleep(0.3)\n",
690 | "u = logger.usage(select=slc)[-1]; u"
691 | ]
692 | },
693 | {
694 | "cell_type": "code",
695 | "execution_count": null,
696 | "id": "50379ec9",
697 | "metadata": {},
698 | "outputs": [],
699 | "source": [
700 | "test_eq(u.web_search_requests,1)"
701 | ]
702 | },
703 | {
704 | "cell_type": "markdown",
705 | "id": "3f7d58e3",
706 | "metadata": {},
707 | "source": [
708 | "::: {.callout-important}\n",
709 | "Litellm's `response_cost` doesn't take web search request cost into account!\n",
710 | ":::"
711 | ]
712 | },
713 | {
714 | "cell_type": "markdown",
715 | "id": "c9a35480",
716 | "metadata": {},
717 | "source": [
718 | "Now, this is a case where using the custom calculations is better as it will also include the web search request cost:"
719 | ]
720 | },
721 | {
722 | "cell_type": "code",
723 | "execution_count": null,
724 | "id": "dc09ac23",
725 | "metadata": {},
726 | "outputs": [],
727 | "source": [
728 | "test_eq(u.cost, u.response_cost + u.web_search_requests * model_prices[u.model]['web_search_prc'])"
729 | ]
730 | },
731 | {
732 | "cell_type": "markdown",
733 | "id": "77e9b5a8",
734 | "metadata": {},
735 | "source": [
736 | "### Search with streaming"
737 | ]
738 | },
739 | {
740 | "cell_type": "markdown",
741 | "id": "f8929bae",
742 | "metadata": {},
743 | "source": [
744 | "Web search with streaming:"
745 | ]
746 | },
747 | {
748 | "cell_type": "markdown",
749 | "id": "4b5d1da0",
750 | "metadata": {},
751 | "source": [
752 | "::: {.callout-important}\n",
753 | "Gemini web search requests are part of `prompt_tokens_details` which is only included with `stream_options={\"include_usage\": True}` when `stream=True`. \n",
754 | "\n",
755 | "There is currently a bug with gemini web search request counts, [Issue](https://github.com/BerriAI/litellm/issues/17919) and [PR](https://github.com/BerriAI/litellm/pull/17921). Waiting for litellm 1.80.11 pypi release.\n",
756 | ":::"
757 | ]
758 | },
759 | {
760 | "cell_type": "code",
761 | "execution_count": null,
762 | "id": "1faec06f",
763 | "metadata": {},
764 | "outputs": [],
765 | "source": [
766 | "chat = Chat('gemini/gemini-2.5-flash')\n",
767 | "res = chat(\"What is the weather like in NYC? Search web.\", search=\"m\", stream=True, stream_options={\"include_usage\": True})\n",
768 | "for o in res: pass\n",
769 | "# print(o)"
770 | ]
771 | },
772 | {
773 | "cell_type": "code",
774 | "execution_count": null,
775 | "id": "21056927",
776 | "metadata": {},
777 | "outputs": [
778 | {
779 | "data": {
780 | "text/plain": [
781 | "Usage(id=7, timestamp=UNSET, model='gemini-2.5-flash', user_id='user-123', prompt_tokens=12, completion_tokens=588, total_tokens=600, cached_tokens=None, cache_creation_tokens=None, cache_read_tokens=None, web_search_requests=1, response_cost=0.0364736)"
782 | ]
783 | },
784 | "execution_count": null,
785 | "metadata": {},
786 | "output_type": "execute_result"
787 | }
788 | ],
789 | "source": [
790 | "time.sleep(0.3)\n",
791 | "u = logger.usage(select=slc)[-1]; u"
792 | ]
793 | },
794 | {
795 | "cell_type": "markdown",
796 | "id": "ae560895",
797 | "metadata": {},
798 | "source": [
799 | "::: {.callout-important}\n",
800 | "Anthropic web search requests are available in `usage.server_tool_use`\n",
801 | ":::"
802 | ]
803 | },
804 | {
805 | "cell_type": "code",
806 | "execution_count": null,
807 | "id": "ff66e5d8",
808 | "metadata": {},
809 | "outputs": [],
810 | "source": [
811 | "chat = Chat('claude-sonnet-4-5')\n",
812 | "res = chat(\"What is the weather like in NYC now? Search web.\", search=\"m\", stream=True, stream_options={\"include_usage\": True})\n",
813 | "for o in res: pass\n",
814 | "# print(o)"
815 | ]
816 | },
817 | {
818 | "cell_type": "code",
819 | "execution_count": null,
820 | "id": "07c9bb1e",
821 | "metadata": {},
822 | "outputs": [
823 | {
824 | "data": {
825 | "text/plain": [
826 | "Usage(id=8, timestamp=UNSET, model='claude-sonnet-4-5', user_id='user-123', prompt_tokens=10477, completion_tokens=303, total_tokens=10780, cached_tokens=0, cache_creation_tokens=0, cache_read_tokens=0, web_search_requests=1, response_cost=0.035976)"
827 | ]
828 | },
829 | "execution_count": null,
830 | "metadata": {},
831 | "output_type": "execute_result"
832 | }
833 | ],
834 | "source": [
835 | "time.sleep(0.3)\n",
836 | "u = logger.usage(select=slc)[-1]; u"
837 | ]
838 | },
839 | {
840 | "cell_type": "code",
841 | "execution_count": null,
842 | "id": "23f51ee5",
843 | "metadata": {},
844 | "outputs": [],
845 | "source": [
846 | "test_eq(u.cost, u.response_cost + u.web_search_requests * model_prices[u.model]['web_search_prc'])"
847 | ]
848 | },
849 | {
850 | "cell_type": "code",
851 | "execution_count": null,
852 | "id": "cc36e5e8",
853 | "metadata": {},
854 | "outputs": [],
855 | "source": [
856 | "test_eq(len(logger.usage()), 8)"
857 | ]
858 | },
859 | {
860 | "cell_type": "code",
861 | "execution_count": null,
862 | "id": "9ef8ac82",
863 | "metadata": {},
864 | "outputs": [],
865 | "source": [
866 | "#| export\n",
867 | "@patch\n",
868 | "def total_cost(self:Usage, sc=0.01): return self.response_cost + sc * ifnone(self.web_search_requests, 0)"
869 | ]
870 | },
871 | {
872 | "cell_type": "code",
873 | "execution_count": null,
874 | "id": "4ef5dc43",
875 | "metadata": {},
876 | "outputs": [
877 | {
878 | "data": {
879 | "text/plain": [
880 | "0.1107147"
881 | ]
882 | },
883 | "execution_count": null,
884 | "metadata": {},
885 | "output_type": "execute_result"
886 | }
887 | ],
888 | "source": [
889 | "L(logger.usage()).attrgot('response_cost').sum()"
890 | ]
891 | },
892 | {
893 | "cell_type": "code",
894 | "execution_count": null,
895 | "id": "5a00a720",
896 | "metadata": {},
897 | "outputs": [],
898 | "source": [
899 | "disable_cachy()"
900 | ]
901 | },
902 | {
903 | "cell_type": "markdown",
904 | "id": "b1e57bed",
905 | "metadata": {},
906 | "source": [
907 | "A simple Gemini example (requires min tokens and running twice to see `cached_tokens`):"
908 | ]
909 | },
910 | {
911 | "cell_type": "code",
912 | "execution_count": null,
913 | "id": "4ad22437",
914 | "metadata": {},
915 | "outputs": [
916 | {
917 | "data": {
918 | "text/markdown": [
919 | "2 + 2 = 4\n",
920 | "\n",
921 | "\n",
922 | "\n",
923 | "- id: `chatcmpl-xxx`\n",
924 | "- model: `gemini-2.5-flash`\n",
925 | "- finish_reason: `stop`\n",
926 | "- usage: `Usage(completion_tokens=41, prompt_tokens=7010, total_tokens=7051, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=34, rejected_prediction_tokens=None, text_tokens=7, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=6117, text_tokens=893, image_tokens=None))`\n",
927 | "\n",
928 | " "
929 | ],
930 | "text/plain": [
931 | "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-flash', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='2 + 2 = 4', role='assistant', tool_calls=None, function_call=None, images=[], thinking_blocks=[], provider_specific_fields=None))], usage=Usage(completion_tokens=41, prompt_tokens=7010, total_tokens=7051, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=34, rejected_prediction_tokens=None, text_tokens=7, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=6117, text_tokens=893, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])"
932 | ]
933 | },
934 | "execution_count": null,
935 | "metadata": {},
936 | "output_type": "execute_result"
937 | }
938 | ],
939 | "source": [
940 | "#| notest\n",
941 | "chat = Chat('gemini/gemini-2.5-flash')\n",
942 | "chat(\"What is 2+2?\"* 500)\n",
943 | "time.sleep(5)\n",
944 | "chat(\"What is 2+2?\"* 500)"
945 | ]
946 | },
947 | {
948 | "cell_type": "code",
949 | "execution_count": null,
950 | "id": "aa0621b4",
951 | "metadata": {},
952 | "outputs": [
953 | {
954 | "data": {
955 | "text/plain": [
956 | "Usage(id=10, timestamp=UNSET, model='gemini-2.5-flash', user_id='user-123', prompt_tokens=7010, completion_tokens=41, total_tokens=7051, cached_tokens=6117, cache_creation_tokens=None, cache_read_tokens=None, web_search_requests=0, response_cost=0.00055391)"
957 | ]
958 | },
959 | "execution_count": null,
960 | "metadata": {},
961 | "output_type": "execute_result"
962 | }
963 | ],
964 | "source": [
965 | "#| notest\n",
966 | "time.sleep(0.3) # wait for callback db write\n",
967 | "u = logger.usage(select=slc)[-1];u"
968 | ]
969 | },
970 | {
971 | "cell_type": "code",
972 | "execution_count": null,
973 | "id": "6133e694",
974 | "metadata": {},
975 | "outputs": [],
976 | "source": [
977 | "#| notest\n",
978 | "test_eq(len(logger.usage()), 10)\n",
979 | "test_eq(logger.usage()[-1].cached_tokens > 3000, True)"
980 | ]
981 | },
982 | {
983 | "cell_type": "code",
984 | "execution_count": null,
985 | "id": "b3577f0e",
986 | "metadata": {},
987 | "outputs": [],
988 | "source": [
989 | "tf.close()"
990 | ]
991 | },
992 | {
993 | "cell_type": "markdown",
994 | "id": "41e335da",
995 | "metadata": {},
996 | "source": [
997 | "# Export -"
998 | ]
999 | },
1000 | {
1001 | "cell_type": "code",
1002 | "execution_count": null,
1003 | "id": "2b59b8b0",
1004 | "metadata": {},
1005 | "outputs": [],
1006 | "source": [
1007 | "#| hide\n",
1008 | "import nbdev; nbdev.nbdev_export()"
1009 | ]
1010 | },
1011 | {
1012 | "cell_type": "code",
1013 | "execution_count": null,
1014 | "id": "d620e45d",
1015 | "metadata": {},
1016 | "outputs": [],
1017 | "source": []
1018 | }
1019 | ],
1020 | "metadata": {},
1021 | "nbformat": 4,
1022 | "nbformat_minor": 5
1023 | }
1024 |
--------------------------------------------------------------------------------
/nbs/index.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "0cb9feba",
6 | "metadata": {},
7 | "source": [
8 | "# Lisette\n",
9 | "\n",
10 | "> Lisette makes LiteLLM easier to use"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": null,
16 | "id": "e0f08655",
17 | "metadata": {},
18 | "outputs": [],
19 | "source": [
20 | "#| hide\n",
21 | "from cachy import enable_cachy\n",
22 | "from lisette import patch_litellm"
23 | ]
24 | },
25 | {
26 | "cell_type": "code",
27 | "execution_count": null,
28 | "id": "c11d807e",
29 | "metadata": {},
30 | "outputs": [],
31 | "source": [
32 | "#| hide\n",
33 | "enable_cachy()\n",
34 | "patch_litellm()"
35 | ]
36 | },
37 | {
38 | "cell_type": "markdown",
39 | "id": "5935ea64",
40 | "metadata": {},
41 | "source": [
42 | "> **NB**: If you are reading this in GitHub's readme, we recommend you instead read the much more nicely formatted [documentation format](https://lisette.answer.ai/) of this tutorial.\n",
43 | "\n",
44 | "_Lisette_ is a wrapper for the [LiteLLM Python SDK](https://docs.litellm.ai/), which provides unified access to 100+ LLM providers using the OpenAI API format.\n",
45 | "\n",
46 | "LiteLLM provides a unified interface to access multiple LLMs, but it's quite low level: it leaves the developer to do a lot of stuff manually. Lisette automates pretty much everything that can be automated, whilst providing full control. Amongst the features provided:\n",
47 | "\n",
48 | "* A `Chat` class that creates stateful dialogs across any LiteLLM-supported model\n",
49 | "* Convenient message creation utilities for text, images, and mixed content\n",
50 | "* Simple and convenient support for tool calling with automatic execution\n",
51 | "* Built-in support for web search capabilities (including citations for supporting models)\n",
52 | "* Streaming responses with formatting\n",
53 | "* Full async support with `AsyncChat`\n",
54 | "* Prompt caching (for supporting models)\n",
55 | "\n",
56 | "To use Lisette, you'll need to set the appropriate API keys as environment variables for whichever LLM providers you want to use."
57 | ]
58 | },
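59 | {
60 | "cell_type": "markdown",
61 | "id": "3d4c5b6a",
62 | "metadata": {},
63 | "source": [
64 | "For instance, you could export a key in your shell, or set it from Python before creating a `Chat` (the value below is a placeholder, not a real key):"
65 | ]
66 | },
67 | {
68 | "cell_type": "code",
69 | "execution_count": null,
70 | "id": "4c5b6a79",
71 | "metadata": {},
72 | "outputs": [],
73 | "source": [
74 | "import os\n",
75 | "# Placeholder -- substitute your own key, or set it in your shell instead\n",
76 | "# os.environ['ANTHROPIC_API_KEY'] = 'sk-ant-...'"
77 | ]
78 | },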
59 | {
60 | "cell_type": "markdown",
61 | "id": "5092a496",
62 | "metadata": {},
63 | "source": [
64 | "## Get started\n",
65 | "\n",
66 | "LiteLLM will automatically be installed with Lisette, if you don't already have it."
67 | ]
68 | },
69 | {
70 | "cell_type": "code",
71 | "execution_count": null,
72 | "id": "e3f842b6",
73 | "metadata": {},
74 | "outputs": [],
75 | "source": [
76 | "!pip install lisette -qq"
77 | ]
78 | },
79 | {
80 | "cell_type": "markdown",
81 | "id": "8bd64962",
82 | "metadata": {},
83 | "source": [
84 | "Lisette only exports the symbols that are needed to use the library, so you can use import * to import them. Here's a quick example showing how easy it is to switch between different LLM providers:"
85 | ]
86 | },
87 | {
88 | "cell_type": "code",
89 | "execution_count": null,
90 | "id": "eab5f63e",
91 | "metadata": {},
92 | "outputs": [],
93 | "source": [
94 | "from lisette import *"
95 | ]
96 | },
97 | {
98 | "cell_type": "markdown",
99 | "id": "11520215",
100 | "metadata": {},
101 | "source": [
102 | "## Chat"
103 | ]
104 | },
105 | {
106 | "cell_type": "code",
107 | "execution_count": null,
108 | "id": "dbbfd7b8",
109 | "metadata": {},
110 | "outputs": [
111 | {
112 | "data": {
113 | "text/markdown": [
114 | "I'm Claude, an AI assistant created by Anthropic to be helpful, harmless, and honest in conversations and tasks.\n",
115 | "\n",
116 | "\n",
117 | "\n",
118 | "- id: `chatcmpl-xxx`\n",
119 | "- model: `claude-sonnet-4-20250514`\n",
120 | "- finish_reason: `stop`\n",
121 | "- usage: `Usage(completion_tokens=29, prompt_tokens=17, total_tokens=46, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n",
122 | "\n",
123 | " "
124 | ],
125 | "text/plain": [
126 | "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-20250514', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content=\"I'm Claude, an AI assistant created by Anthropic to be helpful, harmless, and honest in conversations and tasks.\", role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=29, prompt_tokens=17, total_tokens=46, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0))"
127 | ]
128 | },
129 | "metadata": {},
130 | "output_type": "display_data"
131 | },
132 | {
133 | "data": {
134 | "text/markdown": [
135 | "I am a large language model, trained by Google to assist with information and generate text.\n",
136 | "\n",
137 | "\n",
138 | "\n",
139 | "- id: `chatcmpl-xxx`\n",
140 | "- model: `gemini-2.5-flash`\n",
141 | "- finish_reason: `stop`\n",
142 | "- usage: `Usage(completion_tokens=988, prompt_tokens=11, total_tokens=999, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=970, rejected_prediction_tokens=None, text_tokens=18), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=11, image_tokens=None))`\n",
143 | "\n",
144 | " "
145 | ],
146 | "text/plain": [
147 | "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gemini-2.5-flash', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='I am a large language model, trained by Google to assist with information and generate text.', role='assistant', tool_calls=None, function_call=None, provider_specific_fields=None))], usage=Usage(completion_tokens=988, prompt_tokens=11, total_tokens=999, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=970, rejected_prediction_tokens=None, text_tokens=18), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=None, text_tokens=11, image_tokens=None)), vertex_ai_grounding_metadata=[], vertex_ai_url_context_metadata=[], vertex_ai_safety_results=[], vertex_ai_citation_metadata=[])"
148 | ]
149 | },
150 | "metadata": {},
151 | "output_type": "display_data"
152 | },
153 | {
154 | "data": {
155 | "text/markdown": [
156 | "I'm an AI language model created by OpenAI, designed to assist with a wide range of questions and tasks by providing information and generating text-based responses.\n",
157 | "\n",
158 | "\n",
159 | "\n",
160 | "- id: `chatcmpl-xxx`\n",
161 | "- model: `gpt-4o-2024-08-06`\n",
162 | "- finish_reason: `stop`\n",
163 | "- usage: `Usage(completion_tokens=30, prompt_tokens=17, total_tokens=47, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0, text_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=0, cached_tokens=0, text_tokens=None, image_tokens=None))`\n",
164 | "\n",
165 | " "
166 | ],
167 | "text/plain": [
168 | "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='gpt-4o-2024-08-06', object='chat.completion', system_fingerprint='fp_e819e3438b', choices=[Choices(finish_reason='stop', index=0, message=Message(content=\"I'm an AI language model created by OpenAI, designed to assist with a wide range of questions and tasks by providing information and generating text-based responses.\", role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'refusal': None}, annotations=[]), provider_specific_fields={})], usage=Usage(completion_tokens=30, prompt_tokens=17, total_tokens=47, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0, text_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=0, cached_tokens=0, text_tokens=None, image_tokens=None)), service_tier='default')"
169 | ]
170 | },
171 | "metadata": {},
172 | "output_type": "display_data"
173 | }
174 | ],
175 | "source": [
176 | "models = ['claude-sonnet-4-20250514', 'gemini/gemini-2.5-flash', 'openai/gpt-4o']\n",
177 | "\n",
178 | "for model in models:\n",
179 | " chat = Chat(model)\n",
180 | " res = chat(\"Please tell me about yourself in one brief sentence.\")\n",
181 | " display(res)"
182 | ]
183 | },
184 | {
185 | "cell_type": "markdown",
186 | "id": "c05085ec",
187 | "metadata": {},
188 | "source": [
189 | "That's it! Lisette handles all the provider-specific details automatically. Each model will respond in its own style, but the interface remains the same."
190 | ]
191 | },
192 | {
193 | "cell_type": "markdown",
194 | "id": "6a2a544d",
195 | "metadata": {},
196 | "source": [
197 | "## Message formatting"
198 | ]
199 | },
200 | {
201 | "cell_type": "markdown",
202 | "id": "493ff5ba",
203 | "metadata": {},
204 | "source": [
205 | "### Multiple messages"
206 | ]
207 | },
208 | {
209 | "cell_type": "markdown",
210 | "id": "1cc2397b",
211 | "metadata": {},
212 | "source": [
213 | "Lisette accepts multiple messages in one go:"
214 | ]
215 | },
216 | {
217 | "cell_type": "code",
218 | "execution_count": null,
219 | "id": "f7b5b8c0",
220 | "metadata": {},
221 | "outputs": [
222 | {
223 | "data": {
224 | "text/markdown": [
225 | "Hello! Based on what you just told me, your favorite drink is coffee! ☕\n",
226 | "\n",
227 | "\n",
228 | "\n",
229 | "- id: `chatcmpl-xxx`\n",
230 | "- model: `claude-sonnet-4-20250514`\n",
231 | "- finish_reason: `stop`\n",
232 | "- usage: `Usage(completion_tokens=22, prompt_tokens=23, total_tokens=45, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n",
233 | "\n",
234 | " "
235 | ],
236 | "text/plain": [
237 | "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-20250514', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='Hello! Based on what you just told me, your favorite drink is coffee! ☕', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=22, prompt_tokens=23, total_tokens=45, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0))"
238 | ]
239 | },
240 | "metadata": {},
241 | "output_type": "display_data"
242 | }
243 | ],
244 | "source": [
245 | "chat = Chat(models[0])\n",
246 | "res = chat(['Hi! My favorite drink coffee.', 'Hello!', 'Whats my favorite drink?'])\n",
247 | "display(res)"
248 | ]
249 | },
250 | {
251 | "cell_type": "markdown",
252 | "id": "31884be4",
253 | "metadata": {},
254 | "source": [
255 | "If you have a pre-existing message history, you can also pass it when you create the `Chat` object:"
256 | ]
257 | },
258 | {
259 | "cell_type": "code",
260 | "execution_count": null,
261 | "id": "b944535d",
262 | "metadata": {},
263 | "outputs": [
264 | {
265 | "data": {
266 | "text/markdown": [
267 | "Your favorite drink is coffee! You just mentioned that in your previous message.\n",
268 | "\n",
269 | "\n",
270 | "\n",
271 | "- id: `chatcmpl-xxx`\n",
272 | "- model: `claude-sonnet-4-20250514`\n",
273 | "- finish_reason: `stop`\n",
274 | "- usage: `Usage(completion_tokens=18, prompt_tokens=30, total_tokens=48, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n",
275 | "\n",
276 | " "
277 | ],
278 | "text/plain": [
279 | "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-20250514', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='Your favorite drink is coffee! You just mentioned that in your previous message.', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=18, prompt_tokens=30, total_tokens=48, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0))"
280 | ]
281 | },
282 | "metadata": {},
283 | "output_type": "display_data"
284 | }
285 | ],
286 | "source": [
287 | "chat = Chat(models[0],hist=['Hi! My favorite drink is coffee.', 'Hello!'])\n",
288 | "res = chat('Whats my favorite drink?')\n",
289 | "display(res)"
290 | ]
291 | },
292 | {
293 | "cell_type": "markdown",
294 | "id": "63b6ddca",
295 | "metadata": {},
296 | "source": [
297 | "### Images\n",
298 | "\n",
299 | "Lisette also makes it easy to include images in your prompts:"
300 | ]
301 | },
302 | {
303 | "cell_type": "code",
304 | "execution_count": null,
305 | "id": "6acd8e99",
306 | "metadata": {},
307 | "outputs": [],
308 | "source": [
309 | "from pathlib import Path\n",
310 | "from IPython.display import Image"
311 | ]
312 | },
313 | {
314 | "cell_type": "code",
315 | "execution_count": null,
316 | "id": "61386e80",
317 | "metadata": {},
318 | "outputs": [
319 | {
320 | "data": {
321 | "image/jpeg": "/9j/4AAQSkZJRgABAQAAAQABAAD/4gxUSUNDX1BST0ZJTEUAAQEAAAxEVUNDTQJAAABtbnRyUkdCIFhZWiAH0wAEAAQAAAAAAABhY3NwTVNGVAAAAABDQU5PWjAwOQAAAAAAAAAAAAAAAAAA9tYAAQAAAADTLUNBTk8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA5yVFJDAAABLAAACAxnVFJDAAABLAAACAxiVFJDAAABLAAACAxyWFlaAAAJOAAAABRnWFlaAAAJTAAAABRiWFlaAAAJYAAAABRjaGFkAAAJdAAAACxjcHJ0AAAJoAAAAEBkbW5kAAAJ4AAAAHxkbWRkAAAKXAAAAJR3dHB0AAAK8AAAABR0ZWNoAAALBAAAAAxkZXNjAAAKXAAAAJR1Y21JAAALEAAAATRjdXJ2AAAAAAAABAAAAAAEAAkADgATABgAHQAiACcALAAxADYAOwBAAEUASgBPAFQAWQBeAGMAaABtAHIAdgB7AIAAhQCKAI8AlACZAJ4AowCoAK0AsgC3ALwAwQDGAMsA0ADVANoA3wDlAOoA8AD1APsBAQEGAQwBEgEYAR4BJAErATEBNwE+AUQBSwFSAVkBXwFmAW0BdQF8AYMBigGSAZkBoQGpAbABuAHAAcgB0AHYAeEB6QHxAfoCAgILAhQCHQImAi8COAJBAkoCUwJdAmYCcAJ6AoMCjQKXAqECrAK2AsACygLVAuAC6gL1AwADCwMWAyEDLAM3A0MDTgNaA2YDcQN9A4kDlQOhA60DugPGA9MD3wPsA/kEBgQTBCAELQQ6BEcEVQRiBHAEfgSMBJoEqAS2BMQE0gThBO8E/gUNBRsFKgU5BUgFWAVnBXYFhgWVBaUFtQXFBdUF5QX1BgUGFgYmBjcGSAZYBmkGegaLBp0Grga/BtEG4wb0BwYHGAcqBzwHTwdhB3MHhgeZB6sHvgfRB+QH+AgLCB4IMghFCFkIbQiBCJUIqQi+CNII5gj7CRAJJAk5CU4JZAl5CY4JpAm5Cc8J5Qn7ChEKJwo9ClMKagqACpcKrgrFCtwK8wsKCyELOQtQC2gLgAuYC7ALyAvgC/kMEQwqDEIMWwx0DI0MpgzADNkM8g0MDSYNQA1aDXQNjg2oDcMN3Q34DhMOLg5JDmQOfw6aDrYO0Q7tDwkPJQ9BD10PeQ+WD7IPzw/sEAkQJhBDEGAQfRCbELkQ1hD0ERIRMBFOEW0RixGqEcgR5xIGEiUSRBJkEoMSoxLCEuITAhMiE0ITYxODE6QTxBPlFAYUJxRIFGkUixSsFM4U8BURFTQVVhV4FZoVvRXfFgIWJRZIFmsWjxayFtUW+RcdF0EXZReJF60X0hf2GBsYQBhlGIoYrxjUGPoZHxlFGWsZkRm3Gd0aAxoqGlAadxqeGsUa7BsTGzsbYhuKG7Eb2RwBHCkcUhx6HKMcyxz0HR0dRh1vHZkdwh3sHhYePx5pHpMevh7oHxMfPR9oH5Mfvh/pIBUgQCBsIJcgwyDvIRshSCF0IaEhzSH6IiciVCKBIq8i3CMKIzcjZSOTI8Ij8CQeJE0kfCSqJNklCCU4JWcllyXGJfYmJiZWJoYmtybnJxgnSSd5J6on3CgNKD4ocCiiKNQpBik4KWopnSnPKgIqNSpoKpsqzisBKzUraSudK9EsBSw5LG0soizXLQstQC11Last4C4WLksugS63Lu0vIy9aL5Avxy/+MDUwbDCjMNoxEjFKMYExuTHxMioyYjKbMtMzDDNFM34ztzPxNCo0ZDSeNNg1EjVMNYc1wTX8Njc2cjatNug3JDdfN5s31zgTOE84jDjIOQU5QTl+Obs5+To2OnM6sTrvOy07azupO+c8JjxlPKQ84z0iPWE9oD3gPiA+YD6gPuA/ID9hP6E/4kAjQGRApUDnQShBakGsQe5CMEJyQrRC90M6Q31DwEQDREZEikTNRRFFVUWZRd1GIkZmRqtG8Ec1R3pHv0gFSEpIkEjWSRxJYkmpSe9KNkp9SsRLC0tSS5pL4UwpTHFMuU0CTUpNkk3bTiRObU62TwBPSU+TT9xQJlBwULtRBVFQUZpR5VIwUnxSx1MSU15TqlP2VEJUjlTbVSdVdFXBVg5WW1apVvZXRFeSV+BYLlh8WMtZGlloWbdaB1pWWqVa9VtFW5Vb5Vw1XIVc1l0nXXddyV4aXmtevV8OX2BfsmAEYFdgqWD8YU9homH1Ykhim2LvY0Njl2PrZD9klGToZT1lkmXnZjxmkmbnZz1nk2fpaD9olWjsaUNpmWnwakhqn2r3a05rpmv+bFZsr20HbWBtuW4RbmtuxG8db3dv0XArcIVw33E6cZRx73JKcqVzAXNcc7h0E3RvdMx1KHWEdeF2Pnabdvh3VXezeBB4bnjMeSp5iHnnekV6pHsDe2J7wXwhfIF84H1AfaB+AX5hfsJ/I3+Ef+WARoCogQmBa4HNgi+CkYL0g1eDuYQchICE44VGhaqGDoZyhtaHOoefiASIaIjNiTOJmIn+imOKyYsvi5WL/IxijMmNMI2Xjf6OZo7NjzWPnZAFkG2Q1pE/kaeSEJJ5kuOTTJO2lCCUipT0lV6VyZYzlp6XCZd1l+CYTJi3mSOZj5n7mmia1ZtBm66cG5yJnPadZJ3SnkCerp8cn4uf+aBooNehRqG2oiWilaMFo3Wj5aRWpMalN6Wophmmi6b8p26n4KhSqMSpNqmpqhyqjqsCq3Wr6KxcrNCtRK24riyuoa8Vr4qv/7B0sOqxX7HVskuywbM3s660JLSbtRK1ibYBtni28Ldot+C4WLjRuUm5wro7urS7LbunvCG8mr0UvY++Cb6Evv6/eb/0wHDA68FnwePCX8Lbw1fD1MRRxM3FS8XIxkXGw8dBx7/IPci7yTrJuco4yrfLNsu1zDXMtc01zbXONc62zzfPuNA50LrRO9G90j/SwdND08XUSNTL1U7V0dZU1tjXW9ff2GPY59ls2fDaddr623/cBNyK3RDdlt4c3qLfKN+v4DbgveFE4cviU+La42Lj6uRz5PvlhOYN5pbnH+eo6DLovOlG6dDqWurl62/r+uyF7RDtnO4n7rPvP+/L8Fjw5PFx8f7yi/MZ86b0NPTC9VD13vZs9vv3ivgZ+Kj5N/nH+lf65/t3/Af8mP0o/bn+Sv7b/23//1hZWiAAAAAAAABvoAAAOPIAAAOPWFlaIAAAAAAAAGKWAAC3igAAGNpYWVogAAAAAAAAJKAAAA+FAAC2xHNmMzIAAAAAAAEMPwAABdz///MnAAAHkAAA/ZL///ui///9owAAA9wAAMBxdGV4dAAAAABDb3B5cmlnaHQgKGMpIDIwMDMsIENhbm9uIEluYy4gIEFsbCByaWdodHMgcmVzZXJ2ZWQuAAAAAGRlc2MAAAAAAAAAC0Nhbm9uIEluYy4AAAAAAAAAAAoAQwBhAG4AbwBuACAASQBuAGMALgAAC0Nhbm9uIEluYy4AAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABkZXNjAAAAAAAAABNzUkdCIHYxLjMxIChDYW5vbikAAAAAAAAAABIAcwBSAEcAQgAgAHYAMQAuADMAMQAgACgAQwBhAG4AbwBuACkAABNzUkdCIHYxLjMxIChDYW5vbikAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAWFlaIAAAAAAAAPbWAAEAAAAA0y1zaWcgAAAAAENSVCB1Y21JQ1NJRwAAASgBCAAAAQgAAAEAAAAAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAVklUIExhYm9yYXRvcnkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAENJTkMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADzVAABAAAAARbPAAAAAAAAAAAAAAAAAAAAAwAAAAAAAAAAABQAAAAAAAEAAQAAAAAAAf/bAEMABAMDBAMDBAQDBAUEBAUGCgcGBgYGDQkKCAoPDRAQDw0PDhETGBQREhcSDg8VHBUXGRkbGxsQFB0fHRofGBobGv/bAEMBBAUFBgUGDAcHDBoRDxEaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGv/AABEIAMgBLAMBIgACEQEDEQH/xAAdAAABBAMBAQAAAAAAAAAAAAAGAwQFBwACCAEJ/8QAQhAAAgECBAQEBAMHAwQBAwUAAQIDBBEABRIhBhMxQSJRYXEHFDKBkaGxCBUjQlLB8DPR8RYkYuGCQ3KSCRg0U6L/xAAaAQADAQEBAQAAAAAAAAAAAAACAwQBBQAG/8QAMREAAgICAgEDAgQFBQEBAAAAAQIAEQMhEjEEE0FhIlEjcaHwFDKxwdFCgZHh8TNi/9oADAMBAAIRAxEAPwCtuF+JeC6SDNsygzareKiAu4kZGjuPoHS9yNididr4p74qfENuLpRSUVdVVeWU8xKySnwygjwtpYa1IuQQSRfcYr4SOheKOR9DoRKqGwbe9j54UpDTTVSxVQZKXWCxQXcLft54a3k/gjEihV+BGNlbJ2Y94WySHOc3io55DFG8bP4CNbkfyrfv+PthXiDJlyWqIiqObGygXZLFbW2I97flhtLTxQ1UlTltQwhhkC0+vZyLbHbv/vhLMcyrM2qWFdJLOyR6QWU3Fu59ffEf81EGCCApBG5GtqEpbWtyb++FquqeomvZRpGkW7+uGsWzXI2XfCmuI28LavfB0LuBZjyZ4mjjMSaSo8TEm5NulvTG9OGbTZb6+m+9/bDcVEaR20KzkX3X6fv3xYPw5pEkyvNMyraekrItaxurwGR0UeIk9lU7epsewxipyB+AT9zqexpzapC0fD1dW5LUZrDSLNSU7jVp1K+k7FwehCkgN3FwbW3xP5/8Oc3yKanWn15k8omYpTxF25UZALsPI3Htt3xa+V0wi4cC08XIQXSOmZDrcHcjTa2k6j1seuCLI6PMsypav5RJKCoI5C1OyOoB+lS4IDC43sd+nni/H4+JuCFWtxfXVdg/Yyr+HAB3OWKj5f8Af1PJVUjVUYZJKiLUVdgDYr128vTFhycHZfxrmT1OQUByOnAZOVG/MTmAdLXJB3U9dwGt0xD1fw/zKPibMstnf93yRSgCWpOtmVm+slRudwTt+eHPCua1fAMwir4m+UqQC00NjoFzbUCtydrgXBsduuIkw8GDZAaFxY+zDUY8QcD1mRSV1KZoaxcupYJ5nRSulZugt2NyL/lgZSBYAWmUBgRp0tvqOOmaeXI+IuD8zBq8rrZatGrSJZXhEukaQ8nQnSwC7nw7DyJ5eliZSyKSUDX1Hz8sZmxcFVqrluu6+LEHKoQ/TuT2U5/meUUscVBWTJSrzf4BN1OtQrgqbixsNvQHqL4e5dNV5jmaTu80swTxSu2skkWuSdybWGGL5JVZZl9DXVQpZ4KxLqsc2pkJ/wD7FG6mxv62w6yWjmirI3hvDN9cXgK+1z5nfYYk58iFY2PzhICDRhpQrkWTy/MZvTPVo8RQJKTpJPU36Bhbp64hqyVY6WSSNWSkMn/bM6/y/wBJttcd8TVCK2qhNDUPI1HGplSIMBy5CbA3PVf6u+BvMcylqqgUEhhJefSwF+XfVYEX9D1HW2MxY2V2yve9fH9qP3qWkg4wK/zH/DFRSpWPU11TyVRbRNYAFjtY37e2E+JqiD971Bgm542u2nTvbfDdYKnJ6QVFVlzrDN/28mtPDuuqyn/7WBv2JxEmllIaUROkQJ2JvYdgT9xvhysQhVj79fb/ANg0eIAkNnEoaQC/fEeV5inG+biWGtKTqUNgwB8j0whFJpRi2DXqLyG2mfLKoDNhKblqLL1xo9QSdugwizFgScMiYi9rm2HWULetX2w0OH+Si9WT5Ljx/lMwfzSXmO5HntthC76yF2uML1A7oNhvhESBH2323FsSGVDubxxCNDdmsbat74cIpN9N1Vh4Tq3OGqTeBA67A36YerOx0WW1rNYCwwlrjkqbGBjpZnKA32J3IwsaZFiR+cFJYlrE9LYSjlkcuzW3/lAtcY30c6RWIOg9rd8K3HCo8hhhsHeUyWsVBWy/c3xJ01DTz6I4njEhYF21MAl+3r74iaepMSOCU5bC7XBJ69vXDzL61OZqq1eQX1SG9t+wH5bYS11HJV7hKI5qWKalzFVEqkLHK0ZugHcE7m9xiKqKKQFV1JVSNtrAYFd/LzxI09ZV1yOtLIsskcYjsNmcX73PbCGYMaeeSB2TmI3idUA3HWx8xiZNHfcobYsdSPePTYNPA8khJBMTMQR239sNGleV3b/ti2ohjyDuQcPkrpgjRpIF+rSzDbzONI5YIkUEAORdrN1Pn6YaDUTVwNajpI6OMxpJ80d2bXZT+ONqfK2qFjpYKaRq47gmULqF+tmta3nhWlekkp+dLJNFKEvDy1DaWB737bYkqziuo4kMUmdlagwOFja+kjb02N7dMdS2F1Pn1VSNmRVPltdSVAM0UUQhY/6pDLcd+/2Iw0gqDUVUrVtS9OtrNMgLFu1j33wmk8gjsJHILWAPYb48XVbxbqDY9rjDADu4LMAKWIiAVEjLCgjDsqrv6279ziRzXhwZM1OKqVC0l9cam5QC1zfoe/4YQlmgdH0agvVreEgennho9VPVyhFMkkrOAutrkb2AwY5E66grXE33JjKuDc3zL5iopctqaqkpjaSVF2H+4tvthPJ86rch1fJTN8s0gdoyLq5B6keeCT/quu4cjqYsonJDSPZHAAjtsGF/Yf4MAkuoqJJWJdiSRbvffCsD5y7Fq
rVVd/NxjBFUFCb94ccPfErMslrK6qEMVRLVSiUF92TxhiinqEa1jbe3lg7rPjHxDPM9BRUyU8tZDHNVLLHZIJdZJK3uGRoyFIPXa24wC/Dykoq6SZI3nknZQainaj1x2B8LK4N0YHoTbc2sQcWFmvD4zp5uTIlJmb6Qz7KbKCOWVuPxPTttjpN5D4UVPUouaH5x2JXyAm48oKf96CjrWWiindHaDXULHNUxqvi1KSey3BJHQDDHiKiymegdc3knpIle9pA0aCS22twrWNiNjbFd55S8QcDV8/zcaU8uYQPDExUM4Q2uV8iel8GGdZ/nVbQPnlPSw09QtItDmFI6NoqYiAqBomAJlRrm/wD5LpuMFgxLjFHlaggi7Bvd/n/bUY+fkCtfpKoinVJY9UjuobZQbEjv7X/y+H1RBG8cM0c2oPcNEPqS36389seUmTs0zU9Q0lNWxy8hoOUdYbyN7WANr98aZjltZlTQx1sckLSoW8SadgSNj3Fx1GOcyNdyAKQt1qFXDtMmaUMlFT/KwzBlcTsTrBHWxHYA3097YfHK63JIvnoa5JqpY1MgdrvpdR4SQfC1j0/4AZRZjNlxL0cjJ4dDjURrB7G2Cavno80y6TM8slgy6q5apVUp25yjbw72NjbYgbEb4DHiBJrRG7v7e3/H2lSOCN+0IshzakylJRTSyzGVv4EjDSULC7E9b+La3pjQUtPV5s+Y5lO88shBVCFBA+2wP6YHChraGhShkl5hJ17BQo3vYefTfBVlmTpAEaotGFFhrJ29lG59ycT+R5RbGMa6H5To+Nj5m2Fw+yqvpqmIRSwWUWsHZSG/92GJV8sy+oheOSlQKwtp0jcehHtgOpocucCzszdjyyP7nElzkoU8FQ4QdAx/tjhZCQbud7GAdVIziv4Z0vETs9O4o6lnDGRU1a7CwB9PbAr/APt/zmojkaHNMtjRbW5sjLcfh1wcx8Scl7NJ4T3viVp+JYyLc4D0tbDcPm58QobEVm8HDlNnRgRkf7NkWYqFzHi+lpJjtpipHlA+5IGLX4b/AGGsjzQxms43rpEIuwgy+Nb+xLH9Dh5w3BNns6LEguzKVcCxBuQT9v7jHUPAXCE2XxQSvIQNjpHbzGOz4ufNlP1CcnyfHw4h9JlGj/8AT+4DNOB/1BxHzdP+pzICCfPTy/yvgEzD9gPNsrM03DvGVDWm3girKF4Sf/mrMB+GO/UgAQAja2GksOkkWuOmOmdipzABc+V3FH7OPxL4WDvWcL1NdTq1jNlzrVr+CHUPuMVfX5bWZRVy0uaUs9DUx/XBPE0br/8AFgDj7H1VExRxHcb3BHbHM/xm4SlztHpOKMoizywLU55JEqi+3LdbMnfv23xJkpBcoxr6hoGcAQFQSty3ffthzy9R1Bh0sCfLB1nXw+gpM05eVVBINx8tPKNaHuNQ+seosR388EdH8PKWoypovCk5Xa/Y9r+vUG3XED50Xdy3H42Q2KlYRUzxDmrd0XdxboMec4EFtAJFgpvi9M44fyah4cai5emELd5F2bbqb+Z6YpzMMuanqW5cfIi7ISCbHp97YSmUZLIjXwnH3GskkXiEaEra4t2J9cPqSoRI0aa2m5GkISbj2+2GkdCqxo7SOFJIZLA7X69cSFLAs/8ABvy+rqxTt064I1UFY+o6iGQPrtFbSWHiO57bG9sO66Coo2C1DKqFNeo7kL+N/wAcR0uTIoD8y0W13WPYgffD9Kaasp1pBoc72kMJBUX6YUR7iMB9o0dYZI+YFN1BGpbAknsD3wyFRBQloqhhI+okkIHtftfDk5TJH4hGXiVtLFQ2kDp+vlhSny7LzHeoZAxP88ljbA8gO5oUmVtdo4boQTv5AAXx5azKskiJyxzBcXGrsNvS2NpomlZIpCqEMQ3Tc3wznlBkeygAenXHeAufL1FZphG76Bfc2GPYYJq0yclGblrdjq7Y3jozUTFNSq2m/W32PriVyGhooczRc/ephoZFdS1LYszdgR5bG+AZgikjuEuMncgw4QaL9fxAwpSymKsQqQhS5Ut2Nuv26++Cmm4AzDMK2gFFyxR5nHLLTVMouQiMQVYDo9rbeuHj/DDNaf5yeR4Y6WOaWCmmlBvUsnWyC5CgXux2FsUMjLjLkamrjcnQgxFFDVCX595VhPiQxKC1+gvfthJ5FWkjUcmQJcKoQ3A9b4ThR1kYzMVK9QO1u/5Y2pq8RLNrQSMwKgWtf++E9dTAb0ZOcMca5pwxpipZWSheXnSQoAu5sCwI6vYGxOy3uBfFpU/xkyCrjqFq4ZaSAwQrGBDeQTHVzGVh/SdHXqPwxSLSRJyxStolt4ypP3vhCoSFHRi5IKAgAeeK8HlZMQpZoYpJvPuIDW54k+VSSRw08oliZAYwjXB1BCdKNcb6bKSLgC9sXyGkp5KesraepikqlR4pKuNA4cX8RVbqG3uO18c000EuZ1KQU6qsjtYK7BV9yT0xanDHGFdkNTl0ObzVZWlVlldKpeZMHYBSjyAqqKEA3HcnBI4IIdytkGx8H+kdhYqSalu5F8PssqpaOMNN/FqmqMzhqYlabWQTrVzvo9t97emBD4xfDhKDKIK7KJKqtahOmRjECml236bkrsSwFgCAd8WdkfEaZsMxgyHMMvnlomQVwVmlpixBOkVGlVLC3VR374kKPP5ctqWphK9PPMGGtJRddQtqXazEe2x7Y6nmv4mLHzyro0oYC+z8e1jfzK1xeqpVTOKqmOrJBaJmjsAjBbi32xKUGW1FQ2p0a/1G9gCf7DB5xvl2W8N8SSQ5fCtLTNEpCJPJL4wSG1Ft9RO59+mBw1bSAgNyl8yT+mPmPI54WOMjqFi8VG2T/tHuXzR5XpEQ507G1k/QYnYKZ6phJVVGhT11NtfyAHXA3l78ybl0zc2Ug6nIsFGJmgiiec8yQTOpAYt0Hp/6HXHLce862IUKENsuWmy+IchDJIR1ZbfliJzyYVU2ssFb+rVsPxwtWSGKApAq6EFnYnTqPrbt6YAs0zMIzFIqdbGwYktv6euI0RsjalxZca7krUVjQbGujla9rCPb8Ri0fhrw0+dIkuZVaQU7NZWWHVY+uroD/bFE5dHV5lnVNSs2uTUHcDYRj28/THZvw5kyimoIo3pI1qTHZ0dbiRfT/NsXekq0D3IfWZrI6EsTgLhlcrr4oa9EYq2kOqAAhha+wHcC/li/MvhWKNSg0g21D/y88VvwzNTPDGv8gUKl9yo7b+mLFoZwVAc72tjr+OAi6nHzsXazJiwI364bhfEQbA74wTBiL9xj1X1am7nFFycTyWNVTp2vgG4zRpqGeGljEkzLtftv54NKyQCLTqsxHXA1m0ixxGOO3TxN3/HAnYM1dGcVccZLR5RmMsUgR5w51DQNCHyB7n9PfAquaU6NpCqGHSxub++Lw+LEOWutSDMxqRYOYjpWK/RQe7Hy3vjlnMKxqbNHijYizdH3NvQ9/wAMfNeRgtjRn0vjZ/p3DqWSKZQ0/wDEC7qna/rgD4koFlnec0kxksbMguPwxMNnRESgIgIFrmQg4bGqqPrWQhP6iLr+
uEYuWMx2UB5VtRFVQs6xwyAarXYbX9sbfMSEpFLBIzDoYxYdcWRVr83E9mjWYjwSgXF/XzxDCOvpgSaqIaFCguNF/uf0746mN1yDrc5OTG+M96gvFmkVPIoho5KmNTqbWLG52sLYcSZ8ZXjC0eiEbWK2J+/tgkT52UQiJ4GdDqkj0B1vfuexwuGzPW7PErwQOpSOOEXAHWxwwoD7RIYj3goM45mnnQCI72207jzPlhxTZsksZMrwxkGwVKYsAPe+CWalrDS6onjnKklLwA3U9eovcHr6YUpp6qZC1OBpBteYAMT5207DywsqK1GhjdGVBVsnPkkp/FcGwtax6YbxOI5GlYgsG/h7X3v1PoMb0TAuEtq1baffCU0dpXVbBAbCx8sdVRWp89dkmbUupqidy7FgvXuSdv1wtTsTXx8pGciRVCLcsx8u+5OPYKadKaWpjjfS7BY7kX69fx298NkpWjqDCsjRzBwLFSrBr+XUG+PLRJM9sTqulpjR0KVr5PU5Ssr8ySlqoxEUl02LCx/m7na+xth9BkUXGeVmnmqJaGCSMpL8u38TSTdkU6TYMbXI3IFvPFH8GfE3MskzPJaWeZpssjqgtXCzgmdNR3dyL7E3+2LG4m+OVPwzVtQ8Iw2rYp3p6pYtBhmUfRNGwBGrsRYjvjs48qvl9ZshCVRQgd/e5eMqcKlHcY8Pw8M8UVOXQ1lRWRBv9SWmeEkEnazgE2/qtY9sQfKEbNYi8a7m/c/4cO8xzeu4gzE1Obzz1taZSXmllaR2BJIG/YX2AsMLNkGcT0r18WW1RoF1HmiI6fD9R/ztfyOOWw5ueA1ICLOpDyPy9v6upBw8dKRsqVkEprg1lN/BywT+e+GtfSVFHUPFUxtHKrbqw3Hf77EH74cBpBBAoa0hH4Dz9AMAwqeGhNKFGhJqJNS6D4durYdTTU9TE7TNLLUki2tj0/36YcZjRxw0cMkVW05k/rNlYdyDfrfrfzx7kq5XDWqOJzUNSlG8NPIA2q23nthYaxcZxZWo+8fcM8X5xw1A9NR1WnLmqFnkp5Yg8Usi2IunUjwrtextvi3uFvjstVnGd1nE0dPAgoYjQUoVrNPGSWW9iQXBYav5TpPQYoWlkWOpVRKwphKpLutxpva5W+5t2xY+bZfSZhlE5y6OnIhd21ohDBRvc+4PmcV4/I8jFvHsf073XxHYQWB31H/xJ4nyDi/ihsx4WiqFjaCMSmawu1uw/lIuVbtdbjY2wDy6p5REr2W/iK9hjSlPIpQuoFifEb9cJCqSJySeZv0UD9cc3M7ZsrZD7zq4zxQAyVlrly2iZIBpllIVSOo9cPuHHaeeKKNdfisiD+Zj3OBqXmVb82UaV6IowXcJUzQMTFfmMpC27A9T74lygKnzHYiXf4hRnFpIXjDgQxCzHUPG3e3p+uASWCKhhqMxrCDMgIp0Jvo/8rDofLBZxRIKVWjjMemKyOzG4DW3AHc4rmtq1lPIYFkd10qLi4B3PtfE/ioWHxKfKcJDH4bZDFVVZrauVhKW1AkW38wcdEZXnlNSUxikdUbquobBv6rjp7j74qHg6SCny1UjiUSWuT2H2/5xrmVZVU78+mqVOk/Qx6/hhWTIWymHjxccQnTnA3xPWnzOGizDTypHsJAwOk/7f579I0FWskSPGwYEbEdxj5t5Hms2Yyo8EpQBrOL3KsDtjtH4WcRSvk8cFVIZGRF0k+uK/GzENwaQ+VgBXmsuZKk9jj0VvL5uo9ALYhVrBYWNziOr8yMTugbcnHQL8ZzAtyXqczLuSWNhgP4wzpqegk5J/iEeBL2JOIbPON6PK0cSzKWTqt/wGOcviL8YqivzAw0eoxXtZAT9yRiZvIC6lOPx2bftHnE0tZNK71QGrUxBeVYljHfStyTfzO574oHjl4qPM4Ji4EbHSWVgdJ8/XFhfvda6Fnaop0dhc60ZiDireN4pgrSnRMmsE6Lb/briVSGcS8gohk9SZxUrBGoaKRLeEmMEEehxMCRqmkLJIgNt1tsfQjFdZGzUyry5Q1PJuEc7XHWx7HBoQsmXvpkK3XwNqsVbt6YjyYwr6luPIWx7jLl/LAvSXCMd0PiAOEJ4KmpjAlkVvFcAoLYjcrzKSSlcOdMiNvb3xLJVc5L2VTbquGEFDFCnEio5M3oZppRUuhcgFlXYjV3/ADxvJn2ZRzNor5DqUqA52W/Ww/TD+aoHKZAWNx1ABt+eIOZEEgUFFckEEi+3kb9Dhi5GMmbGo1HT5/mtrU+ZyqYyXAY3uPK9sb5fn9bFTaDPO1mNrSCwHWw2xFyzI1lWy+I7EgEnphVNAQCRPEBvY3wXNqg8FuNOEvh/UZ/V1cM1bHl01Kf4kDqTMD2fSbXTsbG462OJ+X4Wz5bnL5dSxjMvmJeVHU6QI47gG5J2B637+WGHDnxBzk5nQ5ZFmFFQUpmHzNTNTqb+bu1ixPYb+QAxdAzWLIVoIcxmiHMk5fhXQWZuhFze58vXHdfP4qDDjyA8nYDX+L6+Zx/HxK3Jl9vvB/JsnyfhLNmkpqVa8RaFjSQhhGwO9gLg3IvfqPTEVx5ltDLllPm8VDGkyl3eo5gjMrO5MrAWLSMSfqJAVRsD1xMcSfFzhGiqa6mehqq6vSGSO1RSaBFLayrZjsL9dsV9x18UpuLCKOgp3yrKxAnMpgbI7kDbSNrBr2IsbY6OfhjGQBwVPSgdH3JPvBLoo0NiP8q+G9HUiGszVnpVqYdZUOFEZ3O3uCp9wR3wtxT8L4skjXMOE655cxjjab5XTzNMZJOot0RQndjvv6YjOFeOAyUuVZ40jUaqscDqNbBiQo1Mx2QAknY9AMHf/VnCFTV1uQtTZnXPFLKsNXTcuWJwBs5V/D6XtYW8sDjXFlHGhxI73yv3FTLRhfvKKyqr0VktTUIWVlLmQWGliO3nc4tH4VcVT1dVPkyy0NJCpvSQuHM07MT4V6rt3vbY998V5xHw9WZPFS1FcUhWvD1KjUuqxa3iVRZSL7AbHe22IWqrIdMLZaklNyxd21WJPv1xHgyN42bkB/5FK7KKudLQcNcM5rBNV5zlVPKGaNUlLxlRyyYwgYuBYEEdQCbAg7YoHjrJ4sm4praHLaLMaeBSGCV0aq9jc7aPDo8iOuHGTcfZpl2QDLFiiqKeKaQojr4THKhWWMgfUGuG67FRbBDxJwZm1TleV1uZNFl8EsCRQrKhEqgAC1u4O7XJ872OHO6+gqKCa7J739z776jD+OfpG5W7RgiRPCoAFiCDbzw2IsLyPta4BHiOJHNcsOS1BpfmIqomCKW6KRpLLq0kHuL4SoYoknY1sHOTRuWawDefXEp+m5NsEgxGgaSSri5bCLQwYH+m2+LbyLMos6o5aWk5b1TIUmhBJbSe+w327i/UXxV+YTZcrwjLoSIkjtKGa+p/MHz/AEwbfDzhueugfMFEYJIWlMMzBonUhrG2xDgFWBOsXVhtfDcIyZbVCRf/ACJVjPA0NxbPPhxmeXU09VS6ZqeIcx42dVeNbd1JvgQji0WWMBn87YuH4j8P5zXcPGukK5gYq+omu8aq1LSFysS
oQAd7Eld76rgXF8VOkQjYIJAxNr+ajywHmYxgcBRQl+P69zWFSswBOuQ2A74POGEaGCon0ljDYj3tt+ZwNU9PHCTILM/0r/f/AD3wYZVMKChZqkXVAGdT/OQNhjhZ2sTo4E4mRvEFFyqaFJrtOVDG+5F9yTiv5I5JM0OgCyBQPbr/AHwf5xNLKvNqZLtJ4msd/wDgdsQWX5eZD80gBZm1FfIdsF47cFJMHyU9RgBMn4jrsgphFGn1DYhsN2h4hqaGjzGoNMkFfzGpg9UoeTQSGst79RYXG/bE5neWRT0YeVdnFiQb28jgAamkpZgJoj18LgEg+xGLfGXE68iNyHy8mfG3HlqGPC2eT5XnUYq45Iv5ZVbYkdsdefB/iiSpEEUj6WY2AJ3sB0+2OLYy8/y60+uSSKPxMbne97A+mOu/2fOHa2oSCZouQDYcyVTYjyFsS58NZQySnBlLYiHnTQreTBdbl2AAxC5xJOIaqpKtpRCdvbBeuVxx8uMDmOerW2GMzLJvm6Gtp1XRrjKhrd7dcPbGzSEMAZw7xfxW1HLU1lZUGUK1gn3/ANv1xR+d8bz5pO3LCwLfY+ntgi+KtdNTZzV0M45ZhlZGCm6kqxF/xvbFUIVFSTURtJD3CvpPvifxPGDWzS/yvK9MBVh/kmYCUamqpGfyLC34YfcQTCqyqS4R7DoU/wAOKx5hpalHy93I6lSemCCozqQUTx1IKh069be+GZPHZXBBgYvJXJjIIqPsjkhr6SWimYwzKebCx3FxsQe9j/tiehkalpmpai2iVDpcHY/7HAFlrtHNE4O4a4N7ehGC/MJWiy68hPS6nzwvOn1RuB/o/KR1GzQPUAGxuCDcH9MSUMhur7hH8uxwN0c4So8d9DrY2xOZdKYpOXKQ8T/Sw74zIvcLE3tJKoaSJOby46kdyBZgPcYiK3MoZYiQo0rbwkeJfY98Tq5TFVSMFqmpd9WpX02979cMM34KepjU0ua0yyMpJaUaAw+3fHsWMN7xWbIVvUGjVq1wQWsbg9cOFzSO3jFj5DDKs4OzyhXUhpauMk2aCpRr/a4OB+WoqaeRop0MciGzKwsRiz+GDdGQfxTL2JM0bpRzsdKz1VuY9xstug273xpm/Elfmzn56aWVVa6RhrIp8x6jDFlaOMRIAFYgykm246DGLopo5QbSGTuOg38/PDlxry5nZnLDGquKVNTJmtSavOKiWSaQqryOdbMALA372AGPZ0jjlMNJKZItdlkcabgdCfLDM6dKqthGr6iLb4d86CSnPLSzK2q7G+1+nvg2Ju4PeolGzsrLH9VrKF6Li6OAcvoc24btlFLI1dBOEq4rcxpPD9Qby6+Hp274plp2U6Y2LR27ixJ98T9JxlnOX5JFlmXZk9LSJHINMQ0XLNdmJG5Y7bnsAMNxMq8g10QRrvfz7Q8bhDc6FoOHf35TNFNHDHUwobNUwxM8SnYsI5CQNrjVY9O2Ky4c+F2UcRRzxU2dkSRGRGQpGV56E3BKn6WABVgTcXGAfiXjut4mzHLZYL5eaCnEKSRSEMT1Zi3Xc3NvXCGS8TS5PmBqMlaSBeXaXxbSDvf0vig5seNAnEvxHZOz9rjWdWayNSxKb4fZnwfNlvEVTlSfu+nQSRRfN8uZZGGzbXsF269z3wcR5PHxfSUVb8xRZlWVWuRqaStL3I8TA6RsFuL3B3NtthgVPxgoqzhmmoa7LY8wqebrqKeUMqOq/QisDfc7/bFU0PFdbkOc1dfwyf3aZQ6CNm5hRGa+m562sN8D+C2P0nFqQLF7B70dQxkGNrUy3fjbkdDT5PR1pphBmotA0sNmVmUf6bjbTYbqw28+oxRyxPydbKxRG0NJbw3sbC/n6YIzX5txGJjnc9RUzmRX0iMnchVBt2J8IB77YjHo62i1UOZmakjY89ElU2JFwGt62IvhWZ1ZrQUBQ/4+YrIOR5feROrlm7JYLuRax/8AWCrgfis8OZiJKqgWrpZotPMB0SRqpO4PQi5NwQb2FrHEdwtwhmHGOZtQ5QLSXDPLI2lFuwBJP39cOs6yWoyCuFBm0DrVGNXKF7XU/SdvPc22xgL4wHA/3gpyXYnVnDMUnEf73pKavizBYm0SCjqlSWMW2YEq1gb9R+Ixzrxhk9Nl3E1dT5fFJHHA+6TVKVDX6El0AB9rAjvviQ4Kq6Wgelkyesqcvr2IM7RzEMgB8SjYeEjbfETxVEaHPquSBzIs15dTS62a43LeVzfY9rYF/SHhrixLXE9XdA3/AL1OyjNy5N7zSGoWiBkkQuyi58h5ffC8de9W6i7FWsQL9yf8/DDKMCuURC4hcgyN3XbpiXNDFQx3iN3J2Nr6f/e/5Y4r0O+50EsjXU3r5TJGRsUTdj3Jt09gLY84KeCuplhkYcxR9N7fcHDStqUFPOFuI0j2Pc7bnBL8Nfh9V5/BrykCpIANozZ1PtfGBfwzMZvxBHVdk87ppSO8JH1FgbeuBWbIi9QUgD6na1h1OOpuG/2ceJc6p1+fqIcvgNiecSW/AYJ4v2ZY8uUClrBWVbGxlaOyRg9SB3PvheMZkF1HM+F9MZy38Ovhfm/FfFkWWZKjuSCsz6bpEvck9MfSnhDgaj4aySgy2mhT/tkUFgOpA64YfDn4e5L8OsnWHL6cCoYDnTMLvIfU4NWzSKKLVcA47GNKFt3ONlyA2qdR7S5RChDMBcY9qKWAxyDRsRY4Ypm7vuNxhRczjkVkIF/XFQZSKkvEzhX9rf4U/IStxLlVNqpmOmqEaj+H5G3ljjGSikmBYsybeW2PsD8QuH6HivIazLa1dcc8ZUjuPUHHzJ424CrOEeIK2glQskMh0MF+pL7HEjMMPUqVP4gb9pXdDlt33Jc9yB0GCKtyf5rLZpIxay2G2H9FkcjyJyhpU/zYMhl0ceXmnILeEgkixxBl8i2BEtw+MqqR95TeVIXDJKPoO4wW1qF8p5YYsbALvexHa/cYguQcuzaRGuv8TY4no5UnjaG+iRWJVSbA37fjYj74LMbIImeOKUqYOfLSQkNGbrf6T54e08zwWYn+GTuDuMLzU8ms69Ora6qb79satTGnp7uytqJ0r7G2NLX3NCcbqP8A5/VArta4cxna+2BGpq5JmbWxdb/zbj/1hStzCWVBBFFOkSuSSIzdjhkRb6Emt6xHbFWHFw2ZzPIzeoaHUVQ6bX91Hlh6cuGZBZpFZ2A03AJvbDKCIyG3LkF+gMZwZZPT1UFEFjpZmBYm/Lb/AGwxjXUnRbg7keRSZ5HXVhmgMVCmuSJ25bON9lHfy++IOvkhNY8lLEaaJyf4Y30/jhxTyOG5AvGOrINunS/nhGuax5Kr9LG58zglBDbiCw4hQIxjfRqsNX9OJGaNYHZFUgmw0gdD/h/PHtNlbTSQCVTBH1dwLg9T9sZUxVFTM/OZQNdyQ4YIOwuDbGlgT3ANRqCXflpqSykm/bD2fkk8tTKuw8ZUHfysOuF4J6Xnli3Mcm8khXZVv0H5YZTc6WtlejewUkIwIU2vtjOzBqJTQtGjKSS2m/Swt7YUgjkSByqXD6fw/wCf0wusExXmVe
pkUb38u3640iP/AHHhbwKuxI/O3ljb1PAyYoqRiIZEGvSx1AGxBAuF+9x+GPaTM5aLKamlkpqC9U/gmliu8dtrA22BF7jCGX5ukiVNPCjxgRG7Bt3Hc9OuFJKnLTGkU1VM3KAPLeMkBvsdziWm5UwhqShsSxuDeM8tioKKLNmppcwp4TTLNzCGeINqUNtvpIFvL0xE8XZtw3VDMp0parNM2nsiymUQ01IqiyhE+qQgd2IBJO2ACTkxzLJQGd5S2p5HVUA8gFF8MopnFTKrG3MurX9cXHNlfs6rqv1/OEXteMsD4bcUHhuurIo5IlppirytUPo0kD+Ve7EkDbtgn4r4tyTP+G8wo6OsJzPNKyOWqIhFykf0h5D/ACgAKqLt0v3JqZaCqpJkWSP/AOnrUqQdXljww1CuEglEkhvoiiIJ1ew6nA48ziwrWCK+PzHzDTIVHEiTNBWvk0zmFxPEHBkWwUNYGyg72tfBLxjWUlZQ0k2XyQSwyAC9tMkLbFkPYg32NuxwN5Nk1SKXmVcDrDIEcSEXG5ZQT5XKkWwUfuPn5eUVdW2wtcHGMSqkEbPvKsbkH4gvRiRahrX5fVvxwQwykH5Z42blLzJCdhv1Jv1xBR1L5fO8FRb+GbaCtt+1/PG81TUVkLlEM6FyQq7eHbv5g32xzXQsZ1EcKJtmTER1Ka95mGy+IgE9Pf8Azti8/wBm2FIs1eAvFJUkDl8y4VWB6MQb9O2Kmy/Jyqs00eqZU1bi9m6D774L+CKit4fzaKpp5VgAO1+wHXYYMAAVJmbkxM+j/D1BVxyyy1dVDJSMqCKJFtosN++98EvzlKPAoIANt8VXwDxhDnOSU80chYldwW322wSV1cxppGiJVyNj1GKlIURFFjuPuJMyamV2pWD2Xp2OOT/iR+0ZmNJmVRlGRx6ZYm0SGRTdWHUDzxetNUZnU0Uq5mEvuA6mxI87HHz0+Nctfl3xCzsUjySL8wShTsPbAJ+I24b/AIa6l25V+1JxRlo5dUqVA8z5YJOGP2sqmbNIqfPKcRxOQOYLGzE/pjij941aAGV3WTqwbqDjMvzKqqatEQsXZwAAL3OH+mB1JhmJ1PqrlfGdPnekxyglwCApv1wDcb/D6g4mzlJqyG9jZnXZgCOuGfwmy05Tw3lkeZShqhIVJA9r4swVKCYM4FnFr2viWua00rs4zayhsy+BNBQwyTQPJNEASAm1vwxUfEeWR5OJgqU4iS4IeVlcY6H+LnGi8IZRJMt2kkukIQkAtbv5D3xwRxZm2aZ3nM02bVMkxZtQF7IvoB0woYFJ6hHyHHvH1QP3lmUsnKVIwNgGvf1OEJE5cgLkhFsSD2t/xj2hUwROuo30jbrthCod9Cgm6s3uDgTtqEeul33PaGqMlYWa/UsB6/8AGJfK6dVro5aldcKEN9QGre4HtgfjcQSKY7Eqbn2xJPmaU0bNaQIviKKvXBV9WoBakMM/3hlg12oY21dfEOv44QGYUIksaJGXzLA/3xXTcSKSfHKP/iMJniCI3u8p91BxT6bSLmss+LMcu13+TUALpADH+2H8PEcVMnLKVK9wFk2+22Kh/wCokHR3t6xjC8XFrRrpBDAdNUQOM9Nx0IPNfvH+YZIyxHM6SmlbLlblpU6LK7X39geuG+S8FZ/xI2ZT5Dl5rUoKZ6yskDKBBEDuzaiB3Fh1PQXxJyfEqpbhaLKTEvKhj5LEWXWDftbbtgbkz/MMtppocsqZIKXMI1E6L9Mqq11BHezC/vhGE+UwYMADevfUldcXIcTY9/zjyopOTRxUEX+pI5acld7i2x7dDiEqRGlUaVHvEDpNhYXv+fvhegqqp41ijlkLytdvGdh54apTvzJJBG0rtIUiFv5u5PtcYtReN2YkCzFZI4YEaLVqDsNR67joMNtMAcrpN/U3Axvl1O1VVNFsbAlhfrbG1ZFHDM4RyXvZ1Itb1Hpgxo1c9uSlKyVVYx0gUCRaX17EIOuw7k/nbCVevMWWaJYoYyAEUAiy9gDh9kCL+78wo46KmmlrhHEJqgEGIXJ1ISRbtiCLzUcskKyuCvgIB2I329sJX6nIHtMqLZY6CoXQPEwZRbubHHk1AkSKt2kntqa5AAB3F/W3X3xmV1PJrYnpwUkVvC6WBBOHtRlpiq43NRrLESWINmF9x74I6fuejX5ZCgc6YnBF7MCD67Y8nplp21GRC7Hbfp/lxiUocupSbTF6mAy6pFhFyo3FhbvvhtXZa0tTK8dPPBSR3IMiaCQB6nrt3/8AREOLIngNXN6SloGyKqnrqqRq5njSiiA2J1eJmPYAbAe+JvIKCWCUVjRpVJEQrTIbcpidiD1Hv64EeeHQsxXl2ssYbsOgwYcA5zBTVYoqyJOdNIBENBZp3c2AZidKKBudrnFGDGS+2qNSj3LQyKvgmjaN4YZ+W4ZlkAKK4JIYj+axJNul8TUOXioqpHp4AYDa5sb6u5388a0s2TZFWsJJflpJAjI6prurXsQvcEruOu4IwX5TQTV1VJPT2RGPhChgB9juMW5ywwccrAtft9paa9oJ13A1LmlnnpIXbSQHC774iKf4crlk5lQysbWRm8QQemL4gypBEOZBd7bkLhvVZcArFFIHqMcdkhqZTMmQmIu0q9V6g7N6Yi5XOTyQT1i3ia3gA6+QA/wb+eLHzyn5EUjEaQGv6HALnTrMAropfSBta1uoA32PXCqhXLQ4F+J1LRPHbTECLNGHFz7+X546ByTiSDM6JJo5BIjC4W++PnyrPRzSVVOWjlBsFPQn1Hpi3OCPicctpk5sphlRFLsx2cn0xoBmhhOqMxroIlcq13I3A7DHJXxp4DHENfNmlDMtPIoO2/iH+XxZZ48mqqfXylm1j61bt3wL5nn89XcJQh1udmPuL4DnxMbxDCjOUhw/X1GYmmjQyPe2s3CnfrfFzfCj4S1UOcR1/ERjMVO11j6rfzJxKwpWUdass2WQhVO2hemDDK+L4aILzEMGx1bWv/bDWz2KEQuEKbMvbK62mgpFvDdR4SV6rhZ8zijktFJrv0xV1HxjREbVa2ZRffYj1wG8cfGGlyRWgyqRZ66RbJvcLbrfAqxMIyK/aNzyp4mraXL8oilPyJYzyJ5kdCv8wt3HTHOnJmp3+XnVopNyqm+x7fY2sfscWvlXED1czTSuX5jFmDG5Vj6424tycZvBDXUMIlrKRSbAfUnW9u5Hl6+mDD/6SIHH/UJVscpnki0qxJtqtsCMSstNzKN9brIVJKKvUH/P1xE5YHaARqoDk7g9rdsPpamDLJnNQ6cw78rzHb74nYEtQlqsAttIeSRYGLcxVK7EOCL+mGeY561TSGlpoVhiP1MpN2Hl6DGmZ14zOZZI1aOICyoxvbzOGBTTfHQTEKBYbnLyZTZCnUae+MwqYxc48Mdhh8nieM2xtpx5oONnpIaRUUTukICqbuwBv5WJ/P8AHDnLcvTMKeeFP9WNdaEvsV7g+WJMQz0lAlJChJ1F5YxfRqIsAfMgX3PmcRdHDTySSPG5ieM+IHZbdP16+2JA9gxdTxgsDRxU51TKB08x39cTtDl8k
2UZjVxLHJFGUeSPVdi2oA7DtpJ3xF5DTPWZnBDF4+Y/L3WwYnp9vPD2qH7pzTNoKB2kpl1AKN0uW03NutiSAcLYm+IO/wDueTWzI+uZMnzOpfLomeGOdkhkfup3AtYX8JHXGAPX+J6FZJQtzyAwbqBuBe/UYmaSh/eeXKWV2Xotze0iDb8VNvtiMmj05hNFSAorI4BBvY/UB/8A5GMXIG0exPGriFJC1cTSU4lb5hty4uyW/Uf741pcqkiHNrAIyR4Vbr+HngphpTNnNDmckcdPRUa0ytBFtsE3F7WPiFzfcg4jMyRMxqaifkTjLqV0jM0y8vQWubkepBt12tjVy311NKkDUYZdLT0dY0rxLMgQqkTAr4r9fP8A5xs+YiU2dUWOTcaVuoI26H2wybMVa5SNVX+UsLk4yeWOJWBieOeNwWRhbSD/AIMM4m7MDdR+a+qjhVaVpYgw+tjb7LbYYXGfZkhjR66KWPdpFAVrqBcggjcWHfucRvMSaBkjUrKUJVgbXxGwowinYXuUAFx5sP8AbHlxg9iFQk1mtEKSqVJW0RSeKKyhV3/v0xvlmTtJVK3PSSzXPYj0OGkdRU19NFRursYFeV9TXLL9XfyF/wAcF+TUFNWT08tRI0ck0aBgADuo03b3sDc9b4IWomqplycEcPR18UEtfKlgAqknVsBt+A264vDhzhtKOl1xAyixYd29h/tirOCaSmoaRaahZpFRdK3W+/f88XbkFYlTFCkZ5aMS8hB2VFO+48zt7XwIYXLB1JjKqSCpmSnVCtQYBO8bqQUUmw1eRuCLeh8sTicMwPcTHXfcC22G3C4DXrJlMdTmzGdAw6RIAI1+yENbzc4OI4o441ubsRho+oXPAyvsz+GOX5rGQ0KKT1JF8AOffs6pmClKSvjhkLE3eG/bobdR3/vjohYRYHpjcU4fT4fGdul8AVuMBnGOefsw8WwUrnKq/La17EojM0ZHoCQfT/fFQ8Q8G8S8J/w+JcqloVmawkB1AkDzW4x9Oo6EOANCkqLb/niD4m4NyzPaCamrqKGeKQeJWS4+3374HhPanzWpOIMxy6kiipaluWCSV87HBBQfEKoEPLqYkdiNmtY9wcEXxj+Ddd8Ocw+boC1Xw/NJpilI8UBJJCP+gbv74qGrqYKe5Y6SviAv1uf/AFgeFwOREsSs+IkdNHTl0Mjuo1gHpvt+mBfNePedIGgjTRcgC/UeeAKfMXldg58RuVX+kedu2GjVS38XhA2G2PDAs96pk/VcS5hU8xmlaIOCAUHTEJqYsWd3Zw2rUd8e1dSirHHqFkjXbyv4j+uGfzYjcbgjzwxVFagFoW5HXGKULexbp74NKLM2TljVsQSfXFYZdWjX2FsGLZhEYqE8sQ3pwupe5BIJPqdr4Uy0RGK8LqSponq+ZFQU7VDHxStCL38ztcnErmfDOTZvTqaykinnYeGSwDA+wttivYs2kE4CX1J9LX3wT5PmtQtSj1R+kXsTjL49Q9NJ3JvhPwqYAtZla627CZz+pw+zL9n3hmqpXNGk+XykXVxKXX7g9sTeVV8UzozbKwve+C6gzCLMSkSozwRnc/1n19P19up+oZnpj7TnyX9m7PkpppMsqqGq135buWXw+gtsT5nt088VVnHD9bwzU1FHnFMaeuBKaG7Du336D7474nlkp1UUlpXkGy/0juxHkP8A1gT4syfhjiPKZaTiDkS7HQzgGdG/qBHiuTv5YIZANExTYx7ThlIucQFXUxNgB3Plh1yqKm8FQjzyD6ikwVQfIbG/vg/zfg6HKFlCSip5bNFHJflkx36tsSDuV2xCDKTb+BJQhO1oyfzKk40ZA/UTxMRWpqedSM6m1SjtBo3DnXpb8wfyxFTqgrJCkPIeZmdoybgDe9z5Wufvg3paWPLKNIHUu1AgeGQbbMPH03JuL/hgXo4HkmquZAUeWPxqyf6MeoaRb1IufQDzxEuUEtXQiysn+Hctgy2eN45W5xjEES6fqdyzM48gEAG/cnDJ8tApnEDLHzHEvIlkvLIm5Um3Ym7W2tcYditNJMjq55gdWF9vFa1revf74bxU8sq1ldEhKUsgildhcXZWZRfy/XEasxYuT3X7/WbftUe0WWVVHl3PiljvAqvHHf6piwvYdyRrHsoxHcU00VLn+YyjQJWqC8axpqAUtck+pLfbE3lNZT0sEEtU0o5r+M2BJN7bfiR9zjJ6J80zvI45lEazyrDOgA3B/mvba46+2MXIy5bPRB/f6frCH8shsvXTLUkoXSWYEkttZQybj8MP86zKbMJqKlm0RZfFQtBGkaD6dFjq82+mx6jSMOVrknllpVXUsRZYGX6YmW+wPmdye1ziOqcomeGolhsAUJCH+c3uRYd9hv5Y0Nb22oOwKEGcmyT5hq0tMqmlTx37ox0Ej/8AK/2w7GTQlSJpA8pDRSsFJ+k7EeewG+N6N5Y+ZNLpUSqIplk2IvffpfridysVWbZvTxrCJ6daf5Z1WTaMgncDtivJmcEmYADqCP7lmhpJM0pH5tNHVclEt412BJYdhuov03xJ8PUcdVnqU9YiwxUcySSurFlazi6A+p2Hlvg1zOAFKihy8PeprDHTwqttRBjMkt/K0dtzbfEXm1BBR1lTDBB8pTSuAwVW1am3Zwx6+3rgU8lsqfMZxrYgnRQ8yuhMkXJkaKpiqFTY6xq7edmUfbBHwtQSyVSyjUdYBNttiMP85pGNRFItOweOZQzWA1SGQ80jzDDQ33OHXDsAj5REckoVF1NYqRceW/44oTMG3PVRljcPVD0zaZiSrbEarsDvcj12/MYtPhLOY54P3VA38avqzCG86cXMjA+dtQ9CwxVyBKajSaEKJ7gJbfmKVIK2Pfp+uN+Fc2ENQMwjM38BmKBWAeO56LfysL9+uOa2cLkBH5Q7nXIqlizDJipt4pwFA2tyjt+QwRUVck0tr6iCQ3pY9Mc2T/FDMZGy96IrJPG8gZXshdSLHSN97bj/AGwdcD/EKkzNZ1RpUqElYtSuul1Gq2osdmufLHSTyUZqEMES9hKEiDqoexANz0F9yfbD6FNEms9+mA/Ls6kMsSlokD/yfUxHqen5YIIq0QOsLXZSp5XqB/L9v0xVY7hQhj6Gwxq4Gr+JutvET2GIOfiWkp6mii56K9Vuo1f0glv7Yk4qhKvY3RbizPsCfbrj3IGaIyz3g3K+JstqKLNYIqmnnj0vG4uCDjiD4zfsn55w7WVGacFpJmWStduTs0tMd77dWX1G+O95kgiURwyM7rYMBsBhJqeQrcSBbnzxoInitz49T5S+WVbU+ZI8btuQwKn0N/fGR5Oama8ahxDZjHe/MH9xb7239cfVPi/4QcLcf0zw8RZXT1EpuVnVAkqE9w43xyd8Tf2SuIcgllquCZBm1Gjao4GOmdFvf2a3pv6Yw2BYi6qcw5hlNJLUED+GJkVob7grpG1/MdO/TDROHY5IwIYrVCG1m8Sub7WB2Ht3xa78PuJ4qDO6KahmDcuOSpjaFRf/AMSLncXtYX388I1GRyZaoWRKaojY6Y5YLENcgXDX2t5G1r4h9cqt
HR+Z6gRKtrMrkpauRKcEEtZYlNio8z/thzWUuZR5PEwictAsbDYjZrhgPOx0H74sA5QaymqRVRFKn6YGiQnx3UAn/wDId7/nh1X0c+WRwUqFaimhY81wPqQDQFB876yT22wtvKUATOMqzLqyqSQLUxyqQbXCkgj0O4wXUmZPBOgk2JuulhpO3n+OJaGgppal4a6jIVTpSVAHAktcM3c9vL8sO4OGqSnqIlkjZjLcLZdXLN76yb23PfyHrj2TyEA5QhYEIIc1pKem5aNKZIjyyAwuz+Snpbt+OCXI8zqqCserqlRZbmKIIA4UhRsBfT0Nrm57AYC0y2nEXOSapDvKUQJCfEbE6t9iDY9+xw9NXJRROsRKIl1c8hjrY3FtJtpboPIi2OefIJNmGOR2Ye1FfFUzSSy1s0RkVbvVFkE1xYHSvgXcEC5/PfDkZhDQUU/ycdOEmTcrHaRmAv8AVfv0ucV4lZUyyt84GSjmjaaN5CbmMGwG24G2xPcemEc5zmasjgmjjEsZ7wKbkWINztvsb9bYMZSD8zbjavyk1iiFbVCmJCpU7kFb73279fQdcRf/AExQKSJpYxIPqAHQ/fEhHWVdJHyqWROWqoC8imQqpuRa246AX6Y3k4RmzcJXmlWtNSuvXTwB1G5FjqNwdunrhwz3u6nlUt/KLg+MlzKaspXosrnp6WrXQjsmlQTcjSD9I7j0wxz/AIdrslq54pFjhnmQPUTmoVjJa2wAJIvbE09fKuQ1lUizq0VXJGp5jNrNwtgD5Db7YYcR0MkOXx5hWmCGNSlMXkBDmTRrew72Jt/xiHC5Z6J+PzP7MzgNwffLIaqMSTpK8YkvzlOmzAGxF9iL7W7DE/8Au1n4aighj5UE8pnnlTud1Uj7H88RmQZXW8YZxHT0Ec1YinxDoFQW3/zzxaL8OU8OUzVDV4OYQvyZKPQOUynbl7eSi9vM4LyXGPiCep4Y72JVVRk1UYQFheKClQa5GNkjW5Iv31E2wvlxlaphqRMY9E0cjvb6yCb2+9tsFlTwlmSsKKspqqEy2cRousvHYm4UdSoP23xlCj5jmVNk9PSGCGGVY6Zzp1Pbrq9ASSSemB9XkNwPSrUF6Vpsxr3ZKSOJKaN+UFjA5Sk/UT3JJN2a5N8N4IeRUygQ+JCHLhSXYEXAB9Cb2xalIuX5RPBSZNVJWR08jJWVkwULUSdCVW3QXsL998CCs8U8s1BFGVjLXMviuBtb79DjF8hMjsB0IRQfeRAy6PNz8tJTGeo8Ca1UhiHJ0+4vf2vgmThGlynN+ZQ5ikEdBCPAgLfPzvcOVPYLcbnbb1xIZZLLSQVGZmKNJ/BBTMtv4Tb3YeekE2HmcMmyzMFlhp85jlpI6iBXhDNpsBfRYdbXte474E5SxIQ66nqUdCQdSJJZatFmenhS8OtBcu4Ooj2vt+GHNHQZlUxsmb1NTOUkVRDpud7eYsPffD2mpUjmjoaiWOllmlI8bEsXsdz/AJ3xM0tbFV5MsDLIJxWu8gQ2ugQDqe5N/wAcYuT0koTyiRE2SfOV8FJTH5pIFMShTZnOrdRbuT0tvt5Y0i4ezGGctV060yK76YpgxNw9iTpO3Qge2JqmrpuFad8xptD1cqnlS2voU7Fh31C1gT74ypq6OBqNCJpZZ7LKUlLhh1AFrE9dybXvgUzMjfSL/e4QQGJVOXaMleSvpkhgjm061Zhp9/I39e+IekrLs9cJCBK+sEbB2Nz06Xtf8sEdFBJPR1YqCtRQSl4mjqSbuL37bXG3TuTfGlXleYvAI6GKKKAHwMyhNLbatK7A38I87C3QYU2YE1cZ6SsCRGNTLV5hT04SFQ8swkRkUtoAtp3/AA74d5BR1+VcQzVc1dJT0usgM9wpF9Qsb7sSBt5A4jKsV9NHetrH5crIWCLfcf026eWJLimWKuiybJcpgano8upRIZ2BlZ53sXkA6bXVQT0tthuF3Dd1FKt3csnKPjNJl1RK+Y86osjKgSDlqTqG4a9unbB8fi7S1KU7RySBgrTqrjQ1wuwBO2/vjm3NaaGhpkFRVaklcrZrlrC3QACxJufLriMNROmk2YqlyoUllO91P/3bdsWfxuUip467nRdb8RYv+rMiqqh1YJBWDlr9MGqO67eY39b4v/hKvqM7ghzKrVoo2UGCnYjUgP8AO/8A5ny/lHrfHz0jz98onXNZEWeKWdFZCN0jDguF7WsAMd5fC3PqXP8AIojlDwi6hwuoXIPTYYu8bIWJZupi7uWlTJBOrc5tKi1/M4kqWDL5hZ5CW6DUbYFnqRTqEaxlLWtfpfuThnnbTjLQYZhE8Uga/Zh/xi4sRsRoUNqEmZRx0stqWQsAPfEWuaxzLyqsWLXCv6+RxlJOjwQr4uxBO9x74bVtLA9PJGfAxOpST3wQY1YnqHRgN8R8gouJsqnoc0gilDKeTI6Asjdt8ct51wPTwcPxVrzkyxc1K/kR2AlQ+HbuCNj3uL747DzPk1dANbqNQKG53Djtjk74gcRQcL8S5nTzLHJk+eRGCsibqkw25g9QbH2xz/MQOoYdzVAB+qV/liyZTmMlVTzsxpFdwRcFlKGxvvtvsfNcQ5pKrMZictmgeBUfUXl0HSuonb1PX1OE1euy+orMonkCSLOqxv1uo36dwbdB54m6jMKXLq6VuZGVEoDpTIqsRe4QXHiv39fTHLChf3+/vN4Kx3oRLKcrrKbK5Mxr6eEUYqOSYFNpXlZSQBb+XbcjYb480SPK6LC6tqKusYAC2Nupsdr/AEnb9ceTZppWRrtHEZmlanZlsj2AUXPQ29euJKlqoP3FTyZdQTvU1U3N54RmZgD9NztYEfnibIxF/JqDQGhIekp+fkwjnDlZ5hqbUUKAE9N9jex8t7d9pOjkEU0kNXUTSUnMeN9D3WDUSLA+Qv5bb43aFMwzCdp0hEQIenpYruEa++oAEAb+vltbEzRZFFSU9HIuURZZDJOxmWdgjEE2Lbk9idtx5WwRyqAdw61owSqxPSzSZb+8JIXlh/iMb6Y4grAKvoQL7dz6Y0ocqkeOqaf+LRhQIJYZizFdNxc/rtc7dcEGYSwQ5j8zCqVqRFhDfYGxuAOlwLm19t8DiZj8r84s8bzQtLoVF8JUk7Hb1t1vhi5OY/OKJ1ub5ZzZamZlRkQwXgWWLSXIvqIFrg2DWHQ4VeVXctROTCbWsCtj3H1DC9DmMlHYTwgSRUspXnG5BYaVHck7nGsELZgrzxypGhYhVvptb0GNOQ9rMHKqUwgyrhT5fhxamsHOqZXqKhYzZi7uT1HYbqB7Yn6r4ZUkWS5VScXpHXGeKSplKklllLam027WFjjKd52gizCSBWSJ2cjfpawAGFOJM3rMnpqSmqJpHqIKIsjC/hVtyCepNz0x8kvk5+RYaN/ruVpxC2RG+TcB01TRR/I8SHJMoijEMc2gxMi672J6nWNX4XwU5dwpwrDSUdOnEgy+oSVpIYoaZJlle9/ET4vc27YA+G4q/Nqalr86Somy9WKllawMSjqR/nfFl5Fw5wjlnzmYzZ7U/Ps6yxCONTHbSbRnvptfYWxS2XI
2Uoxuv6/5hKB9qEGsxo3OX8xnpp9VK0S1aFmKgEixHXp4rd9sCyVVHmdNrpIY0zSZuTUmyoV8NuYoHmLXtg7+VmyyCOnyiSDMDLO7J4NPhKE6WVvW2BrKaSpolzCqz6jo6aokj0RaogGFtwBbtjEyn0mLH8oLX7yHybgOCrZ8ry9poswgmEkk8Sq8UY7atRHucNOG/hnmXEccs9dmVPDBzGjREGp5LHrc2Ci+998FlFTf9M0T11VqqayvGtoCwW1rncX74hs0g4hqsyhkrlZKVo1cQUzeEI38h09PfDcXkvR47+5P9oHFANiP8+p6H4d5DFRZclKM2VedBXq4laPS1ix1bG5PYDFXCXN85RqrMJJ66sqTy2ma+y3ve/QE3Fh5A4OqqjoJ80mnqKQVJWMKgncWjIJJ8Ppt164Tr81y/MZpIjrZABGYXumr1su3UYrxZyo4qLJ2TAZS2hGua5NBmNLlWfVcQNTSQR0uZR06aQZUI0yepKgAkdTiQz+dOH6mtjyeApGJOc3hAL6twL9gAd/XDqmElDQ1B5I59VOgRQQLIfpBviL4odaeCngkliaSZwwLi6NHewF8e5M7izqeKMLgvm0lfmNFFSvqVVZqglUIF26W89u2MDylYaOkm5PzQ1zyFbWUCwt+H44XqqrNZZBTx/xam4ZUDBEsvTr0HTfEjR5bmk1HT1VTBBzJpN9EwZlUA9B33xY1KBcEKe/eDVfmlRTPFl1LJaOlbQgBsLE3LW7knfB2jR1mXUbiZVZBdUMtyttmv79sMMp4co5XqazNzXUlDLeOl5oUPI3crYDa/wDfHrvw9SF6GiyqZ6OCUtNMJW3YD+ZupOJ/IplKqN+8duqkfmNFlA0tVZnOjqNIGknmWPi0gdD64msg4aOZZfFJltbUCKna7iRLlkN7Am/QeeIWegqsudYYqeb5GqQvC918R6nf2ODfgGpi+QqcwrLKkcZLQ9OZqGnRbyAN7emFZc/DCrTFVe6kHxjldJz0yiimjjqZCJ2Ldb/0j0tcbYEsxzFIIqbJ6CNqSkTVGum7O5JuXv5E3xYVbkEmV1Gb1ahKvMKaU0lC0ig8xWXUSPKy/ngSy166CB5HCSrIOVBeK7JqJGkdyb32xR4+UuOJ9v6wSDyK9SCoIPmXaiLR7AaAEDBl7i3T1xY3wp4jzDgLM9UKyR5Xp5bMx0G99hp7Dytgcl4Sk4TGX1FSsd6tGmpY2Ymyf1E2FhfoMRuUUdRmKVFRmdTLCslQDPJqJ0xqb7eV72vipctN31ACFTTdzsHKfi9kme5mKNKuIKjAzIjX+nfY+eJ+Hi+j4kq/3UivPqWQq0Q2bbwEH1J+2OPctoaaizPmZZDG+XzORA7XB5gFrOfX88S/DPHlVwzmMVbS1TfvakqSHRt45IyALW9xis+YbqNQ8D9Qnd75O2U5fTRR+JkhVWBNze2K/wCMc4zClg0Q5fUVR1i5iW+nfe/lgX4Z+PK8Z5vlVPmdM+Q18yuqFZeZHI4W42IvvY7YMOLfinluT8N11PKIo80K2jjjN+ZY3LDvsATY46ePKmRCQdRZYg3K0zfP80onzETUk0sZPMWEGzggdTfobjFC/ECop+KP+4mppIqxAeZA+m5262NxjbPvjNmmZ8RZ1mWT1aihgVpY9dw0gvY29cCNf8QafiCGkety/XM5PNmkusiN5hh9Vu6nEvqAqR3U0nkLMc8L1Jg4lhqaipjmERKmEoBzG03PuQo7d8RmXLHmdfJDIzwATu0JnJtG5B0bHtfr5bYQoc5FRVVEaxRQpCBpqUh2Jbpde+HcDvR1QFa1PBA6iTU5Jsv/AIDf1G2OYUJJqYhBbczNIJZqtaWWop2lE7FwEsWPToB/l8Eaxx02XJlU+Yy12yycqCP/APjPY+PX0I6AgeeG1VlzU+YrDlEK1uZwohfXGV5rdRubqTY2I/HBPwZlMFQKkcTmThlw0kmkRgtI17AdbAb/AJYRk6HI/v8ArGrhHLuRJCUZpC6/K0VPUqsyQlVeRLX/ANTr59P6sFtHNS5hRmmXhv5ZGjdY6moqJZWLBbqLkgbkgdO+J9Mky+OnjrVv+6Av8NaikA5jWsNIUnYkX33v6YlKXIJsxp6isi5Jn06VVpdK6AQQLdb9N8crL5AApfaN9JhdQAreGhUU8Pzaq9BLrM0ity5EIG1xazi426e+AfM4K+Bpar5W+WxMCJyPFcbeIdh6jFomerOZ5hleZxvRoQqs0iWZjc7ggeEXO2G7w0FNVvQz1BzZ4hr5ESFF6bXc/V5Ww/DlyX9fUz0wRd1BaLh6mz7KzmdcZaqSKFdIQAc0qSNOnqdRt+Hrh5lfwyz3Nad3ossSSKGQwghiouvUAel7fbDpKmleCppqyOmVA6vy73UXNyDa3fBbPxbmEKwDKo3NO0QYBQoCnfbc4audgeI/6nsaIRvuDuQU1VPkeWZipesiWNedGn/1G+/QdMNs3zuRJKw5sYnqlVZVppfpUntfvfsMZjMcbEoNj/8AR/rNbSmNqfid86yaaCMPl8rqYoafT4bjrYeWGfDeXRnNYKWrV4wq8xWWU6mdbHceXXGYzBvSF1UV3/eK3QMIL1UuY0tZmJUxvVvbSdJTey/liTg+ezQV8TvDmlXEB8pDsoS17WY9T/tjMZiTEeQAPz/WGPqYA/vUZT8JZXDlYzriynkNaCqwrHMwMjG+zdh1wnxZmElFkyVGWTLQwpGIwIRcB7bX8xfGYzFD5GV8fya/WMYAKTBFcozSvgFdPJCalHASRTcTNYEhl9cOHkpkYGqy+qyyVZeZzob6S+5swtYX3xmMx1Vclqia4NqHeTimf5aj4lyDLqo16FmrpNTlR0Sw6AgdcDX7oqqvjGVaChSuy6BwiQyr/DAA6b9jjMZiA5W9Pl7xjfWgv7zfijhlaXLubDSRR17FkanhkusMZJNlJ/vjzgjhBjPFNl9WaymXS0qVAs6eXuL4zGYNcznAT8xNU0nuKuDnqqiPMc+zKmENECsMaMRoDdBftYfrgAzemgeHTkb1FdBTyjm06OCCtt2tYXH54zGY6XjjkpuVAAEmN6iup0y6GVowk1LZZFJIV1Gwtf07YmMgy6aeCqhgpngiiKVABa/MLHcX8wMZjMS+UePjMR7f5ElstowuzrKqWjqZq9YJ5GRkVVEhsZmILMQfww4g4eo63N+QJUpap5A2keLl2BJYKereuMxmJcBLGyZ0sONWAJ+3+YU51wcnEmVUlbn0vPkaFYKZYxoLqgIG3bpitM0ymnyujWnpagQaWK1aOba7HZScZjMMGRzkIvowcwBe4NVL0op4aCCSSIJL8wUiFgOguL+mFpqahyzPMxiy+inzCVYw8U5ewt2A9d8ZjMdGzo/EkChluMJ69oqmKeeqenrBIJUSEm6ODYW9cS2dZpnfEkbpV0Eysw0tPpAYkdx733xmMxev/wAxBCgxrnXC+T5XFllJPG8VdJTlJDGngJI/m62wCVPCkMM0EAkkid59KaASBrPU3xmMwjA7EA32P8zc+NQdSczz5HJ8zqMtSLRBAgHq3QE/554lcoypae
LLXzNZGpJQJIJta2sbkAAjqdtsZjMLZ2UKfv8A4iF25uK09WtJVVJy6okp6oOCzvclr9w98L0WcTVeZQTZvUc1I1IZFjuJOw1eZxmMxh+oG44MSKhZBnFVmdQrSCOFmAQoq6AI1H1Eeew3w/TPaSmq0aprJ68Q2VRGdIsOl/Q4zGYkPj4yLjj/ACXGPEnEnMeesmkkFRUOrTKF8N/5bdyMC1Jn9PT1RhqYFkmLMTpYgE2uLnzxmMw1MCVJ8h3ca0WWVudZhM9yqz7qWSyrvsD5YfVuWvFVzIKtp9LWJjnsAfLGYzE4as5HxPJjVhZn/9k=",
322 | "text/plain": [
323 | ""
324 | ]
325 | },
326 | "execution_count": null,
327 | "metadata": {},
328 | "output_type": "execute_result"
329 | }
330 | ],
331 | "source": [
332 | "fn = Path('samples/puppy.jpg')\n",
333 | "img = fn.read_bytes()\n",
334 | "Image(img)"
335 | ]
336 | },
337 | {
338 | "cell_type": "markdown",
339 | "id": "0f77415f",
340 | "metadata": {},
341 | "source": [
342 | "All you have to do is read it in as bytes:"
343 | ]
344 | },
345 | {
346 | "cell_type": "code",
347 | "execution_count": null,
348 | "id": "6931d432",
349 | "metadata": {},
350 | "outputs": [
351 | {
352 | "data": {
353 | "text/plain": [
354 | "b'\\xff\\xd8\\xff\\xe0\\x00\\x10JFIF\\x00\\x01\\x01\\x00\\x00\\x01\\x00\\x01\\x00\\x00'"
355 | ]
356 | },
357 | "execution_count": null,
358 | "metadata": {},
359 | "output_type": "execute_result"
360 | }
361 | ],
362 | "source": [
363 | "img[:20]"
364 | ]
365 | },
366 | {
367 | "cell_type": "markdown",
368 | "id": "165ab678",
369 | "metadata": {},
370 | "source": [
371 | "And you can pass it inside a `Chat` object:"
372 | ]
373 | },
374 | {
375 | "cell_type": "code",
376 | "execution_count": null,
377 | "id": "e67b03c0",
378 | "metadata": {},
379 | "outputs": [
380 | {
381 | "data": {
382 | "text/markdown": [
383 | "A cute puppy with brown and white fur lying on grass next to purple flowers.\n",
384 | "\n",
385 | "\n",
386 | "\n",
387 | "- id: `chatcmpl-xxx`\n",
388 | "- model: `claude-sonnet-4-20250514`\n",
389 | "- finish_reason: `stop`\n",
390 | "- usage: `Usage(completion_tokens=20, prompt_tokens=108, total_tokens=128, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n",
391 | "\n",
392 | " "
393 | ],
394 | "text/plain": [
395 | "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-20250514', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='A cute puppy with brown and white fur lying on grass next to purple flowers.', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=20, prompt_tokens=108, total_tokens=128, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0))"
396 | ]
397 | },
398 | "execution_count": null,
399 | "metadata": {},
400 | "output_type": "execute_result"
401 | }
402 | ],
403 | "source": [
404 | "chat = Chat(models[0])\n",
405 | "chat([img, \"What's in this image? Be brief.\"])"
406 | ]
407 | },
408 | {
409 | "cell_type": "markdown",
410 | "id": "d60541b6",
411 | "metadata": {},
412 | "source": [
413 | "### Prefill\n",
414 | "Some providers (e.g. Anthropic) support `prefill`, allowing you to specify how the assistant's response should begin:\"\n",
415 | "\n"
416 | ]
417 | },
418 | {
419 | "cell_type": "code",
420 | "execution_count": null,
421 | "id": "c656d876",
422 | "metadata": {},
423 | "outputs": [
424 | {
425 | "data": {
426 | "text/markdown": [
427 | "According to Douglas Adams,it's 42.\n",
428 | "\n",
429 | "More seriously, there's no universal answer. Common perspectives include:\n",
430 | "- Creating meaning through relationships, growth, and contribution\n",
431 | "- Fulfilling a divine purpose or spiritual calling \n",
432 | "- Pursuing happiness, knowledge, or personal fulfillment\n",
433 | "- Simply experiencing existence itself\n",
434 | "\n",
435 | "The question may matter more than any single answer.\n",
436 | "\n",
437 | "\n",
438 | "\n",
439 | "- id: `chatcmpl-xxx`\n",
440 | "- model: `claude-sonnet-4-20250514`\n",
441 | "- finish_reason: `stop`\n",
442 | "- usage: `Usage(completion_tokens=75, prompt_tokens=24, total_tokens=99, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n",
443 | "\n",
444 | " "
445 | ],
446 | "text/plain": [
447 | "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-20250514', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content=\"According to Douglas Adams,it's 42.\\n\\nMore seriously, there's no universal answer. Common perspectives include:\\n- Creating meaning through relationships, growth, and contribution\\n- Fulfilling a divine purpose or spiritual calling \\n- Pursuing happiness, knowledge, or personal fulfillment\\n- Simply experiencing existence itself\\n\\nThe question may matter more than any single answer.\", role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=75, prompt_tokens=24, total_tokens=99, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0))"
448 | ]
449 | },
450 | "execution_count": null,
451 | "metadata": {},
452 | "output_type": "execute_result"
453 | }
454 | ],
455 | "source": [
456 | "chat = Chat(models[0])\n",
457 | "chat(\"Concisely, what's the meaning of life?\", prefill=\"According to Douglas Adams,\")"
458 | ]
459 | },
460 | {
461 | "cell_type": "markdown",
462 | "id": "9113800d",
463 | "metadata": {},
464 | "source": [
465 | "## Tools\n",
466 | "\n",
467 | "Lisette makes it easy to give LLMs access to Python functions. Just define a function with type hints and a docstring:"
468 | ]
469 | },
470 | {
471 | "cell_type": "code",
472 | "execution_count": null,
473 | "id": "1cd4546f",
474 | "metadata": {},
475 | "outputs": [],
476 | "source": [
477 | "def add_numbers(\n",
478 | " a: int, # First number to add\n",
479 | " b: int # Second number to add \n",
480 | ") -> int:\n",
481 | " \"Add two numbers together\"\n",
482 | " return a + b"
483 | ]
484 | },
485 | {
486 | "cell_type": "markdown",
487 | "id": "48516950",
488 | "metadata": {},
489 | "source": [
490 | "Now pass the function to `Chat` and the model can use it automatically:"
491 | ]
492 | },
493 | {
494 | "cell_type": "code",
495 | "execution_count": null,
496 | "id": "c4c3baba",
497 | "metadata": {},
498 | "outputs": [
499 | {
500 | "data": {
501 | "text/markdown": [
502 | "\n",
503 | "\n",
504 | "The result of 47 + 23 is 70.\n",
505 | "\n",
506 | "\n",
507 | "\n",
508 | "- id: `chatcmpl-xxx`\n",
509 | "- model: `claude-sonnet-4-20250514`\n",
510 | "- finish_reason: `stop`\n",
511 | "- usage: `Usage(completion_tokens=18, prompt_tokens=573, total_tokens=591, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n",
512 | "\n",
513 | " "
514 | ],
515 | "text/plain": [
516 | "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-20250514', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='\\n\\nThe result of 47 + 23 is 70.', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=18, prompt_tokens=573, total_tokens=591, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0))"
517 | ]
518 | },
519 | "execution_count": null,
520 | "metadata": {},
521 | "output_type": "execute_result"
522 | }
523 | ],
524 | "source": [
525 | "chat = Chat(models[0], tools=[add_numbers])\n",
526 | "res = chat(\"What's 47 + 23? Use the tool.\")\n",
527 | "res"
528 | ]
529 | },
530 | {
531 | "cell_type": "markdown",
532 | "id": "b6ccc75e",
533 | "metadata": {},
534 | "source": [
535 | "If you want to see all intermediate messages and outputs you can use the `return_all=True` feature."
536 | ]
537 | },
538 | {
539 | "cell_type": "code",
540 | "execution_count": null,
541 | "id": "47775df7",
542 | "metadata": {},
543 | "outputs": [
544 | {
545 | "data": {
546 | "text/markdown": [
547 | "I'll help you calculate 47 + 23 + 59 using the add_numbers tool. Since the tool can only add two numbers at a time, I'll need to do this in two steps.\n",
548 | "\n",
549 | "🔧 add_numbers({\"a\": 47, \"b\": 23})\n",
550 | "\n",
551 | "\n",
552 | "\n",
553 | "\n",
554 | "- id: `chatcmpl-xxx`\n",
555 | "- model: `claude-sonnet-4-20250514`\n",
556 | "- finish_reason: `tool_calls`\n",
557 | "- usage: `Usage(completion_tokens=116, prompt_tokens=433, total_tokens=549, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n",
558 | "\n",
559 | " "
560 | ],
561 | "text/plain": [
562 | "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-20250514', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content=\"I'll help you calculate 47 + 23 + 59 using the add_numbers tool. Since the tool can only add two numbers at a time, I'll need to do this in two steps.\", role='assistant', tool_calls=[{'index': 1, 'function': {'arguments': '{\"a\": 47, \"b\": 23}', 'name': 'add_numbers'}, 'id': 'toolu_019AHG2Xck5beeQ3UMYFKZkw', 'type': 'function'}], function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=116, prompt_tokens=433, total_tokens=549, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0))"
563 | ]
564 | },
565 | "metadata": {},
566 | "output_type": "display_data"
567 | },
568 | {
569 | "data": {
570 | "text/plain": [
571 | "{'tool_call_id': 'toolu_019AHG2Xck5beeQ3UMYFKZkw',\n",
572 | " 'role': 'tool',\n",
573 | " 'name': 'add_numbers',\n",
574 | " 'content': '70'}"
575 | ]
576 | },
577 | "metadata": {},
578 | "output_type": "display_data"
579 | },
580 | {
581 | "data": {
582 | "text/markdown": [
583 | "Now I'll add the result (70) to the third number (59):\n",
584 | "\n",
585 | "🔧 add_numbers({\"a\": 70, \"b\": 59})\n",
586 | "\n",
587 | "\n",
588 | "\n",
589 | "\n",
590 | "- id: `chatcmpl-xxx`\n",
591 | "- model: `claude-sonnet-4-20250514`\n",
592 | "- finish_reason: `tool_calls`\n",
593 | "- usage: `Usage(completion_tokens=87, prompt_tokens=562, total_tokens=649, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n",
594 | "\n",
595 | " "
596 | ],
597 | "text/plain": [
598 | "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-20250514', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content=\"Now I'll add the result (70) to the third number (59):\", role='assistant', tool_calls=[{'index': 1, 'function': {'arguments': '{\"a\": 70, \"b\": 59}', 'name': 'add_numbers'}, 'id': 'toolu_01MaVpxB81qBfEst6cKg5tRZ', 'type': 'function'}], function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=87, prompt_tokens=562, total_tokens=649, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0))"
599 | ]
600 | },
601 | "metadata": {},
602 | "output_type": "display_data"
603 | },
604 | {
605 | "data": {
606 | "text/plain": [
607 | "{'tool_call_id': 'toolu_01MaVpxB81qBfEst6cKg5tRZ',\n",
608 | " 'role': 'tool',\n",
609 | " 'name': 'add_numbers',\n",
610 | " 'content': '129'}"
611 | ]
612 | },
613 | "metadata": {},
614 | "output_type": "display_data"
615 | },
616 | {
617 | "data": {
618 | "text/markdown": [
619 | "\n",
620 | "\n",
621 | "The answer is **129**.\n",
622 | "\n",
623 | "I calculated this by first adding 47 + 23 = 70, then adding 70 + 59 = 129.\n",
624 | "\n",
625 | "\n",
626 | "\n",
627 | "- id: `chatcmpl-xxx`\n",
628 | "- model: `claude-sonnet-4-20250514`\n",
629 | "- finish_reason: `stop`\n",
630 | "- usage: `Usage(completion_tokens=41, prompt_tokens=702, total_tokens=743, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n",
631 | "\n",
632 | " "
633 | ],
634 | "text/plain": [
635 | "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-20250514', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='\\n\\nThe answer is **129**.\\n\\nI calculated this by first adding 47 + 23 = 70, then adding 70 + 59 = 129.', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=41, prompt_tokens=702, total_tokens=743, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0))"
636 | ]
637 | },
638 | "metadata": {},
639 | "output_type": "display_data"
640 | }
641 | ],
642 | "source": [
643 | "chat = Chat(models[0], tools=[add_numbers])\n",
644 | "res = chat(\"What's 47 + 23 + 59? Use the tool.\",max_steps=3,return_all=True)\n",
645 | "display(*res)"
646 | ]
647 | },
648 | {
649 | "cell_type": "markdown",
650 | "id": "6cdf13fd",
651 | "metadata": {},
652 | "source": [
653 | "It shows the intermediate tool calls, and the tool results!"
654 | ]
655 | },
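656 | {
657 | "cell_type": "markdown",
658 | "id": "c9d0e1f2",
659 | "metadata": {},
660 | "source": [
661 | "Because `return_all=True` gives back every step, you can also filter the result in code. A minimal sketch, assuming tool results are plain dicts with `role='tool'` as in the output above:"
662 | ]
663 | },
664 | {
665 | "cell_type": "code",
666 | "execution_count": null,
667 | "id": "d3e4f5a6",
668 | "metadata": {},
669 | "outputs": [],
670 | "source": [
671 | "# Keep just the tool results; this relies on the dict shape shown above\n",
672 | "[m['content'] for m in res if isinstance(m, dict) and m.get('role') == 'tool']"
673 | ]
674 | },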
656 | {
657 | "cell_type": "markdown",
658 | "id": "545b8392",
659 | "metadata": {},
660 | "source": [
661 | "## Web search\n",
662 | "\n",
663 | "Some models support web search capabilities. Lisette makes this easy to use:"
664 | ]
665 | },
666 | {
667 | "cell_type": "code",
668 | "execution_count": null,
669 | "id": "5d33be9e",
670 | "metadata": {},
671 | "outputs": [
672 | {
673 | "data": {
674 | "text/markdown": [
675 | "Here's a fun fact about otters: Sea otters have a baggy pouch of loose skin under each armpit that they use like pockets to store food and their favourite rocks. They essentially have built-in storage pouches for their tools and snacks!\n",
676 | "\n",
677 | "\n",
678 | "\n",
679 | "- id: `chatcmpl-xxx`\n",
680 | "- model: `claude-sonnet-4-20250514`\n",
681 | "- finish_reason: `stop`\n",
682 | "- usage: `Usage(completion_tokens=131, prompt_tokens=16333, total_tokens=16464, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), server_tool_use=ServerToolUse(web_search_requests=1), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n",
683 | "\n",
684 | " "
685 | ],
686 | "text/plain": [
687 | "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-20250514', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content=\"Here's a fun fact about otters: Sea otters have a baggy pouch of loose skin under each armpit that they use like pockets to store food and their favourite rocks. They essentially have built-in storage pouches for their tools and snacks!\", role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': [[{'type': 'web_search_result_location', 'cited_text': '© Don Getty · Sea otters have a baggy pouch of loose skin under each armpit that they use like pockets to store food and their favourite rocks.', 'url': 'https://wwf.ca/stories/10-facts-you-otter-know-about-sea-otters/', 'title': '10 facts you otter know about sea otters - WWF.CA', 'encrypted_index': 'EpABCioIChgCIiQ4ODk4YTFkYy0yMTNkLTRhNmYtOTljYi03ZTBlNTUzZDc0NWISDLb2NS6/OFMaGiJL9hoMHnFjDmtj63JlucR5IjBECL8xR+jdfEsBupvvfZQm9sbYVSDYiWza2VU/u6fU5LqDMrSvsqYT1pnCh9W0voEqFGOFXiq76aO1I1NWYw6Eke9qIIdeGAQ='}]], 'thinking_blocks': None}))], usage=Usage(completion_tokens=131, prompt_tokens=16333, total_tokens=16464, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), server_tool_use=ServerToolUse(web_search_requests=1), cache_creation_input_tokens=0, cache_read_input_tokens=0))"
688 | ]
689 | },
690 | "execution_count": null,
691 | "metadata": {},
692 | "output_type": "execute_result"
693 | }
694 | ],
695 | "source": [
696 | "chat = Chat(models[0], search='l') # 'l'ow, 'm'edium, or 'h'igh search context\n",
697 | "res = chat(\"Please tell me one fun fact about otters. Keep it brief\")\n",
698 | "res"
699 | ]
700 | },
701 | {
702 | "cell_type": "markdown",
703 | "id": "c9fc4bee",
704 | "metadata": {},
705 | "source": [
706 | "::: {.callout-tip}\n",
707 | "Some providers (like Anthropic) provide citations for their search results.\n",
708 | ":::"
709 | ]
710 | },
711 | {
712 | "cell_type": "code",
713 | "execution_count": null,
714 | "id": "a3910821",
715 | "metadata": {},
716 | "outputs": [
717 | {
718 | "data": {
719 | "text/plain": [
720 | "{'citations': [[{'type': 'web_search_result_location',\n",
721 | " 'cited_text': '© Don Getty · Sea otters have a baggy pouch of loose skin under each armpit that they use like pockets to store food and their favourite rocks.',\n",
722 | " 'url': 'https://wwf.ca/stories/10-facts-you-otter-know-about-sea-otters/',\n",
723 | " 'title': '10 facts you otter know about sea otters - WWF.CA',\n",
724 | " 'encrypted_index': 'EpABCioIChgCIiQ4ODk4YTFkYy0yMTNkLTRhNmYtOTljYi03ZTBlNTUzZDc0NWISDLb2NS6/OFMaGiJL9hoMHnFjDmtj63JlucR5IjBECL8xR+jdfEsBupvvfZQm9sbYVSDYiWza2VU/u6fU5LqDMrSvsqYT1pnCh9W0voEqFGOFXiq76aO1I1NWYw6Eke9qIIdeGAQ='}]],\n",
725 | " 'thinking_blocks': None}"
726 | ]
727 | },
728 | "execution_count": null,
729 | "metadata": {},
730 | "output_type": "execute_result"
731 | }
732 | ],
733 | "source": [
734 | "res.choices[0].message.provider_specific_fields"
735 | ]
736 | },
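737 | {
738 | "cell_type": "markdown",
739 | "id": "e7f8a9b0",
740 | "metadata": {},
741 | "source": [
742 | "For example, you can pull out just the cited titles and URLs. A minimal sketch, assuming the Anthropic-style citation structure shown above (other providers may differ):"
743 | ]
744 | },
745 | {
746 | "cell_type": "code",
747 | "execution_count": null,
748 | "id": "f1a2b3c4",
749 | "metadata": {},
750 | "outputs": [],
751 | "source": [
752 | "cites = res.choices[0].message.provider_specific_fields['citations']\n",
753 | "# `citations` is a list of groups, one group per cited span, so flatten it\n",
754 | "[(c['title'], c['url']) for group in cites for c in group]"
755 | ]
756 | },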
737 | {
738 | "cell_type": "markdown",
739 | "id": "8921940c",
740 | "metadata": {},
741 | "source": [
742 | "## Streaming\n",
743 | "\n",
744 | "For real-time responses, use `stream=True` to get chunks as they're generated rather than waiting for the complete response:"
745 | ]
746 | },
747 | {
748 | "cell_type": "code",
749 | "execution_count": null,
750 | "id": "78836deb",
751 | "metadata": {},
752 | "outputs": [],
753 | "source": [
754 | "chat = Chat(models[0])\n",
755 | "res_gen = chat(\"Concisely, what are the top 10 biggest animals?\", stream=True)"
756 | ]
757 | },
758 | {
759 | "cell_type": "code",
760 | "execution_count": null,
761 | "id": "cd4f8fa6",
762 | "metadata": {},
763 | "outputs": [],
764 | "source": [
765 | "from litellm import ModelResponse, ModelResponseStream"
766 | ]
767 | },
768 | {
769 | "cell_type": "markdown",
770 | "id": "9804382f",
771 | "metadata": {},
772 | "source": [
773 | "You can loop over the generator to get the partial responses:"
774 | ]
775 | },
776 | {
777 | "cell_type": "code",
778 | "execution_count": null,
779 | "id": "cc81074b",
780 | "metadata": {},
781 | "outputs": [
782 | {
783 | "name": "stdout",
784 | "output_type": "stream",
785 | "text": [
786 | "Here are the top 10 biggest animals by size/weight:\n",
787 | "\n",
788 | "1. **Blue whale** - largest animal ever, up to 100 feet long\n",
789 | "2. **Fin whale** - second-largest whale, up to 85 feet\n",
790 | "3. **Bowhead whale** - up to 65 feet, very heavy build\n",
791 | "4. **Right whale** - up to 60 feet, extremely bulky\n",
792 | "5. **Sperm whale** - up to 67 feet, largest toothed whale\n",
793 | "6. **Gray whale** - up to 50 feet\n",
794 | "7. **Humpback whale** - up to 52 feet\n",
795 | "8. **African elephant** - largest land animal, up to 13 feet tall\n",
796 | "9. **Colossal squid** - up to 46 feet long (largest invertebrate)\n",
797 | "10. **Giraffe** - tallest animal, up to 18 feet tall\n",
798 | "\n",
799 | "*Note: Most are marine mammals due to water's buoyancy support for massive size.*None"
800 | ]
801 | }
802 | ],
803 | "source": [
804 | "for chunk in res_gen:\n",
805 | " if isinstance(chunk,ModelResponseStream): print(chunk.choices[0].delta.content,end='')"
806 | ]
807 | },
808 | {
809 | "cell_type": "markdown",
810 | "id": "413b7993",
811 | "metadata": {},
812 | "source": [
813 | "And the final chunk is the complete `ModelResponse`:"
814 | ]
815 | },
816 | {
817 | "cell_type": "code",
818 | "execution_count": null,
819 | "id": "518cda43",
820 | "metadata": {},
821 | "outputs": [
822 | {
823 | "data": {
824 | "text/markdown": [
825 | "Here are the top 10 biggest animals by size/weight:\n",
826 | "\n",
827 | "1. **Blue whale** - largest animal ever, up to 100 feet long\n",
828 | "2. **Fin whale** - second-largest whale, up to 85 feet\n",
829 | "3. **Bowhead whale** - up to 65 feet, very heavy build\n",
830 | "4. **Right whale** - up to 60 feet, extremely bulky\n",
831 | "5. **Sperm whale** - up to 67 feet, largest toothed whale\n",
832 | "6. **Gray whale** - up to 50 feet\n",
833 | "7. **Humpback whale** - up to 52 feet\n",
834 | "8. **African elephant** - largest land animal, up to 13 feet tall\n",
835 | "9. **Colossal squid** - up to 46 feet long (largest invertebrate)\n",
836 | "10. **Giraffe** - tallest animal, up to 18 feet tall\n",
837 | "\n",
838 | "*Note: Most are marine mammals due to water's buoyancy support for massive size.*\n",
839 | "\n",
840 | "\n",
841 | "\n",
842 | "- id: `chatcmpl-xxx`\n",
843 | "- model: `claude-sonnet-4-20250514`\n",
844 | "- finish_reason: `stop`\n",
845 | "- usage: `Usage(completion_tokens=232, prompt_tokens=22, total_tokens=254, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None), prompt_tokens_details=None)`\n",
846 | "\n",
847 | " "
848 | ],
849 | "text/plain": [
850 | "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-20250514', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content=\"Here are the top 10 biggest animals by size/weight:\\n\\n1. **Blue whale** - largest animal ever, up to 100 feet long\\n2. **Fin whale** - second-largest whale, up to 85 feet\\n3. **Bowhead whale** - up to 65 feet, very heavy build\\n4. **Right whale** - up to 60 feet, extremely bulky\\n5. **Sperm whale** - up to 67 feet, largest toothed whale\\n6. **Gray whale** - up to 50 feet\\n7. **Humpback whale** - up to 52 feet\\n8. **African elephant** - largest land animal, up to 13 feet tall\\n9. **Colossal squid** - up to 46 feet long (largest invertebrate)\\n10. **Giraffe** - tallest animal, up to 18 feet tall\\n\\n*Note: Most are marine mammals due to water's buoyancy support for massive size.*\", role='assistant', tool_calls=None, function_call=None, provider_specific_fields=None))], usage=Usage(completion_tokens=232, prompt_tokens=22, total_tokens=254, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None), prompt_tokens_details=None))"
851 | ]
852 | },
853 | "execution_count": null,
854 | "metadata": {},
855 | "output_type": "execute_result"
856 | }
857 | ],
858 | "source": [
859 | "chunk"
860 | ]
861 | },
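862 | {
863 | "cell_type": "markdown",
864 | "id": "0a1b2c3d",
865 | "metadata": {},
866 | "source": [
867 | "If you just want the final text, you can accumulate the streamed deltas as they arrive. A minimal sketch using the same chunk-handling pattern as above:"
868 | ]
869 | },
870 | {
871 | "cell_type": "code",
872 | "execution_count": null,
873 | "id": "1b2c3d4e",
874 | "metadata": {},
875 | "outputs": [],
876 | "source": [
877 | "chat = Chat(models[0])\n",
878 | "parts = []\n",
879 | "for chunk in chat(\"Concisely, what's the tallest mountain?\", stream=True):\n",
880 | "    # Only stream chunks carry a text delta; the last item is the full ModelResponse\n",
881 | "    if isinstance(chunk, ModelResponseStream) and chunk.choices[0].delta.content:\n",
882 | "        parts.append(chunk.choices[0].delta.content)\n",
883 | "full_text = ''.join(parts)\n",
884 | "full_text[:100] # first 100 chars"
885 | ]
886 | },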
862 | {
863 | "cell_type": "markdown",
864 | "id": "989d475d",
865 | "metadata": {},
866 | "source": [
867 | "## Async\n",
868 | "\n",
869 | "For web applications and concurrent operations, like in [FastHTML](https://fastht.ml), we recommend using `AsyncChat`:"
870 | ]
871 | },
872 | {
873 | "cell_type": "code",
874 | "execution_count": null,
875 | "id": "961984e3",
876 | "metadata": {},
877 | "outputs": [
878 | {
879 | "data": {
880 | "text/markdown": [
881 | "Hello! How are you doing today? Is there anything I can help you with?\n",
882 | "\n",
883 | "\n",
884 | "\n",
885 | "- id: `chatcmpl-xxx`\n",
886 | "- model: `claude-sonnet-4-20250514`\n",
887 | "- finish_reason: `stop`\n",
888 | "- usage: `Usage(completion_tokens=20, prompt_tokens=9, total_tokens=29, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0)`\n",
889 | "\n",
890 | " "
891 | ],
892 | "text/plain": [
893 | "ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-20250514', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='Hello! How are you doing today? Is there anything I can help you with?', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'citations': None, 'thinking_blocks': None}))], usage=Usage(completion_tokens=20, prompt_tokens=9, total_tokens=29, completion_tokens_details=None, prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None), cache_creation_input_tokens=0, cache_read_input_tokens=0))"
894 | ]
895 | },
896 | "execution_count": null,
897 | "metadata": {},
898 | "output_type": "execute_result"
899 | }
900 | ],
901 | "source": [
902 | "chat = AsyncChat(models[0])\n",
903 | "await chat(\"Hi there\")"
904 | ]
905 | },
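906 | {
907 | "cell_type": "markdown",
908 | "id": "2c3d4e5f",
909 | "metadata": {},
910 | "source": [
911 | "Streaming works here too. A minimal sketch, assuming that, as with `Chat`, passing `stream=True` yields the same chunk types via an async generator:"
912 | ]
913 | },
914 | {
915 | "cell_type": "code",
916 | "execution_count": null,
917 | "id": "3d4e5f6a",
918 | "metadata": {},
919 | "outputs": [],
920 | "source": [
921 | "chat = AsyncChat(models[0])\n",
922 | "res = await chat(\"Concisely, what is a capybara?\", stream=True)\n",
923 | "async for chunk in res:\n",
924 | "    if isinstance(chunk, ModelResponseStream) and chunk.choices[0].delta.content:\n",
925 | "        print(chunk.choices[0].delta.content, end='')"
926 | ]
927 | },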
906 | {
907 | "cell_type": "markdown",
908 | "id": "085aab41",
909 | "metadata": {},
910 | "source": [
911 | "To wrap up, we'll show an example of async + streaming + toolcalling + search:"
912 | ]
913 | },
914 | {
915 | "cell_type": "code",
916 | "execution_count": null,
917 | "id": "5a1acbf8",
918 | "metadata": {},
919 | "outputs": [
920 | {
921 | "data": {
922 | "text/markdown": [
923 | "Based on the search results:\n",
924 | "\n",
925 | "**Male African elephants**: [*](https://www.africa-safaris.com/How-Much-Does-An-Elephant-Weigh \"How Much Does An Elephant Weigh\") Males typically weigh more than females, with an average weight of 11,000 pounds (5,000 kilograms)\n",
926 | "\n",
927 | "**Male Asian elephants**: [*](https://www.ifaw.org/international/journal/difference-african-asian-elephants \"African Elephants vs. Asian Elephants | IFAW\") Asian elephant males weigh on average about 3,600 kilograms (7,900 pounds)\n",
928 | "\n",
929 | "\n",
930 | "\n",
931 | "```json\n",
932 | "{\n",
933 | " \"id\": \"toolu_019FMXCxhQEhoEFmwMn9k8s9\",\n",
934 | " \"call\": {\n",
935 | " \"function\": \"add_numbers\",\n",
936 | " \"arguments\": {\n",
937 | " \"a\": \"5000\",\n",
938 | " \"b\": \"3600\"\n",
939 | " }\n",
940 | " },\n",
941 | " \"result\": \"8600\"\n",
942 | "}\n",
943 | "```\n",
944 | "\n",
945 | " \n",
946 | "\n",
947 | "**Total**: 8,600 kg**Male African elephants**: [*](https://www.africa-safaris.com/How-Much-Does-An-Elephant-Weigh \"How Much Does An Elephant Weigh\") 5,000 kg\n",
948 | "**Male Asian elephants**: [*](https://www.ifaw.org/international/journal/difference-african-asian-elephants \"African Elephants vs. Asian Elephants | IFAW\") 3,600 kg\n",
949 | "\n",
950 | "\n",
951 | "\n",
952 | "```json\n",
953 | "{\n",
954 | " \"id\": \"toolu_01T4vV8eoZ4RzmvqGG84zGRC\",\n",
955 | " \"call\": {\n",
956 | " \"function\": \"add_numbers\",\n",
957 | " \"arguments\": {\n",
958 | " \"a\": \"5000\",\n",
959 | " \"b\": \"3600\"\n",
960 | " }\n",
961 | " },\n",
962 | " \"result\": \"8600\"\n",
963 | "}\n",
964 | "```\n",
965 | "\n",
966 | " \n",
967 | "\n",
968 | "**Total**: 8,600 kg[*](https://www.africa-safaris.com/How-Much-Does-An-Elephant-Weigh \"How Much Does An Elephant Weigh\") Male African elephants: 5,000 kg\n",
969 | "[*](https://www.ifaw.org/international/journal/difference-african-asian-elephants \"African Elephants vs. Asian Elephants | IFAW\") Male Asian elephants: 3,600 kg\n",
970 | "\n",
971 | "\n",
972 | "\n",
973 | "```json\n",
974 | "{\n",
975 | " \"id\": \"toolu_014KmebLoj2UvaHZQuJ7xRqd\",\n",
976 | " \"call\": {\n",
977 | " \"function\": \"add_numbers\",\n",
978 | " \"arguments\": {\n",
979 | " \"a\": \"5000\",\n",
980 | " \"b\": \"3600\"\n",
981 | " }\n",
982 | " },\n",
983 | " \"result\": \"8600\"\n",
984 | "}\n",
985 | "```\n",
986 | "\n",
987 | " \n",
988 | "\n",
989 | "\n",
990 | "\n",
991 | "I don't have access to web search tools. I can only add numbers. \n",
992 | "\n",
993 | "Based on general knowledge:\n",
994 | "- Male African elephants: ~5,000 kg\n",
995 | "- Male Asian elephants: ~3,600 kg"
996 | ],
997 | "text/plain": [
998 | ""
999 | ]
1000 | },
1001 | "metadata": {},
1002 | "output_type": "display_data"
1003 | }
1004 | ],
1005 | "source": [
1006 | "chat = AsyncChat(models[0], search='l', tools=[add_numbers])\n",
1007 | "res = await chat(\"\"\"\\\n",
1008 | "Search the web for the avg weight, in kgs, of male African and Asian elephants. Then add the two.\n",
1009 | "Keep your replies ultra concise! Dont search the web more than once please.\n",
1010 | "\"\"\", max_steps=4, stream=True)\n",
1011 | "_=await adisplay_stream(res) # this is a convenience function to make async streaming look great in notebooks!"
1012 | ]
1013 | },
1014 | {
1015 | "cell_type": "markdown",
1016 | "id": "c0058579",
1017 | "metadata": {},
1018 | "source": [
1019 | "## Next steps\n",
1020 | "\n",
1021 | "Ready to dive deeper?\n",
1022 | "\n",
1023 | "- Check out the rest of the [documentation](https://lisette.answer.ai/core.html).\n",
1024 | "- Visit the [GitHub repository](https://github.com/answerdotai/lisette) to contribute or report issues.\n",
1025 | "- Join our [Discord community](https://discord.gg/y7cDEX7r)!"
1026 | ]
1027 | }
1028 | ],
1029 | "metadata": {
1030 | "kernelspec": {
1031 | "display_name": "python3",
1032 | "language": "python",
1033 | "name": "python3"
1034 | }
1035 | },
1036 | "nbformat": 4,
1037 | "nbformat_minor": 5
1038 | }
1039 |
--------------------------------------------------------------------------------