├── tests
│   ├── __init__.py
│   ├── conftest.py
│   ├── test_messages.py
│   └── test_basic.py
├── imgs
│   └── logo.png
├── Makefile
├── pyproject.toml
├── .github
│   └── workflows
│       └── test.yml
├── LICENSE
├── demo.py
├── .gitignore
├── smartfunc
│   └── __init__.py
├── README.md
├── app.py
└── uv.lock
/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/imgs/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/koaning/smartfunc/HEAD/imgs/logo.png
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | install:
2 | uv venv
3 | uv pip install -e .
4 | uv pip install pytest pytest-asyncio pytest-mock
5 |
6 | pypi: clean
7 | uv build
8 | uv publish
9 |
10 | check: clean
11 | uv run pytest tests
12 |
13 | clean:
14 | rm -rf __pycache__ .pytest_cache dist
15 |
16 | play: install
17 | uv run --with marimo marimo edit app.py
18 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "smartfunc"
3 | version = "1.0.0"
4 | description = "Turn functions into LLM-powered endpoints using the OpenAI SDK"
5 | authors = []
6 | dependencies = [
7 | "pydantic>=2.0.0",
8 | "openai>=1.0.0",
9 | ]
10 |
11 | [build-system]
12 | requires = ["hatchling"]
13 | build-backend = "hatchling.build"
14 |
15 | [tool.pytest.ini_options]
16 | testpaths = ["tests"]
17 | python_files = ["test_*.py"]
18 | python_functions = ["test_*"]
19 |
20 | [project.optional-dependencies]
21 | test = [
22 | "pytest>=7.0.0",
23 | "pytest-asyncio>=0.21.0",
24 | "pytest-mock>=3.12.0",
25 | ]
26 |
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: Test
2 |
3 | on: [pull_request]
4 |
5 | permissions:
6 | contents: read
7 |
8 | jobs:
9 | test:
10 | runs-on: ubuntu-latest
11 | strategy:
12 | matrix:
13 | python-version: ["3.10", "3.11", "3.12", "3.13"]
14 | steps:
15 | - uses: actions/checkout@v4
16 | - name: Set up Python ${{ matrix.python-version }}
17 | uses: actions/setup-python@v5
18 | with:
19 | python-version: ${{ matrix.python-version }}
20 | - name: Install uv
21 | uses: astral-sh/setup-uv@v5
22 | - name: Install dependencies
23 | run: |
24 | make install
25 | - name: Run tests
26 | run: |
27 | make check
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2025 vincent d warmerdam
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/demo.py:
--------------------------------------------------------------------------------
1 | import marimo
2 |
3 | __generated_with = "0.17.7"
4 | app = marimo.App()
5 |
6 |
7 | @app.cell
8 | def _():
9 | import marimo as mo
10 | return (mo,)
11 |
12 |
13 | @app.cell(hide_code=True)
14 | def _(mo):
15 | mo.md(r"""
16 | ## Structured Output
17 | """)
18 | return
19 |
20 |
21 | @app.cell
22 | def _():
23 | from smartfunc import backend
24 | from openai import OpenAI
25 | from dotenv import load_dotenv
26 | from pydantic import BaseModel
27 |
28 | load_dotenv(".env")
29 |
30 | client = OpenAI()
31 |
32 | class Summary(BaseModel):
33 | summary: str
34 | pros: list[str]
35 | cons: list[str]
36 | return Summary, backend, client
37 |
38 |
39 | @app.cell
40 | def _(Summary, backend, client):
41 | @backend(client, model="gpt-4o-mini", response_format=Summary)
42 | def analyze_pokemon(name: str) -> str:
43 | return f"Describe the following pokemon: {name}"
44 | return (analyze_pokemon,)
45 |
46 |
47 | @app.cell
48 | def _(analyze_pokemon):
49 | result = analyze_pokemon("pikachu")
50 | print(result.summary)
51 | print(result.pros)
52 | print(result.cons)
53 | return
54 |
55 |
56 | @app.cell(hide_code=True)
57 | def _(mo):
58 | mo.md(r"""
59 | ## Images
60 |
61 | You can also return conversations, which lets you reuse the OpenAI SDK's message format.
62 | """)
63 | return
64 |
65 |
66 | @app.cell
67 | def _():
68 | url = "https://c02.purpledshub.com/uploads/sites/41/2023/01/How-to-see-the-Wolf-Moon-in-2023--4bb6bb7.jpg?w=940&webp=1"
69 | return (url,)
70 |
71 |
72 | @app.cell
73 | def _(backend, client):
74 | @backend(client, model="gpt-4o-mini")
75 | def desc_image(url: str) -> str:
76 | return [
77 | {
78 | "role": "user",
79 | "content": [
80 | {"type": "text", "text": "Describe the following image:"},
81 | {
82 | "type": "image_url",
83 | "image_url": {"url": url},
84 | },
85 | ],
86 | }
87 | ]
88 | return (desc_image,)
89 |
90 |
91 | @app.cell
92 | def _(desc_image, url):
93 | desc_image(url)
94 | return
95 |
96 |
97 | @app.cell
98 | def _():
99 | return
100 |
101 |
102 | if __name__ == "__main__":
103 | app.run()
104 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # UV
98 | # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | #uv.lock
102 |
103 | # poetry
104 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
105 | # This is especially recommended for binary packages to ensure reproducibility, and is more
106 | # commonly ignored for libraries.
107 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
108 | #poetry.lock
109 |
110 | # pdm
111 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
112 | #pdm.lock
113 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
114 | # in version control.
115 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
116 | .pdm.toml
117 | .pdm-python
118 | .pdm-build/
119 |
120 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
121 | __pypackages__/
122 |
123 | # Celery stuff
124 | celerybeat-schedule
125 | celerybeat.pid
126 |
127 | # SageMath parsed files
128 | *.sage.py
129 |
130 | # Environments
131 | .env
132 | .venv
133 | env/
134 | venv/
135 | ENV/
136 | env.bak/
137 | venv.bak/
138 |
139 | # Spyder project settings
140 | .spyderproject
141 | .spyproject
142 |
143 | # Rope project settings
144 | .ropeproject
145 |
146 | # mkdocs documentation
147 | /site
148 |
149 | # mypy
150 | .mypy_cache/
151 | .dmypy.json
152 | dmypy.json
153 |
154 | # Pyre type checker
155 | .pyre/
156 |
157 | # pytype static type analyzer
158 | .pytype/
159 |
160 | # Cython debug symbols
161 | cython_debug/
162 |
163 | # PyCharm
164 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
165 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
166 | # and can be added to the global gitignore or merged into this file. For a more nuclear
167 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
168 | #.idea/
169 |
170 | # Ruff stuff:
171 | .ruff_cache/
172 |
173 | # PyPI configuration file
174 | .pypirc
175 |
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """Pytest fixtures and mock clients for smartfunc tests."""
2 |
3 | import pytest
4 | from typing import Optional
5 |
6 |
7 | class MockMessage:
8 | """Mock OpenAI message object."""
9 |
10 | def __init__(self, content: str):
11 | self.content = content
12 |
13 |
14 | class MockChoice:
15 | """Mock OpenAI choice object."""
16 |
17 | def __init__(self, content: str):
18 | self.message = MockMessage(content)
19 |
20 |
21 | class MockCompletion:
22 | """Mock OpenAI completion response."""
23 |
24 | def __init__(self, content: str):
25 | self.choices = [MockChoice(content)]
26 |
27 |
28 | class MockCompletions:
29 | """Mock OpenAI completions interface."""
30 |
31 | def __init__(self, response_content: str):
32 | self.response_content = response_content
33 | self.calls = []
34 |
35 | def create(self, **kwargs):
36 | """Mock the create method and store call arguments."""
37 | self.calls.append(kwargs)
38 | return MockCompletion(self.response_content)
39 |
40 |
41 | class MockChat:
42 | """Mock OpenAI chat interface."""
43 |
44 | def __init__(self, response_content: str):
45 | self.completions = MockCompletions(response_content)
46 |
47 |
48 | class MockOpenAI:
49 | """Duck-typed mock OpenAI client for testing.
50 |
51 | This provides just enough interface to work with smartfunc's backend
52 | decorators while allowing inspection of calls made during tests.
53 |
54 | Args:
55 | response_content: The content string to return from API calls
56 |
57 | Example:
58 | >>> client = MockOpenAI(response_content='{"result": "test"}')
59 | >>> # Use with backend decorator
60 | >>> # Later, inspect calls:
61 | >>> client.chat.completions.calls[0]["model"]
62 | 'gpt-4o-mini'
63 | """
64 |
65 | def __init__(self, response_content: str = "test response"):
66 | self.response_content = response_content
67 | self.chat = MockChat(response_content)
68 |
69 | @property
70 | def calls(self):
71 | """Convenience property to access calls made to completions.create()."""
72 | return self.chat.completions.calls
73 |
74 |
75 | class MockAsyncCompletions:
76 | """Mock async OpenAI completions interface."""
77 |
78 | def __init__(self, response_content: str):
79 | self.response_content = response_content
80 | self.calls = []
81 |
82 | async def create(self, **kwargs):
83 | """Mock the async create method and store call arguments."""
84 | self.calls.append(kwargs)
85 | return MockCompletion(self.response_content)
86 |
87 |
88 | class MockAsyncChat:
89 | """Mock async OpenAI chat interface."""
90 |
91 | def __init__(self, response_content: str):
92 | self.completions = MockAsyncCompletions(response_content)
93 |
94 |
95 | class MockAsyncOpenAI:
96 | """Duck-typed mock AsyncOpenAI client for testing.
97 |
98 | Async version of MockOpenAI.
99 |
100 | Args:
101 | response_content: The content string to return from API calls
102 | """
103 |
104 | def __init__(self, response_content: str = "test response"):
105 | self.response_content = response_content
106 | self.chat = MockAsyncChat(response_content)
107 |
108 | @property
109 | def calls(self):
110 | """Convenience property to access calls made to completions.create()."""
111 | return self.chat.completions.calls
112 |
113 |
114 | @pytest.fixture
115 | def mock_client_factory():
116 | """Factory to create mock OpenAI clients with custom responses.
117 |
118 | Returns a function that creates MockOpenAI instances with the
119 | specified response content.
120 |
121 | Usage:
122 | def test_something(mock_client_factory):
123 | client = mock_client_factory("custom response")
124 | # or for JSON:
125 | client = mock_client_factory('{"key": "value"}')
126 | """
127 | def _create(response_content: str = "test response"):
128 | return MockOpenAI(response_content=response_content)
129 | return _create
130 |
131 |
132 | @pytest.fixture
133 | def async_mock_client_factory():
134 | """Factory to create async mock OpenAI clients with custom responses.
135 |
136 | Returns a function that creates MockAsyncOpenAI instances with the
137 | specified response content.
138 |
139 | Usage:
140 | async def test_something(async_mock_client_factory):
141 | client = async_mock_client_factory("custom response")
142 | """
143 | def _create(response_content: str = "test response"):
144 | return MockAsyncOpenAI(response_content=response_content)
145 | return _create
146 |
--------------------------------------------------------------------------------
/tests/test_messages.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from pydantic import BaseModel
3 | from smartfunc import backend, async_backend
4 |
5 |
6 | class Summary(BaseModel):
7 | """Test model for structured output."""
8 | summary: str
9 |
10 |
11 | def test_message_list_basic(mock_client_factory):
12 | """Test function that returns a list of messages."""
13 | client = mock_client_factory()
14 |
15 | @backend(client, model="gpt-4o-mini")
16 | def chat_with_history(user_message: str) -> list:
17 | """Chat with conversation history."""
18 | return [
19 | {"role": "system", "content": "You are a helpful assistant."},
20 | {"role": "user", "content": "Hello"},
21 | {"role": "assistant", "content": "Hi! How can I help?"},
22 | {"role": "user", "content": user_message},
23 | ]
24 |
25 | result = chat_with_history("What's the weather?")
26 |
27 | assert result == "test response"
28 | messages = client.calls[0]["messages"]
29 | assert len(messages) == 4
30 | assert messages[0]["role"] == "system"
31 | assert messages[1]["role"] == "user"
32 | assert messages[2]["role"] == "assistant"
33 | assert messages[3]["role"] == "user"
34 | assert messages[3]["content"] == "What's the weather?"
35 |
36 |
37 | def test_message_list_ignores_system_param(mock_client_factory):
38 | """Test that system parameter is ignored when messages are provided."""
39 | client = mock_client_factory()
40 |
41 | @backend(client, model="gpt-4o-mini", system="This should be ignored")
42 | def chat() -> list:
43 | """Chat with custom messages."""
44 | return [
45 | {"role": "system", "content": "Custom system message"},
46 | {"role": "user", "content": "Hello"},
47 | ]
48 |
49 | result = chat()
50 |
51 | messages = client.calls[0]["messages"]
52 | assert len(messages) == 2
53 | assert messages[0]["content"] == "Custom system message"
54 |
55 |
56 | def test_multimodal_content(mock_client_factory):
57 | """Test function with multimodal content (text + image)."""
58 | client = mock_client_factory()
59 |
60 | @backend(client, model="gpt-4o-mini")
61 | def analyze_image(image_base64: str, question: str) -> list:
62 | """Analyze an image."""
63 | return [
64 | {
65 | "role": "user",
66 | "content": [
67 | {"type": "text", "text": question},
68 | {
69 | "type": "image_url",
70 | "image_url": {"url": f"data:image/jpeg;base64,{image_base64}"},
71 | },
72 | ],
73 | }
74 | ]
75 |
76 | result = analyze_image("iVBORw0KGgo...", "What's in this image?")
77 |
78 | assert result == "test response"
79 | messages = client.calls[0]["messages"]
80 | content = messages[0]["content"]
81 | assert isinstance(content, list)
82 | assert len(content) == 2
83 | assert content[0]["type"] == "text"
84 | assert content[0]["text"] == "What's in this image?"
85 | assert content[1]["type"] == "image_url"
86 | assert "data:image/jpeg;base64" in content[1]["image_url"]["url"]
87 |
88 |
89 | def test_invalid_return_type(mock_client_factory):
90 | """Test that invalid return types raise errors."""
91 | client = mock_client_factory()
92 |
93 | @backend(client, model="gpt-4o-mini")
94 | def bad_function() -> str:
95 | return {"invalid": "dict"} # Not string or list
96 |
97 | with pytest.raises(ValueError, match="must return either a string prompt or a list"):
98 | bad_function()
99 |
100 |
101 | def test_message_list_with_structured_output(mock_client_factory):
102 | """Test message list with structured output format."""
103 | client = mock_client_factory('{"summary": "conversation summary"}')
104 |
105 | @backend(client, model="gpt-4o-mini", response_format=Summary)
106 | def summarize_conversation() -> list:
107 | """Summarize a conversation."""
108 | return [
109 | {"role": "user", "content": "Hello"},
110 | {"role": "assistant", "content": "Hi there!"},
111 | {"role": "user", "content": "Summarize our conversation"},
112 | ]
113 |
114 | result = summarize_conversation()
115 |
116 | assert isinstance(result, Summary)
117 | assert result.summary == "conversation summary"
118 |
119 |
120 | @pytest.mark.asyncio
121 | async def test_async_message_list(async_mock_client_factory):
122 | """Test async backend with message list."""
123 | client = async_mock_client_factory()
124 |
125 | @async_backend(client, model="gpt-4o-mini")
126 | def chat() -> list:
127 | """Async chat."""
128 | return [
129 | {"role": "user", "content": "Hello"},
130 | ]
131 |
132 | result = await chat()
133 |
134 | assert result == "test response"
135 | messages = client.calls[0]["messages"]
136 | assert len(messages) == 1
137 |
--------------------------------------------------------------------------------
/tests/test_basic.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from pydantic import BaseModel
3 | from smartfunc import backend, async_backend
4 |
5 |
6 | class Summary(BaseModel):
7 | """Test model for structured output."""
8 | summary: str
9 | pros: list[str]
10 | cons: list[str]
11 |
12 |
13 | def test_basic_string_output(mock_client_factory):
14 | """Test basic function that returns a string."""
15 | client = mock_client_factory()
16 |
17 | @backend(client, model="gpt-4o-mini")
18 | def generate_text(topic: str) -> str:
19 | """Generate some text."""
20 | return f"Write about {topic}"
21 |
22 | result = generate_text("testing")
23 |
24 | assert result == "test response"
25 | assert len(client.calls) == 1
26 | assert client.calls[0]["model"] == "gpt-4o-mini"
27 | assert client.calls[0]["messages"][0]["role"] == "user"
28 | assert client.calls[0]["messages"][0]["content"] == "Write about testing"
29 |
30 |
31 | def test_structured_output(mock_client_factory):
32 | """Test function with structured Pydantic output."""
33 | client = mock_client_factory('{"summary": "test", "pros": ["a", "b"], "cons": ["c"]}')
34 |
35 | @backend(client, model="gpt-4o-mini", response_format=Summary)
36 | def summarize(text: str) -> Summary:
37 | """Summarize text."""
38 | return f"Summarize: {text}"
39 |
40 | result = summarize("pokemon")
41 |
42 | assert isinstance(result, Summary)
43 | assert result.summary == "test"
44 | assert result.pros == ["a", "b"]
45 | assert result.cons == ["c"]
46 |
47 | # Verify response_format was set
48 | assert "response_format" in client.calls[0]
49 | assert client.calls[0]["response_format"]["type"] == "json_schema"
50 | schema = client.calls[0]["response_format"]["json_schema"]["schema"]
51 | assert schema["additionalProperties"] is False
52 |
53 |
54 | def test_system_prompt(mock_client_factory):
55 | """Test that system prompt is correctly passed."""
56 | client = mock_client_factory()
57 |
58 | @backend(client, model="gpt-4o-mini", system="You are helpful")
59 | def generate(prompt: str) -> str:
60 | return prompt
61 |
62 | result = generate("test")
63 |
64 | assert len(client.calls[0]["messages"]) == 2
65 | assert client.calls[0]["messages"][0]["role"] == "system"
66 | assert client.calls[0]["messages"][0]["content"] == "You are helpful"
67 | assert client.calls[0]["messages"][1]["role"] == "user"
68 |
69 |
70 | def test_extra_kwargs(mock_client_factory):
71 | """Test that extra kwargs are passed to OpenAI API."""
72 | client = mock_client_factory()
73 |
74 | @backend(client, model="gpt-4o-mini", temperature=0.7, max_tokens=100)
75 | def generate(prompt: str) -> str:
76 | return prompt
77 |
78 | result = generate("test")
79 |
80 | assert client.calls[0]["temperature"] == 0.7
81 | assert client.calls[0]["max_tokens"] == 100
82 |
83 |
84 | def test_function_must_return_string(mock_client_factory):
85 | """Test that function must return a string or list."""
86 | client = mock_client_factory()
87 |
88 | @backend(client, model="gpt-4o-mini")
89 | def bad_function() -> str:
90 | return 123 # Not a string!
91 |
92 | with pytest.raises(ValueError, match="must return either a string prompt or a list"):
93 | bad_function()
94 |
95 |
96 | def test_run_method(mock_client_factory):
97 | """Test the run method for non-decorator usage."""
98 | client = mock_client_factory()
99 |
100 | backend_instance = backend(client, model="gpt-4o-mini")
101 |
102 | def generate(prompt: str) -> str:
103 | return f"Process: {prompt}"
104 |
105 | result = backend_instance.run(generate, "test")
106 |
107 | assert result == "test response"
108 | assert client.calls[0]["messages"][0]["content"] == "Process: test"
109 |
110 |
111 | @pytest.mark.asyncio
112 | async def test_async_basic(async_mock_client_factory):
113 | """Test async backend basic functionality."""
114 | client = async_mock_client_factory()
115 |
116 | @async_backend(client, model="gpt-4o-mini")
117 | def generate_text(topic: str) -> str:
118 | """Generate text async."""
119 | return f"Write about {topic}"
120 |
121 | result = await generate_text("testing")
122 |
123 | assert result == "test response"
124 | assert len(client.calls) == 1
125 |
126 |
127 | @pytest.mark.asyncio
128 | async def test_async_structured_output(async_mock_client_factory):
129 | """Test async backend with structured output."""
130 | client = async_mock_client_factory('{"summary": "async test", "pros": ["fast"], "cons": []}')
131 |
132 | @async_backend(client, model="gpt-4o-mini", response_format=Summary)
133 | def summarize(text: str) -> Summary:
134 | """Summarize async."""
135 | return f"Summarize: {text}"
136 |
137 | result = await summarize("pokemon")
138 |
139 | assert isinstance(result, Summary)
140 | assert result.summary == "async test"
141 | assert result.pros == ["fast"]
142 | schema = client.calls[0]["response_format"]["json_schema"]["schema"]
143 | assert schema["additionalProperties"] is False
144 |
145 |
146 | @pytest.mark.asyncio
147 | async def test_async_run_method(async_mock_client_factory):
148 | """Test the async run method."""
149 | client = async_mock_client_factory()
150 |
151 | backend_instance = async_backend(client, model="gpt-4o-mini")
152 |
153 | def generate(prompt: str) -> str:
154 | return f"Process: {prompt}"
155 |
156 | result = await backend_instance.run(generate, "test")
157 |
158 | assert result == "test response"
159 |
160 |
161 | def test_multiple_arguments(mock_client_factory):
162 | """Test function with multiple arguments."""
163 | client = mock_client_factory()
164 |
165 | @backend(client, model="gpt-4o-mini")
166 | def generate(topic: str, style: str, length: int) -> str:
167 | return f"Write a {length} word {style} piece about {topic}"
168 |
169 | result = generate("AI", "formal", 500)
170 |
171 | content = client.calls[0]["messages"][0]["content"]
172 | assert "Write a 500 word formal piece about AI" in content
173 |
174 |
175 | def test_complex_prompt_logic(mock_client_factory):
176 | """Test that function can have complex prompt generation logic."""
177 | client = mock_client_factory()
178 |
179 | @backend(client, model="gpt-4o-mini")
180 | def smart_generate(items: list[str], include_summary: bool) -> str:
181 | prompt = "Process these items:\n"
182 | for i, item in enumerate(items, 1):
183 | prompt += f"{i}. {item}\n"
184 |
185 | if include_summary:
186 | prompt += "\nProvide a summary at the end."
187 |
188 | return prompt
189 |
190 | result = smart_generate(["apple", "banana"], True)
191 |
192 | content = client.calls[0]["messages"][0]["content"]
193 | assert "1. apple" in content
194 | assert "2. banana" in content
195 | assert "summary" in content
196 |
--------------------------------------------------------------------------------
/smartfunc/__init__.py:
--------------------------------------------------------------------------------
1 | from functools import wraps
2 | from typing import Any, Callable, Optional, Type
3 | from pydantic import BaseModel
4 | from openai import OpenAI, AsyncOpenAI
5 |
6 |
7 | def _disallow_additional_properties(schema: Any) -> Any:
8 | """Ensure every object schema explicitly forbids unknown properties (OpenAI requirement)."""
9 | if isinstance(schema, dict):
10 | if schema.get("type") == "object":
11 | schema["additionalProperties"] = False
12 | props = schema.get("properties")
13 | if isinstance(props, dict):
14 | for value in props.values():
15 | _disallow_additional_properties(value)
16 |
17 | items = schema.get("items")
18 | if isinstance(items, dict) or isinstance(items, list):
19 | _disallow_additional_properties(items)
20 |
21 | for keyword in ("allOf", "anyOf", "oneOf"):
22 | subschema = schema.get(keyword)
23 | if isinstance(subschema, list):
24 | for item in subschema:
25 | _disallow_additional_properties(item)
26 | elif isinstance(subschema, dict):
27 | _disallow_additional_properties(subschema)
28 |
29 | not_schema = schema.get("not")
30 | if isinstance(not_schema, dict):
31 | _disallow_additional_properties(not_schema)
32 |
33 | for defs_key in ("definitions", "$defs"):
34 | defs = schema.get(defs_key)
35 | if isinstance(defs, dict):
36 | for value in defs.values():
37 | _disallow_additional_properties(value)
38 |
39 | elif isinstance(schema, list):
40 | for item in schema:
41 | _disallow_additional_properties(item)
42 |
43 | return schema
44 |
45 |
46 | class backend:
47 | """Synchronous backend decorator for LLM-powered functions.
48 |
49 | This class provides a decorator that transforms a function into an LLM-powered
50 | endpoint. The function can return either:
51 | - A string that will be used as the user prompt
52 | - A list of message dictionaries for full conversation control
53 |
54 | The decorator handles calling the LLM and parsing the response.
55 |
56 | Features:
57 | - Works with any OpenAI SDK-compatible provider (OpenAI, OpenRouter, etc.)
58 | - Optional structured output validation using Pydantic models
59 | - Full control over prompt generation using Python
60 | - Support for multimodal content (images, audio, video via base64)
61 |
62 | Example:
63 | from openai import OpenAI
64 | from pydantic import BaseModel
65 |
66 | client = OpenAI()
67 |
68 | class Summary(BaseModel):
69 | summary: str
70 | pros: list[str]
71 |
72 | @backend(client, model="gpt-4o-mini", response_format=Summary)
73 | def generate_summary(text: str) -> Summary:
74 | '''Generate a summary of the following text.'''
75 | return f"Summarize this text: {text}"
76 |
77 | result = generate_summary("Some text here")
78 | print(result.summary)
79 | """
80 |
81 | def __init__(
82 | self,
83 | client: OpenAI,
84 | model: str,
85 | response_format: Optional[Type[BaseModel]] = None,
86 | system: Optional[str] = None,
87 | **kwargs
88 | ):
89 | """Initialize the backend with specific LLM configuration.
90 |
91 | Args:
92 | client: OpenAI client instance (or compatible client)
93 | model: Name/identifier of the model to use (e.g., "gpt-4o-mini")
94 | response_format: Optional Pydantic model for structured output
95 | system: Optional system prompt for the LLM
96 | **kwargs: Additional arguments passed to the OpenAI API (e.g., temperature, max_tokens)
97 | """
98 | self.client = client
99 | self.model = model
100 | self.response_format = response_format
101 | self.system = system
102 | self.kwargs = kwargs
103 |
104 | def __call__(self, func: Callable) -> Callable:
105 | @wraps(func)
106 | def wrapper(*args, **kwargs):
107 | # Call the function to get the prompt or messages
108 | result = func(*args, **kwargs)
109 |
110 | # Handle different return types
111 | if isinstance(result, str):
112 | # String: build messages with optional system prompt
113 | messages = []
114 | if self.system:
115 | messages.append({"role": "system", "content": self.system})
116 | messages.append({"role": "user", "content": result})
117 | elif isinstance(result, list):
118 | # List of messages: use directly
119 | # System prompt is ignored if messages are provided
120 | messages = result
121 | else:
122 | raise ValueError(
123 | f"Function {func.__name__} must return either a string prompt "
124 | f"or a list of message dictionaries, got {type(result).__name__}"
125 | )
126 |
127 | # Prepare API call kwargs
128 | call_kwargs = {
129 | "model": self.model,
130 | "messages": messages,
131 | **self.kwargs
132 | }
133 |
134 | # Add structured output if specified
135 | if self.response_format:
136 | schema = _disallow_additional_properties(
137 | self.response_format.model_json_schema()
138 | )
139 | call_kwargs["response_format"] = {
140 | "type": "json_schema",
141 | "json_schema": {
142 | "name": self.response_format.__name__,
143 | "schema": schema,
144 | "strict": True
145 | }
146 | }
147 |
148 | # Call OpenAI API
149 | response = self.client.chat.completions.create(**call_kwargs)
150 | response_text = response.choices[0].message.content
151 |
152 | # Parse response
153 | if self.response_format:
154 | return self.response_format.model_validate_json(response_text)
155 | else:
156 | return response_text
157 |
158 | return wrapper
159 |
160 | def run(self, func: Callable, *args, **kwargs):
161 | """Run a function through the backend without using it as a decorator.
162 |
163 | Args:
164 | func: The function to execute
165 | *args: Positional arguments to pass to the function
166 | **kwargs: Keyword arguments to pass to the function
167 |
168 | Returns:
169 | The result from the LLM (parsed according to response_format)
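 |
 | Example (a minimal sketch; assumes `client` is an OpenAI() instance):
 |     b = backend(client, model="gpt-4o-mini")
 |     result = b.run(lambda topic: f"Write about {topic}", "testing")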
170 | """
171 | decorated_func = self(func)
172 | return decorated_func(*args, **kwargs)
173 |
174 |
175 | class async_backend:
176 | """Asynchronous backend decorator for LLM-powered functions.
177 |
178 | Similar to the synchronous `backend` class, but provides asynchronous execution.
179 | Use this when you need non-blocking LLM operations, typically in async web
180 | applications or for concurrent processing.
181 |
182 | The function can return either:
183 | - A string that will be used as the user prompt
184 | - A list of message dictionaries for full conversation control
185 |
186 | Features:
187 | - Async/await support for non-blocking operations
188 | - Works with any OpenAI SDK-compatible provider
189 | - Optional structured output validation using Pydantic models
190 | - Support for multimodal content (images, audio, video via base64)
191 |
192 | Example:
193 | from openai import AsyncOpenAI
194 | from pydantic import BaseModel
195 | import asyncio
196 |
197 | client = AsyncOpenAI()
198 |
199 | class Summary(BaseModel):
200 | summary: str
201 |
202 | @async_backend(client, model="gpt-4o-mini", response_format=Summary)
203 | def generate_summary(text: str) -> Summary:
204 | '''Generate a summary.'''
205 | return f"Summarize: {text}"
206 |
207 | result = asyncio.run(generate_summary("text"))
208 | """
209 |
210 | def __init__(
211 | self,
212 | client: AsyncOpenAI,
213 | model: str,
214 | response_format: Optional[Type[BaseModel]] = None,
215 | system: Optional[str] = None,
216 | **kwargs
217 | ):
218 | """Initialize the async backend with specific LLM configuration.
219 |
220 | Args:
221 | client: AsyncOpenAI client instance (or compatible async client)
222 | model: Name/identifier of the model to use
223 | response_format: Optional Pydantic model for structured output
224 | system: Optional system prompt for the LLM
225 | **kwargs: Additional arguments passed to the OpenAI API
226 | """
227 | self.client = client
228 | self.model = model
229 | self.response_format = response_format
230 | self.system = system
231 | self.kwargs = kwargs
232 |
233 | def __call__(self, func: Callable) -> Callable:
234 | @wraps(func)
235 | async def wrapper(*args, **kwargs):
236 | # Call the function to get the prompt or messages
237 | result = func(*args, **kwargs)
238 |
239 | # Handle different return types
240 | if isinstance(result, str):
241 | # String: build messages with optional system prompt
242 | messages = []
243 | if self.system:
244 | messages.append({"role": "system", "content": self.system})
245 | messages.append({"role": "user", "content": result})
246 | elif isinstance(result, list):
247 | # List of messages: use directly
248 | # System prompt is ignored if messages are provided
249 | messages = result
250 | else:
251 | raise ValueError(
252 | f"Function {func.__name__} must return either a string prompt "
253 | f"or a list of message dictionaries, got {type(result).__name__}"
254 | )
255 |
256 | # Prepare API call kwargs
257 | call_kwargs = {
258 | "model": self.model,
259 | "messages": messages,
260 | **self.kwargs
261 | }
262 |
263 | # Add structured output if specified
264 | if self.response_format:
265 | schema = _disallow_additional_properties(
266 | self.response_format.model_json_schema()
267 | )
268 | call_kwargs["response_format"] = {
269 | "type": "json_schema",
270 | "json_schema": {
271 | "name": self.response_format.__name__,
272 | "schema": schema,
273 | "strict": True
274 | }
275 | }
276 |
277 | # Call OpenAI API
278 | response = await self.client.chat.completions.create(**call_kwargs)
279 | response_text = response.choices[0].message.content
280 |
281 | # Parse response
282 | if self.response_format:
283 | return self.response_format.model_validate_json(response_text)
284 | else:
285 | return response_text
286 |
287 | return wrapper
288 |
289 | async def run(self, func: Callable, *args, **kwargs):
290 | """Run a function through the backend without using it as a decorator.
291 |
292 | Args:
293 | func: The function to execute
294 | *args: Positional arguments to pass to the function
295 | **kwargs: Keyword arguments to pass to the function
296 |
297 | Returns:
298 | The result from the LLM (parsed according to response_format)
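 |
 | Example (a minimal sketch; assumes `client` is an AsyncOpenAI() instance):
 |     ab = async_backend(client, model="gpt-4o-mini")
 |     result = await ab.run(lambda topic: f"Write about {topic}", "testing")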
299 | """
300 | decorated_func = self(func)
301 | return await decorated_func(*args, **kwargs)
302 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | ### smartfunc
4 |
5 | > Turn functions into LLM-powered endpoints using the OpenAI SDK
6 |
7 | ## Installation
8 |
9 | ```bash
10 | uv pip install smartfunc
11 | ```
12 |
13 | ## What is this?
14 |
15 | Here is a nice example of what is possible with this library:
16 |
17 | ```python
18 | from smartfunc import backend
19 | from openai import OpenAI
20 |
21 | client = OpenAI()
22 |
23 | @backend(client, model="gpt-4o-mini")
24 | def generate_summary(text: str) -> str:
25 | return f"Generate a summary of the following text: {text}"
26 | ```
27 |
28 | The `generate_summary` function will now return a string with the summary of the text that you give it.
29 |
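Calling it then looks like any ordinary function call (a small sketch; the exact output will vary by model):

```python
summary = generate_summary("The quick brown fox jumps over the lazy dog.")
print(summary)  # a short, model-written summary string
```
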
30 | ### Other providers
31 |
32 | Note that we're using the OpenAI SDK here, but that doesn't mean you have to use OpenAI's LLM service. The OpenAI SDK has become a de-facto standard with support for *many* (if not *most*) providers, including services like [Ollama](https://ollama.com/) for local models and cloud hosts like [OpenRouter](https://openrouter.ai/). Just make sure you set the `api_key` and `base_url` parameters manually when you call `OpenAI()`.
33 |
34 | ```python
35 | client = OpenAI(
36 |     api_key=os.getenv("OPENROUTER_API_KEY"),
37 |     base_url="https://openrouter.ai/api/v1"
38 | )
39 | ```
40 |
41 |
42 | ## How does it work?
43 |
44 | This library uses the OpenAI SDK to interact with LLMs. Your function can return either a string (which becomes the prompt) or a list of message dictionaries (for full conversation control). The decorator handles calling the LLM and parsing the response.
45 |
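Concretely, the two return styles look like this (a minimal sketch; `client` is assumed to be an `OpenAI()` instance and the function names are hypothetical):

```python
@backend(client, model="gpt-4o-mini")
def as_prompt(question: str) -> str:
    # a returned string becomes the single user message
    return f"Answer briefly: {question}"

@backend(client, model="gpt-4o-mini")
def as_messages(question: str) -> list:
    # a returned list of message dicts is sent to the API as-is
    return [
        {"role": "system", "content": "Answer briefly."},
        {"role": "user", "content": question},
    ]
```
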
46 | The key benefits of this approach:
47 |
48 | - **Works with any OpenAI SDK-compatible provider**: Use OpenAI, OpenRouter, or any provider with OpenAI-compatible APIs
49 | - **Full Python control**: Build prompts using Python (no template syntax to learn)
50 | - **Type-safe structured outputs**: Use Pydantic models for validated responses
51 | - **Async support**: Built-in async/await support for concurrent operations
52 | - **Conversation history**: Pass message lists for multi-turn conversations
53 | - **Multimodal support**: Include images, audio, and video via base64 encoding
54 | - **Simple and focused**: does one thing well, turning functions into LLM calls
55 |
56 | ## Features
57 |
58 | ### Basic Usage
59 |
60 | The simplest way to use `smartfunc`:
61 |
62 | ```python
63 | from smartfunc import backend
64 | from openai import OpenAI
65 |
66 | client = OpenAI()
67 |
68 | @backend(client, model="gpt-4o-mini")
69 | def write_poem(topic: str) -> str:
70 | return f"Write a short poem about {topic}"
71 |
72 | print(write_poem("summer"))
73 | ```
74 |
75 | ### Structured Outputs
76 |
77 | Use Pydantic models to get validated, structured responses:
78 |
79 | ```python
80 | from smartfunc import backend
81 | from openai import OpenAI
82 | from pydantic import BaseModel
83 |
84 | client = OpenAI()
85 |
86 | class Summary(BaseModel):
87 | summary: str
88 | pros: list[str]
89 | cons: list[str]
90 |
91 | @backend(client, model="gpt-4o-mini", response_format=Summary)
92 | def analyze_pokemon(name: str) -> str:
93 | return f"Describe the following pokemon: {name}"
94 |
95 | result = analyze_pokemon("pikachu")
96 | print(result.summary)
97 | print(result.pros)
98 | print(result.cons)
99 | ```
100 |
101 | This will return a Pydantic model that might look like this:
102 |
103 | ```python
104 | Summary(
105 | summary='Pikachu is a small, electric-type Pokémon...',
106 | pros=['Iconic mascot', 'Strong electric attacks', 'Cute appearance'],
107 | cons=['Weak against ground-type moves', 'Limited evolution options']
108 | )
109 | ```
110 |
111 | ### System Prompts and Parameters
112 |
113 | You can configure the system prompt and any extra API parameters upfront in the decorator:
114 |
115 | ```python
116 | @backend(
117 | client,
118 | model="gpt-4o-mini",
119 | response_format=Summary,
120 | system="You are a Pokemon expert with 20 years of experience",
121 | temperature=0.7,
122 | max_tokens=500
123 | )
124 | def expert_analysis(pokemon: str) -> Summary:
125 | return f"Provide an expert analysis of {pokemon}"
126 | ```
127 |
128 | ### Async Support
129 |
130 | If you like working asynchronously, you can use `async_backend` for non-blocking operations. Note that the decorated function itself stays a plain `def` (it only builds the prompt); the decorator wraps it in an awaitable. Beware that you may get throttled by the LLM provider if you send too many requests too quickly.
131 |
132 | ```python
133 | import asyncio
134 | from smartfunc import async_backend
135 | from openai import AsyncOpenAI
136 |
137 | client = AsyncOpenAI()
138 |
139 | @async_backend(client, model="gpt-4o-mini", response_format=Summary)
140 | def analyze_async(pokemon: str) -> Summary:
141 | return f"Describe: {pokemon}"
142 |
143 | result = asyncio.run(analyze_async("charizard"))
144 | print(result)
145 | ```
146 |
147 | ### Complex Prompt Logic
148 |
149 | Since prompts are built with Python, you can use any logic you want:
150 |
151 | ```python
152 | @backend(client, model="gpt-4o-mini")
153 | def custom_prompt(items: list[str], style: str, include_summary: bool) -> str:
154 | """Generate with custom logic."""
155 | prompt = f"Write in {style} style:\n\n"
156 |
157 | for i, item in enumerate(items, 1):
158 | prompt += f"{i}. {item}\n"
159 |
160 | if include_summary:
161 | prompt += "\nProvide a brief summary at the end."
162 |
163 | return prompt
164 |
165 | result = custom_prompt(
166 | items=["First point", "Second point", "Third point"],
167 | style="formal",
168 | include_summary=True
169 | )
170 | ```
171 |
172 | ### Conversation History
173 |
174 | Instead of returning a string, you can return a list of message dictionaries to have full control over the conversation:
175 |
176 | ```python
177 | @backend(client, model="gpt-4o-mini")
178 | def chat_with_history(user_message: str, conversation_history: list) -> list:
179 | """Chat with conversation context."""
180 | messages = [
181 | {"role": "system", "content": "You are a helpful assistant."},
182 | ]
183 |
184 | # Add previous conversation
185 | messages.extend(conversation_history)
186 |
187 | # Add new user message
188 | messages.append({"role": "user", "content": user_message})
189 |
190 | return messages
191 |
192 | # Use it with conversation history
193 | history = [
194 | {"role": "user", "content": "What's your name?"},
195 | {"role": "assistant", "content": "I'm Claude, an AI assistant."},
196 | ]
197 |
198 | response = chat_with_history("What can you help me with?", history)
199 | print(response)
200 | ```
201 |
202 | Note: When you return a message list, the `system` parameter in the decorator is ignored.
203 |
204 | ### Multimodal Content (Images, Audio, Video)
205 |
206 | You can include images, audio, or video by passing them as base64-encoded content in your messages:
207 |
208 | ```python
209 | import base64
210 |
211 | @backend(client, model="gpt-4o-mini")
212 | def analyze_image(image_path: str, question: str) -> list:
213 | """Analyze an image with a question."""
214 | # Read and encode image
215 | with open(image_path, "rb") as f:
216 | image_data = base64.b64encode(f.read()).decode("utf-8")
217 |
218 | return [
219 | {
220 | "role": "user",
221 | "content": [
222 | {"type": "text", "text": question},
223 | {
224 | "type": "image_url",
225 | "image_url": {
226 | "url": f"data:image/jpeg;base64,{image_data}"
227 | },
228 | },
229 | ],
230 | }
231 | ]
232 |
233 | result = analyze_image("photo.jpg", "What's in this image?")
234 | print(result)
235 | ```
236 |
237 | You can also mix multiple media types:
238 |
239 | ```python
240 | @backend(client, model="gpt-4o-mini")
241 | def analyze_multiple_media(image1_path: str, image2_path: str) -> list:
242 | """Compare two images."""
243 | # Encode images
244 | with open(image1_path, "rb") as f:
245 | img1 = base64.b64encode(f.read()).decode("utf-8")
246 | with open(image2_path, "rb") as f:
247 | img2 = base64.b64encode(f.read()).decode("utf-8")
248 |
249 | return [
250 | {
251 | "role": "user",
252 | "content": [
253 | {"type": "text", "text": "Compare these images:"},
254 | {
255 | "type": "image_url",
256 | "image_url": {"url": f"data:image/jpeg;base64,{img1}"},
257 | },
258 | {
259 | "type": "image_url",
260 | "image_url": {"url": f"data:image/jpeg;base64,{img2}"},
261 | },
262 | ],
263 | }
264 | ]
265 |
266 | result = analyze_multiple_media("image1.jpg", "image2.jpg")
267 | ```
268 |
269 | For audio content:
270 |
271 | ```python
272 | @backend(client, model="gpt-4o-audio-preview")  # audio input requires an audio-capable model
273 | def transcribe_audio(audio_path: str) -> list:
274 | """Transcribe audio content."""
275 | with open(audio_path, "rb") as f:
276 | audio_data = base64.b64encode(f.read()).decode("utf-8")
277 |
278 | return [
279 | {
280 | "role": "user",
281 | "content": [
282 | {"type": "text", "text": "Transcribe this audio:"},
283 | {
284 | "type": "input_audio",
285 | "input_audio": {
286 | "data": audio_data,
287 | "format": "wav" # or "mp3", "flac", etc.
288 | },
289 | },
290 | ],
291 | }
292 | ]
293 | ```
294 |
295 | ### Using OpenRouter
296 |
297 | OpenRouter provides access to hundreds of models through an OpenAI-compatible API:
298 |
299 | ```python
300 | from openai import OpenAI
301 | import os
302 |
303 | # OpenRouter client
304 | openrouter_client = OpenAI(
305 | api_key=os.getenv("OPENROUTER_API_KEY"),
306 | base_url="https://openrouter.ai/api/v1"
307 | )
308 |
309 | # Use Llama via OpenRouter
310 | @backend(openrouter_client, model="meta-llama/llama-3.1-70b", response_format=Summary)
311 | def analyze_with_llama(pokemon: str) -> Summary:
312 | return f"Analyze {pokemon}"
313 | ```
314 |
315 | ### Reusable Backend Configurations
316 |
317 | You can create reusable backend configurations:
318 |
319 | ```python
320 | from smartfunc import backend
321 | from openai import OpenAI
322 |
323 | client = OpenAI()
324 |
325 | # Create a configured backend
326 | gpt_mini = lambda **kwargs: backend(
327 | client,
328 | model="gpt-4o-mini",
329 | system="You are a helpful assistant",
330 | temperature=0.7,
331 | **kwargs
332 | )
333 |
334 | # Use it multiple times
335 | @gpt_mini(response_format=Summary)
336 | def summarize(text: str) -> Summary:
337 | return f"Summarize: {text}"
338 |
339 | @gpt_mini()
340 | def translate(text: str, language: str) -> str:
341 | return f"Translate '{text}' to {language}"
342 | ```
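
(A minimal alternative, if you prefer the standard library: `functools.partial(backend, client, model="gpt-4o-mini", system="You are a helpful assistant", temperature=0.7)` behaves the same as the lambda above.)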
343 |
344 | ## Migration from v0.2.0
345 |
346 |
347 | If you're upgrading from v0.2.0, here are the key changes:
348 |
349 | ### What Changed
350 |
351 | 1. **Client injection required**: You now pass an OpenAI client instance instead of a model name string
352 | 2. **Functions return prompts**: Your function should return a string (the prompt), not use docstrings as templates
353 | 3. **`response_format` parameter**: Structured output is specified via `response_format=` instead of return type annotations
354 | 4. **No more Jinja2**: Prompts are built with Python, not templates
355 |
356 | ### Before (v0.2.0)
357 |
358 | ```python
359 | from smartfunc import backend
360 |
361 | @backend("gpt-4o-mini")
362 | def summarize(text: str) -> Summary:
363 | """Summarize: {{ text }}"""
364 | pass
365 | ```
366 |
367 | ### After (v1.0.0)
368 |
369 | ```python
370 | from smartfunc import backend
371 | from openai import OpenAI
372 |
373 | client = OpenAI()
374 |
375 | @backend(client, model="gpt-4o-mini", response_format=Summary)
376 | def summarize(text: str) -> Summary:
377 | """This is now actual documentation."""
378 | return f"Summarize: {text}"
379 | ```
380 |
381 | ### Why the Changes?
382 |
383 | - **Better type checking**: The `response_format` parameter doesn't interfere with type checkers
384 | - **More flexibility**: Full Python for prompt generation instead of Jinja2 templates
385 | - **Multi-provider support**: Works with any OpenAI SDK-compatible provider (OpenRouter, etc.)
386 | - **Explicit dependencies**: Client injection makes it clear what's being used
387 | - **Simpler codebase**: Removed magic template parsing
388 |
389 |
390 |
391 | ## Development
392 |
393 | Run tests:
394 |
395 | ```bash
396 | make check
397 | ```
398 |
399 | Or:
400 |
401 | ```bash
402 | uv run pytest tests
403 | ```
404 |
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
1 | import marimo
2 |
3 | __generated_with = "0.11.17"
4 | app = marimo.App(width="medium")
5 |
6 |
7 | @app.cell(hide_code=True)
8 | def _():
9 | import marimo as mo
10 | import requests as rq
11 | from dotenv import load_dotenv
12 |
13 | load_dotenv(".env", override=True)
14 | return load_dotenv, mo, rq
15 |
16 |
17 | @app.cell(hide_code=True)
18 | def _(rq):
19 | url = "https://raw.githubusercontent.com/chalda-pnuzig/emojis.json/refs/heads/master/src/list.json"
20 |
21 | emoji = rq.get(url).json()['emojis']
22 | return emoji, url
23 |
24 |
25 | @app.cell(hide_code=True)
26 | def _():
27 | from instructor.batch import BatchJob
28 | from pydantic import BaseModel, Field
29 | from typing import Literal
30 |
31 | class EmojiDescription(BaseModel):
32 | terms: list[str] = Field(..., description="List of words/phrases that could fit the emoji. List can be long, around 10 examples.")
33 | description: str = Field(..., description="Describes the emoji at length. It does not describe what it looks like, but rather what the symbol could mean and what it is typically used for. Two, max three, sentences.")
34 | return BaseModel, BatchJob, EmojiDescription, Field, Literal
35 |
36 |
37 | @app.cell
38 | def _(EmojiDescription, cache, json, llm):
39 | model = llm.get_async_model("claude-3.5-haiku")
40 |
41 | async def get_info(e):
42 | resp = await model.prompt(f"What can you tell me about this emoji: {e['emoji']}", schema=EmojiDescription, log=True)
43 | cache[e['emoji']] = {**e, 'response': json.loads(await resp.text())}
44 | return cache[e['emoji']]
45 | return get_info, model
46 |
47 |
48 | @app.cell
49 | def _():
50 | from diskcache import Cache
51 |
52 | cache = Cache("emojidb")
53 | return Cache, cache
54 |
55 |
56 | @app.cell
57 | def _(cache, emoji):
58 | todo = [e for e in emoji if e['emoji'] not in cache][:250]
59 | len(todo), len(cache), len(emoji)
60 | return (todo,)
61 |
62 |
63 | @app.cell
64 | def _():
65 | # results = await async_map_with_retry(
66 | # items=todo,
67 | # func=get_info,
68 | # max_concurrency=5,
69 | # max_retries=3,
70 | # show_progress=True
71 | # )
72 | return
73 |
74 |
75 | @app.cell
76 | def _(cache):
77 | import polars as pl
78 | from lazylines import LazyLines
79 |
80 | pl.DataFrame(
81 | LazyLines([cache[k] for k in cache.iterkeys()])
82 | .mutate(
83 | desc=lambda d: d['response']['description'],
84 | terms=lambda d: d['response']['terms']
85 | )
86 | .drop("response")
87 | .show()
88 | )
89 | return LazyLines, pl
90 |
91 |
92 | @app.cell(hide_code=True)
93 | def _(mo):
94 | import asyncio
95 | import random
96 | import logging
97 | import tqdm
98 | from typing import List, Dict, Any, Callable, Optional
99 |
100 | async def process_with_retry(
101 | func,
102 | item,
103 | max_retries=3,
104 | initial_backoff=1.0,
105 | backoff_factor=2.0,
106 | jitter=0.1,
107 | timeout=None,
108 | on_success=None,
109 | on_failure=None,
110 | logger=None
111 | ):
112 | """Process a single item with retry logic and backoff."""
113 | logger = logger or logging.getLogger(__name__)
114 | attempts = 0
115 | last_exception = None
116 |
117 | while attempts <= max_retries:
118 | try:
119 | # Add timeout if specified
120 | if timeout is not None:
121 | result = await asyncio.wait_for(func(item), timeout=timeout)
122 | else:
123 | result = await func(item)
124 |
125 | # Call success callback if provided
126 | if on_success:
127 | on_success(item, result)
128 |
129 | return item, result, None
130 |
131 | except Exception as e:
132 | attempts += 1
133 | last_exception = e
134 |
135 | if attempts <= max_retries:
136 | # Calculate backoff time with jitter
137 | backoff_time = initial_backoff * (backoff_factor ** (attempts - 1))
138 | jitter_amount = backoff_time * jitter
139 | actual_backoff = backoff_time + random.uniform(-jitter_amount, jitter_amount)
140 | actual_backoff = max(0.1, actual_backoff) # Ensure minimum backoff
141 |
142 | logger.warning(
143 | f"Attempt {attempts}/{max_retries} failed for item {item}. "
144 | f"Retrying in {actual_backoff:.2f}s. Error: {str(e)}"
145 | )
146 |
147 | await asyncio.sleep(actual_backoff)
148 | else:
149 | if on_failure:
150 | on_failure(item, last_exception)
151 |
152 | logger.error(
153 | f"All {max_retries} retry attempts failed for item {item}. "
154 | f"Final error: {str(last_exception)}"
155 | )
156 |
157 | return item, None, last_exception
158 |
159 | async def async_map_worker(
160 | items,
161 | func,
162 | semaphore,
163 | max_retries=3,
164 | initial_backoff=1.0,
165 | backoff_factor=2.0,
166 | jitter=0.1,
167 | timeout=None,
168 | on_success=None,
169 | on_failure=None,
170 | logger=None
171 | ):
172 | """Map an async function over items with concurrency control."""
173 | async def bounded_process(item):
174 | async with semaphore:
175 | return await process_with_retry(
176 | func,
177 | item,
178 | max_retries,
179 | initial_backoff,
180 | backoff_factor,
181 | jitter,
182 | timeout,
183 | on_success,
184 | on_failure,
185 | logger
186 | )
187 |
188 | # Create one bounded coroutine per item (awaited by the caller)
189 | tasks = [bounded_process(item) for item in items]
190 | return tasks
191 |
192 | def async_map_with_retry(
193 | items: List[Dict[Any, Any]],
194 | func: Callable,
195 | max_concurrency: int = 10,
196 | max_retries: int = 3,
197 | initial_backoff: float = 1.0,
198 | backoff_factor: float = 2.0,
199 | jitter: float = 0.1,
200 | timeout: Optional[float] = None,
201 | on_success: Optional[Callable] = None,
202 | on_failure: Optional[Callable] = None,
203 | show_progress: bool = True,
204 | description: str = "Processing items",
205 | logger: Optional[logging.Logger] = None
206 | ):
207 | """
208 | Map an async function over a list of dictionaries with progress tracking and retry.
209 |
210 | Args:
211 | items: List of dictionaries to process
212 | func: Async function that takes a dictionary and returns a result
213 | max_concurrency: Maximum number of concurrent tasks
214 | max_retries: Maximum number of retry attempts
215 | initial_backoff: Initial backoff time in seconds
216 | backoff_factor: Multiplier for backoff on successive retries
217 | jitter: Random jitter factor to avoid thundering herd
218 | timeout: Maximum time to wait for a task to complete (None = wait forever)
219 | on_success: Callback function to run on successful processing
220 | on_failure: Callback function to run when an item fails after all retries
221 | show_progress: Whether to show progress bar
222 | description: Description for progress bar
223 | logger: Optional logger for detailed logging
224 |
225 | Returns:
226 | List of tuples (original_dict, result_or_None, exception_or_None)
227 | """
228 | logger = logger or logging.getLogger(__name__)
229 |
230 | async def main():
231 | # Create semaphore for concurrency control
232 | semaphore = asyncio.Semaphore(max_concurrency)
233 |
234 | # Get tasks
235 | tasks = await async_map_worker(
236 | items,
237 | func,
238 | semaphore,
239 | max_retries,
240 | initial_backoff,
241 | backoff_factor,
242 | jitter,
243 | timeout,
244 | on_success,
245 | on_failure,
246 | logger
247 | )
248 |
249 | # Set up progress bar if requested
250 | if show_progress:
251 | results = []
252 | with mo.status.progress_bar(total=len(tasks), title=description) as progress_bar:
253 | for task in asyncio.as_completed(tasks):
254 | result = await task
255 | results.append(result)
256 | progress_bar.update()
257 | return results
258 | else:
259 | # Without progress bar, just gather all results
260 | return await asyncio.gather(*tasks)
261 |
262 | return main()
263 | return (
264 | Any,
265 | Callable,
266 | Dict,
267 | List,
268 | Optional,
269 | async_map_with_retry,
270 | async_map_worker,
271 | asyncio,
272 | logging,
273 | process_with_retry,
274 | random,
275 | tqdm,
276 | )
277 |
278 |
279 | @app.cell
280 | def _():
281 | import inspect
282 | import json
283 | import llm
284 | from typing import TypeVar, get_type_hints
285 | from functools import wraps
286 | from jinja2 import Template
287 | from diskcache import Cache
288 | return Cache, Template, TypeVar, get_type_hints, inspect, json, llm, wraps
289 |
290 |
291 | @app.cell
292 | def _(
293 | BaseModel,
294 | Cache,
295 | Callable,
296 | Template,
297 | get_type_hints,
298 | inspect,
299 | json,
300 | llm,
301 | wraps,
302 | ):
303 | class backend:
304 | def __init__(self, name, system=None, cache=None, **kwargs):
305 | self.model = llm.get_model(name)
306 | self.system = system
307 | self.kwargs = kwargs
308 | self.cache = Cache(cache) if isinstance(cache, str) else cache
309 |
310 | def __call__(self, func: Callable) -> Callable:
311 | @wraps(func)
312 | def wrapper(*args, **kwargs):
313 | signature = inspect.signature(func)
314 | docstring = inspect.getdoc(func) or ""
315 | type_hints = get_type_hints(func)
316 |
317 |                 # Only Pydantic models are supported as return annotations
318 |                 if type_hints.get('return', None):
319 |                     assert issubclass(type_hints.get('return', None), BaseModel), "Output type must be a Pydantic model"
320 |
321 |                 # Bind the call arguments to the signature so the docstring
322 |                 # template can be rendered with every parameter filled in
323 | bound_args = signature.bind(*args, **kwargs)
324 | bound_args.apply_defaults() # Apply default values for missing parameters
325 | all_kwargs = bound_args.arguments
326 |
327 | template = Template(docstring)
328 | formatted_docstring = template.render(**all_kwargs)
329 | cache_key = docstring + json.dumps(all_kwargs) + str(type_hints.get('return', None))
330 |
331 | if self.cache:
332 | if cache_key in self.cache:
333 | return self.cache[cache_key]
334 |
335 | # Call the prompt, with schema if given
336 | resp = self.model.prompt(
337 | formatted_docstring,
338 | system=self.system,
339 | schema=type_hints.get('return', None),
340 |                     **self.kwargs  # model options from the decorator, not the call's kwargs
341 | )
342 |                 text = resp.text()
343 |                 # Parse as JSON only when a Pydantic schema was requested
344 |                 out = json.loads(text) if type_hints.get('return', None) else text
345 |
346 | if self.cache:
347 | self.cache[cache_key] = out
348 | return out
349 |
350 | return wrapper
351 |
352 | def run(self, func, *args, **kwargs):
353 | new_func = self(func)
354 | return new_func(*args, **kwargs)
355 | return (backend,)
356 |
357 |
358 | @app.cell
359 | def _(
360 | BaseModel,
361 | Callable,
362 | Template,
363 | get_type_hints,
364 | inspect,
365 | json,
366 | llm,
367 | wraps,
368 | ):
369 | class async_backend:
370 | def __init__(self, name, system=None, **kwargs):
371 | self.model = llm.get_async_model(name)
372 | self.system = system
373 | self.kwargs = kwargs
374 |
375 | def __call__(self, func: Callable) -> Callable:
376 | @wraps(func)
377 | async def wrapper(*args, **kwargs):
378 | signature = inspect.signature(func)
379 | docstring = inspect.getdoc(func) or ""
380 | type_hints = get_type_hints(func)
381 |
382 |                 # Only Pydantic models are supported as return annotations
383 |                 if type_hints.get('return', None):
384 |                     assert issubclass(type_hints.get('return', None), BaseModel), "Output type must be a Pydantic model"
385 |
386 |                 # Bind the call arguments to the signature so the docstring
387 |                 # template can be rendered with every parameter filled in
388 | bound_args = signature.bind(*args, **kwargs)
389 | bound_args.apply_defaults() # Apply default values for missing parameters
390 | all_kwargs = bound_args.arguments
391 |
392 | template = Template(docstring)
393 | formatted_docstring = template.render(**all_kwargs)
394 |
395 | # Call the prompt, with schema if given
396 | resp = await self.model.prompt(
397 | formatted_docstring,
398 | system=self.system,
399 | schema=type_hints.get('return', None),
400 |                     **self.kwargs  # model options from the decorator, not the call's kwargs
401 | )
402 | text = await resp.text()
403 | if type_hints.get('return', None):
404 | return json.loads(text)
405 | return text
406 |
407 | return wrapper
408 |
409 | async def run(self, func, *args, **kwargs):
410 | new_func = self(func)
411 |         return await new_func(*args, **kwargs)
412 | return (async_backend,)
413 |
414 |
415 | @app.cell
416 | def _(BaseModel, async_backend):
417 | class Out(BaseModel):
418 | result: int
419 |
420 | @async_backend("claude-3.5-haiku")
421 | def foobar(a, b) -> Out:
422 | """
423 | {{a}} + {{b}} =
424 | """
425 | return Out, foobar
426 |
427 |
428 | @app.cell
429 | async def _(foobar):
430 | await foobar(1, 2)
431 | return
432 |
433 |
434 | @app.cell
435 | def _():
436 | # @backend("claude-3.5-haiku")
437 | # async def _foobar(a, b) -> Out:
438 | # """
439 | # {{a}} + {{b}} =
440 | # """
441 |     # Kept commented out for reference: the sync backend applied to an async def.
442 | # _foobar(1, 2)
443 | return
444 |
445 |
446 | if __name__ == "__main__":
447 | app.run()
448 |
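449 | # --- Hypothetical usage sketch (comments only, so it does not interfere
450 | # with `app.run()` above). It shows how `async_backend` and
451 | # `async_map_with_retry` could be combined inside an async marimo cell;
452 | # `Summary`, `summarize`, `process`, and the item dicts are invented
453 | # names for illustration, not part of the app.
454 | #
455 | # class Summary(BaseModel):
456 | #     summary: str
457 | #
458 | # @async_backend("claude-3.5-haiku")
459 | # def summarize(text) -> Summary:
460 | #     """Summarize this text in one sentence: {{text}}"""
461 | #
462 | # async def process(item):
463 | #     # The decorated function returns a dict matching the schema,
464 | #     # e.g. {"summary": "..."}.
465 | #     return await summarize(item["text"])
466 | #
467 | # results = await async_map_with_retry(
468 | #     items=[{"text": "first document"}, {"text": "second document"}],
469 | #     func=process,
470 | #     max_concurrency=5,
471 | #     max_retries=2,
472 | # )
473 | # for item, result, error in results:
474 | #     # Each entry is (original_item, result_or_None, exception_or_None).
475 | #     print(item, result if error is None else error)
476 | #
477 | # The sync `backend` works the same way for regular functions and can also
478 | # cache results on disk when given a path:
479 | # backend("claude-3.5-haiku", cache=".llm-cache")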
--------------------------------------------------------------------------------
/uv.lock:
--------------------------------------------------------------------------------
1 | version = 1
2 | revision = 3
3 | requires-python = ">=3.13"
4 |
5 | [[package]]
6 | name = "annotated-types"
7 | version = "0.7.0"
8 | source = { registry = "https://pypi.org/simple" }
9 | sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" }
10 | wheels = [
11 | { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" },
12 | ]
13 |
14 | [[package]]
15 | name = "anyio"
16 | version = "4.8.0"
17 | source = { registry = "https://pypi.org/simple" }
18 | dependencies = [
19 | { name = "idna" },
20 | { name = "sniffio" },
21 | ]
22 | sdist = { url = "https://files.pythonhosted.org/packages/a3/73/199a98fc2dae33535d6b8e8e6ec01f8c1d76c9adb096c6b7d64823038cde/anyio-4.8.0.tar.gz", hash = "sha256:1d9fe889df5212298c0c0723fa20479d1b94883a2df44bd3897aa91083316f7a", size = 181126, upload-time = "2025-01-05T13:13:11.095Z" }
23 | wheels = [
24 | { url = "https://files.pythonhosted.org/packages/46/eb/e7f063ad1fec6b3178a3cd82d1a3c4de82cccf283fc42746168188e1cdd5/anyio-4.8.0-py3-none-any.whl", hash = "sha256:b5011f270ab5eb0abf13385f851315585cc37ef330dd88e27ec3d34d651fd47a", size = 96041, upload-time = "2025-01-05T13:13:07.985Z" },
25 | ]
26 |
27 | [[package]]
28 | name = "certifi"
29 | version = "2025.1.31"
30 | source = { registry = "https://pypi.org/simple" }
31 | sdist = { url = "https://files.pythonhosted.org/packages/1c/ab/c9f1e32b7b1bf505bf26f0ef697775960db7932abeb7b516de930ba2705f/certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651", size = 167577, upload-time = "2025-01-31T02:16:47.166Z" }
32 | wheels = [
33 | { url = "https://files.pythonhosted.org/packages/38/fc/bce832fd4fd99766c04d1ee0eead6b0ec6486fb100ae5e74c1d91292b982/certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe", size = 166393, upload-time = "2025-01-31T02:16:45.015Z" },
34 | ]
35 |
36 | [[package]]
37 | name = "colorama"
38 | version = "0.4.6"
39 | source = { registry = "https://pypi.org/simple" }
40 | sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" }
41 | wheels = [
42 | { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" },
43 | ]
44 |
45 | [[package]]
46 | name = "distro"
47 | version = "1.9.0"
48 | source = { registry = "https://pypi.org/simple" }
49 | sdist = { url = "https://files.pythonhosted.org/packages/fc/f8/98eea607f65de6527f8a2e8885fc8015d3e6f5775df186e443e0964a11c3/distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed", size = 60722, upload-time = "2023-12-24T09:54:32.31Z" }
50 | wheels = [
51 | { url = "https://files.pythonhosted.org/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2", size = 20277, upload-time = "2023-12-24T09:54:30.421Z" },
52 | ]
53 |
54 | [[package]]
55 | name = "h11"
56 | version = "0.14.0"
57 | source = { registry = "https://pypi.org/simple" }
58 | sdist = { url = "https://files.pythonhosted.org/packages/f5/38/3af3d3633a34a3316095b39c8e8fb4853a28a536e55d347bd8d8e9a14b03/h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d", size = 100418, upload-time = "2022-09-25T15:40:01.519Z" }
59 | wheels = [
60 | { url = "https://files.pythonhosted.org/packages/95/04/ff642e65ad6b90db43e668d70ffb6736436c7ce41fcc549f4e9472234127/h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761", size = 58259, upload-time = "2022-09-25T15:39:59.68Z" },
61 | ]
62 |
63 | [[package]]
64 | name = "httpcore"
65 | version = "1.0.7"
66 | source = { registry = "https://pypi.org/simple" }
67 | dependencies = [
68 | { name = "certifi" },
69 | { name = "h11" },
70 | ]
71 | sdist = { url = "https://files.pythonhosted.org/packages/6a/41/d7d0a89eb493922c37d343b607bc1b5da7f5be7e383740b4753ad8943e90/httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c", size = 85196, upload-time = "2024-11-15T12:30:47.531Z" }
72 | wheels = [
73 | { url = "https://files.pythonhosted.org/packages/87/f5/72347bc88306acb359581ac4d52f23c0ef445b57157adedb9aee0cd689d2/httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd", size = 78551, upload-time = "2024-11-15T12:30:45.782Z" },
74 | ]
75 |
76 | [[package]]
77 | name = "httpx"
78 | version = "0.28.1"
79 | source = { registry = "https://pypi.org/simple" }
80 | dependencies = [
81 | { name = "anyio" },
82 | { name = "certifi" },
83 | { name = "httpcore" },
84 | { name = "idna" },
85 | ]
86 | sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" }
87 | wheels = [
88 | { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" },
89 | ]
90 |
91 | [[package]]
92 | name = "idna"
93 | version = "3.10"
94 | source = { registry = "https://pypi.org/simple" }
95 | sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" }
96 | wheels = [
97 | { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload-time = "2024-09-15T18:07:37.964Z" },
98 | ]
99 |
100 | [[package]]
101 | name = "iniconfig"
102 | version = "2.0.0"
103 | source = { registry = "https://pypi.org/simple" }
104 | sdist = { url = "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", size = 4646, upload-time = "2023-01-07T11:08:11.254Z" }
105 | wheels = [
106 | { url = "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374", size = 5892, upload-time = "2023-01-07T11:08:09.864Z" },
107 | ]
108 |
109 | [[package]]
110 | name = "jiter"
111 | version = "0.9.0"
112 | source = { registry = "https://pypi.org/simple" }
113 | sdist = { url = "https://files.pythonhosted.org/packages/1e/c2/e4562507f52f0af7036da125bb699602ead37a2332af0788f8e0a3417f36/jiter-0.9.0.tar.gz", hash = "sha256:aadba0964deb424daa24492abc3d229c60c4a31bfee205aedbf1acc7639d7893", size = 162604, upload-time = "2025-03-10T21:37:03.278Z" }
114 | wheels = [
115 | { url = "https://files.pythonhosted.org/packages/e7/1b/4cd165c362e8f2f520fdb43245e2b414f42a255921248b4f8b9c8d871ff1/jiter-0.9.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:2764891d3f3e8b18dce2cff24949153ee30c9239da7c00f032511091ba688ff7", size = 308197, upload-time = "2025-03-10T21:36:03.828Z" },
116 | { url = "https://files.pythonhosted.org/packages/13/aa/7a890dfe29c84c9a82064a9fe36079c7c0309c91b70c380dc138f9bea44a/jiter-0.9.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:387b22fbfd7a62418d5212b4638026d01723761c75c1c8232a8b8c37c2f1003b", size = 318160, upload-time = "2025-03-10T21:36:05.281Z" },
117 | { url = "https://files.pythonhosted.org/packages/6a/38/5888b43fc01102f733f085673c4f0be5a298f69808ec63de55051754e390/jiter-0.9.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d8da8629ccae3606c61d9184970423655fb4e33d03330bcdfe52d234d32f69", size = 341259, upload-time = "2025-03-10T21:36:06.716Z" },
118 | { url = "https://files.pythonhosted.org/packages/3d/5e/bbdbb63305bcc01006de683b6228cd061458b9b7bb9b8d9bc348a58e5dc2/jiter-0.9.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a1be73d8982bdc278b7b9377426a4b44ceb5c7952073dd7488e4ae96b88e1103", size = 363730, upload-time = "2025-03-10T21:36:08.138Z" },
119 | { url = "https://files.pythonhosted.org/packages/75/85/53a3edc616992fe4af6814c25f91ee3b1e22f7678e979b6ea82d3bc0667e/jiter-0.9.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2228eaaaa111ec54b9e89f7481bffb3972e9059301a878d085b2b449fbbde635", size = 405126, upload-time = "2025-03-10T21:36:10.934Z" },
120 | { url = "https://files.pythonhosted.org/packages/ae/b3/1ee26b12b2693bd3f0b71d3188e4e5d817b12e3c630a09e099e0a89e28fa/jiter-0.9.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:11509bfecbc319459647d4ac3fd391d26fdf530dad00c13c4dadabf5b81f01a4", size = 393668, upload-time = "2025-03-10T21:36:12.468Z" },
121 | { url = "https://files.pythonhosted.org/packages/11/87/e084ce261950c1861773ab534d49127d1517b629478304d328493f980791/jiter-0.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f22238da568be8bbd8e0650e12feeb2cfea15eda4f9fc271d3b362a4fa0604d", size = 352350, upload-time = "2025-03-10T21:36:14.148Z" },
122 | { url = "https://files.pythonhosted.org/packages/f0/06/7dca84b04987e9df563610aa0bc154ea176e50358af532ab40ffb87434df/jiter-0.9.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:17f5d55eb856597607562257c8e36c42bc87f16bef52ef7129b7da11afc779f3", size = 384204, upload-time = "2025-03-10T21:36:15.545Z" },
123 | { url = "https://files.pythonhosted.org/packages/16/2f/82e1c6020db72f397dd070eec0c85ebc4df7c88967bc86d3ce9864148f28/jiter-0.9.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:6a99bed9fbb02f5bed416d137944419a69aa4c423e44189bc49718859ea83bc5", size = 520322, upload-time = "2025-03-10T21:36:17.016Z" },
124 | { url = "https://files.pythonhosted.org/packages/36/fd/4f0cd3abe83ce208991ca61e7e5df915aa35b67f1c0633eb7cf2f2e88ec7/jiter-0.9.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e057adb0cd1bd39606100be0eafe742de2de88c79df632955b9ab53a086b3c8d", size = 512184, upload-time = "2025-03-10T21:36:18.47Z" },
125 | { url = "https://files.pythonhosted.org/packages/a0/3c/8a56f6d547731a0b4410a2d9d16bf39c861046f91f57c98f7cab3d2aa9ce/jiter-0.9.0-cp313-cp313-win32.whl", hash = "sha256:f7e6850991f3940f62d387ccfa54d1a92bd4bb9f89690b53aea36b4364bcab53", size = 206504, upload-time = "2025-03-10T21:36:19.809Z" },
126 | { url = "https://files.pythonhosted.org/packages/f4/1c/0c996fd90639acda75ed7fa698ee5fd7d80243057185dc2f63d4c1c9f6b9/jiter-0.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:c8ae3bf27cd1ac5e6e8b7a27487bf3ab5f82318211ec2e1346a5b058756361f7", size = 204943, upload-time = "2025-03-10T21:36:21.536Z" },
127 | { url = "https://files.pythonhosted.org/packages/78/0f/77a63ca7aa5fed9a1b9135af57e190d905bcd3702b36aca46a01090d39ad/jiter-0.9.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f0b2827fb88dda2cbecbbc3e596ef08d69bda06c6f57930aec8e79505dc17001", size = 317281, upload-time = "2025-03-10T21:36:22.959Z" },
128 | { url = "https://files.pythonhosted.org/packages/f9/39/a3a1571712c2bf6ec4c657f0d66da114a63a2e32b7e4eb8e0b83295ee034/jiter-0.9.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:062b756ceb1d40b0b28f326cba26cfd575a4918415b036464a52f08632731e5a", size = 350273, upload-time = "2025-03-10T21:36:24.414Z" },
129 | { url = "https://files.pythonhosted.org/packages/ee/47/3729f00f35a696e68da15d64eb9283c330e776f3b5789bac7f2c0c4df209/jiter-0.9.0-cp313-cp313t-win_amd64.whl", hash = "sha256:6f7838bc467ab7e8ef9f387bd6de195c43bad82a569c1699cb822f6609dd4cdf", size = 206867, upload-time = "2025-03-10T21:36:25.843Z" },
130 | ]
131 |
132 | [[package]]
133 | name = "openai"
134 | version = "1.66.2"
135 | source = { registry = "https://pypi.org/simple" }
136 | dependencies = [
137 | { name = "anyio" },
138 | { name = "distro" },
139 | { name = "httpx" },
140 | { name = "jiter" },
141 | { name = "pydantic" },
142 | { name = "sniffio" },
143 | { name = "tqdm" },
144 | { name = "typing-extensions" },
145 | ]
146 | sdist = { url = "https://files.pythonhosted.org/packages/d8/e1/b3e1fda1aa32d4f40d4de744e91de4de65c854c3e53c63342e4b5f9c5995/openai-1.66.2.tar.gz", hash = "sha256:9b3a843c25f81ee09b6469d483d9fba779d5c6ea41861180772f043481b0598d", size = 397041, upload-time = "2025-03-11T21:52:37.603Z" }
147 | wheels = [
148 | { url = "https://files.pythonhosted.org/packages/2c/6f/3315b3583ffe3e31c55b446cb22d2a7c235e65ca191674fffae62deb3c11/openai-1.66.2-py3-none-any.whl", hash = "sha256:75194057ee6bb8b732526387b6041327a05656d976fc21c064e21c8ac6b07999", size = 567268, upload-time = "2025-03-11T21:52:36.062Z" },
149 | ]
150 |
151 | [[package]]
152 | name = "packaging"
153 | version = "24.2"
154 | source = { registry = "https://pypi.org/simple" }
155 | sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950, upload-time = "2024-11-08T09:47:47.202Z" }
156 | wheels = [
157 | { url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451, upload-time = "2024-11-08T09:47:44.722Z" },
158 | ]
159 |
160 | [[package]]
161 | name = "pluggy"
162 | version = "1.5.0"
163 | source = { registry = "https://pypi.org/simple" }
164 | sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955, upload-time = "2024-04-20T21:34:42.531Z" }
165 | wheels = [
166 | { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556, upload-time = "2024-04-20T21:34:40.434Z" },
167 | ]
168 |
169 | [[package]]
170 | name = "pydantic"
171 | version = "2.10.6"
172 | source = { registry = "https://pypi.org/simple" }
173 | dependencies = [
174 | { name = "annotated-types" },
175 | { name = "pydantic-core" },
176 | { name = "typing-extensions" },
177 | ]
178 | sdist = { url = "https://files.pythonhosted.org/packages/b7/ae/d5220c5c52b158b1de7ca89fc5edb72f304a70a4c540c84c8844bf4008de/pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236", size = 761681, upload-time = "2025-01-24T01:42:12.693Z" }
179 | wheels = [
180 | { url = "https://files.pythonhosted.org/packages/f4/3c/8cc1cc84deffa6e25d2d0c688ebb80635dfdbf1dbea3e30c541c8cf4d860/pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584", size = 431696, upload-time = "2025-01-24T01:42:10.371Z" },
181 | ]
182 |
183 | [[package]]
184 | name = "pydantic-core"
185 | version = "2.27.2"
186 | source = { registry = "https://pypi.org/simple" }
187 | dependencies = [
188 | { name = "typing-extensions" },
189 | ]
190 | sdist = { url = "https://files.pythonhosted.org/packages/fc/01/f3e5ac5e7c25833db5eb555f7b7ab24cd6f8c322d3a3ad2d67a952dc0abc/pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39", size = 413443, upload-time = "2024-12-18T11:31:54.917Z" }
191 | wheels = [
192 | { url = "https://files.pythonhosted.org/packages/41/b1/9bc383f48f8002f99104e3acff6cba1231b29ef76cfa45d1506a5cad1f84/pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b", size = 1892709, upload-time = "2024-12-18T11:29:03.193Z" },
193 | { url = "https://files.pythonhosted.org/packages/10/6c/e62b8657b834f3eb2961b49ec8e301eb99946245e70bf42c8817350cbefc/pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154", size = 1811273, upload-time = "2024-12-18T11:29:05.306Z" },
194 | { url = "https://files.pythonhosted.org/packages/ba/15/52cfe49c8c986e081b863b102d6b859d9defc63446b642ccbbb3742bf371/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9", size = 1823027, upload-time = "2024-12-18T11:29:07.294Z" },
195 | { url = "https://files.pythonhosted.org/packages/b1/1c/b6f402cfc18ec0024120602bdbcebc7bdd5b856528c013bd4d13865ca473/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9", size = 1868888, upload-time = "2024-12-18T11:29:09.249Z" },
196 | { url = "https://files.pythonhosted.org/packages/bd/7b/8cb75b66ac37bc2975a3b7de99f3c6f355fcc4d89820b61dffa8f1e81677/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1", size = 2037738, upload-time = "2024-12-18T11:29:11.23Z" },
197 | { url = "https://files.pythonhosted.org/packages/c8/f1/786d8fe78970a06f61df22cba58e365ce304bf9b9f46cc71c8c424e0c334/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a", size = 2685138, upload-time = "2024-12-18T11:29:16.396Z" },
198 | { url = "https://files.pythonhosted.org/packages/a6/74/d12b2cd841d8724dc8ffb13fc5cef86566a53ed358103150209ecd5d1999/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e", size = 1997025, upload-time = "2024-12-18T11:29:20.25Z" },
199 | { url = "https://files.pythonhosted.org/packages/a0/6e/940bcd631bc4d9a06c9539b51f070b66e8f370ed0933f392db6ff350d873/pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4", size = 2004633, upload-time = "2024-12-18T11:29:23.877Z" },
200 | { url = "https://files.pythonhosted.org/packages/50/cc/a46b34f1708d82498c227d5d80ce615b2dd502ddcfd8376fc14a36655af1/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27", size = 1999404, upload-time = "2024-12-18T11:29:25.872Z" },
201 | { url = "https://files.pythonhosted.org/packages/ca/2d/c365cfa930ed23bc58c41463bae347d1005537dc8db79e998af8ba28d35e/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee", size = 2130130, upload-time = "2024-12-18T11:29:29.252Z" },
202 | { url = "https://files.pythonhosted.org/packages/f4/d7/eb64d015c350b7cdb371145b54d96c919d4db516817f31cd1c650cae3b21/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1", size = 2157946, upload-time = "2024-12-18T11:29:31.338Z" },
203 | { url = "https://files.pythonhosted.org/packages/a4/99/bddde3ddde76c03b65dfd5a66ab436c4e58ffc42927d4ff1198ffbf96f5f/pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130", size = 1834387, upload-time = "2024-12-18T11:29:33.481Z" },
204 | { url = "https://files.pythonhosted.org/packages/71/47/82b5e846e01b26ac6f1893d3c5f9f3a2eb6ba79be26eef0b759b4fe72946/pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee", size = 1990453, upload-time = "2024-12-18T11:29:35.533Z" },
205 | { url = "https://files.pythonhosted.org/packages/51/b2/b2b50d5ecf21acf870190ae5d093602d95f66c9c31f9d5de6062eb329ad1/pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b", size = 1885186, upload-time = "2024-12-18T11:29:37.649Z" },
206 | ]
207 |
208 | [[package]]
209 | name = "pytest"
210 | version = "8.3.5"
211 | source = { registry = "https://pypi.org/simple" }
212 | dependencies = [
213 | { name = "colorama", marker = "sys_platform == 'win32'" },
214 | { name = "iniconfig" },
215 | { name = "packaging" },
216 | { name = "pluggy" },
217 | ]
218 | sdist = { url = "https://files.pythonhosted.org/packages/ae/3c/c9d525a414d506893f0cd8a8d0de7706446213181570cdbd766691164e40/pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845", size = 1450891, upload-time = "2025-03-02T12:54:54.503Z" }
219 | wheels = [
220 | { url = "https://files.pythonhosted.org/packages/30/3d/64ad57c803f1fa1e963a7946b6e0fea4a70df53c1a7fed304586539c2bac/pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820", size = 343634, upload-time = "2025-03-02T12:54:52.069Z" },
221 | ]
222 |
223 | [[package]]
224 | name = "pytest-asyncio"
225 | version = "0.25.3"
226 | source = { registry = "https://pypi.org/simple" }
227 | dependencies = [
228 | { name = "pytest" },
229 | ]
230 | sdist = { url = "https://files.pythonhosted.org/packages/f2/a8/ecbc8ede70921dd2f544ab1cadd3ff3bf842af27f87bbdea774c7baa1d38/pytest_asyncio-0.25.3.tar.gz", hash = "sha256:fc1da2cf9f125ada7e710b4ddad05518d4cee187ae9412e9ac9271003497f07a", size = 54239, upload-time = "2025-01-28T18:37:58.729Z" }
231 | wheels = [
232 | { url = "https://files.pythonhosted.org/packages/67/17/3493c5624e48fd97156ebaec380dcaafee9506d7e2c46218ceebbb57d7de/pytest_asyncio-0.25.3-py3-none-any.whl", hash = "sha256:9e89518e0f9bd08928f97a3482fdc4e244df17529460bc038291ccaf8f85c7c3", size = 19467, upload-time = "2025-01-28T18:37:56.798Z" },
233 | ]
234 |
235 | [[package]]
236 | name = "pytest-mock"
237 | version = "3.15.1"
238 | source = { registry = "https://pypi.org/simple" }
239 | dependencies = [
240 | { name = "pytest" },
241 | ]
242 | sdist = { url = "https://files.pythonhosted.org/packages/68/14/eb014d26be205d38ad5ad20d9a80f7d201472e08167f0bb4361e251084a9/pytest_mock-3.15.1.tar.gz", hash = "sha256:1849a238f6f396da19762269de72cb1814ab44416fa73a8686deac10b0d87a0f", size = 34036, upload-time = "2025-09-16T16:37:27.081Z" }
243 | wheels = [
244 | { url = "https://files.pythonhosted.org/packages/5a/cc/06253936f4a7fa2e0f48dfe6d851d9c56df896a9ab09ac019d70b760619c/pytest_mock-3.15.1-py3-none-any.whl", hash = "sha256:0a25e2eb88fe5168d535041d09a4529a188176ae608a6d249ee65abc0949630d", size = 10095, upload-time = "2025-09-16T16:37:25.734Z" },
245 | ]
246 |
247 | [[package]]
248 | name = "smartfunc"
249 | version = "1.0.0"
250 | source = { editable = "." }
251 | dependencies = [
252 | { name = "openai" },
253 | { name = "pydantic" },
254 | ]
255 |
256 | [package.optional-dependencies]
257 | test = [
258 | { name = "pytest" },
259 | { name = "pytest-asyncio" },
260 | { name = "pytest-mock" },
261 | ]
262 |
263 | [package.metadata]
264 | requires-dist = [
265 | { name = "openai", specifier = ">=1.0.0" },
266 | { name = "pydantic", specifier = ">=2.0.0" },
267 | { name = "pytest", marker = "extra == 'test'", specifier = ">=7.0.0" },
268 | { name = "pytest-asyncio", marker = "extra == 'test'", specifier = ">=0.21.0" },
269 | { name = "pytest-mock", marker = "extra == 'test'", specifier = ">=3.12.0" },
270 | ]
271 | provides-extras = ["test"]
272 |
273 | [[package]]
274 | name = "sniffio"
275 | version = "1.3.1"
276 | source = { registry = "https://pypi.org/simple" }
277 | sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" }
278 | wheels = [
279 | { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" },
280 | ]
281 |
282 | [[package]]
283 | name = "tqdm"
284 | version = "4.67.1"
285 | source = { registry = "https://pypi.org/simple" }
286 | dependencies = [
287 | { name = "colorama", marker = "sys_platform == 'win32'" },
288 | ]
289 | sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737, upload-time = "2024-11-24T20:12:22.481Z" }
290 | wheels = [
291 | { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" },
292 | ]
293 |
294 | [[package]]
295 | name = "typing-extensions"
296 | version = "4.12.2"
297 | source = { registry = "https://pypi.org/simple" }
298 | sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321, upload-time = "2024-06-07T18:52:15.995Z" }
299 | wheels = [
300 | { url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438, upload-time = "2024-06-07T18:52:13.582Z" },
301 | ]
302 |
--------------------------------------------------------------------------------