├── .github ├── ISSUE_TEMPLATE │ ├── feature-request.md │ ├── issue.md │ └── showcase-submission.md ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── linting.yml │ ├── python-versions.yml │ ├── tests.yml │ └── type-check.yml ├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── assets └── ClientAI.png ├── build_docs.py ├── clientai ├── __init__.py ├── _common_types.py ├── _constants.py ├── _typing.py ├── agent │ ├── __init__.py │ ├── config │ │ ├── __init__.py │ │ ├── defaults.py │ │ ├── models.py │ │ ├── steps.py │ │ └── tools.py │ ├── core │ │ ├── __init__.py │ │ ├── agent.py │ │ ├── context.py │ │ ├── execution.py │ │ ├── factory.py │ │ └── workflow.py │ ├── exceptions.py │ ├── formatting │ │ ├── __init__.py │ │ ├── formatter.py │ │ └── options.py │ ├── steps │ │ ├── __init__.py │ │ ├── base.py │ │ ├── decorators.py │ │ ├── registry.py │ │ └── types.py │ ├── tools │ │ ├── __init__.py │ │ ├── base.py │ │ ├── decorators.py │ │ ├── registry.py │ │ ├── selection │ │ │ ├── __init__.py │ │ │ ├── config.py │ │ │ └── selector.py │ │ └── types.py │ ├── types │ │ ├── __init__.py │ │ ├── common.py │ │ ├── models.py │ │ └── protocols.py │ ├── utils │ │ ├── __init__.py │ │ ├── exceptions.py │ │ └── validation.py │ └── validation │ │ ├── __init__.py │ │ ├── base.py │ │ ├── exceptions.py │ │ ├── result.py │ │ └── types.py ├── ai_provider.py ├── client_ai.py ├── exceptions.py ├── groq │ ├── __init__.py │ ├── _typing.py │ └── provider.py ├── ollama │ ├── __init__.py │ ├── _typing.py │ ├── manager │ │ ├── __init__.py │ │ ├── config.py │ │ ├── core.py │ │ ├── exceptions.py │ │ └── platform_info.py │ └── provider.py ├── openai │ ├── __init__.py │ ├── _typing.py │ └── provider.py ├── py.typed └── replicate │ ├── __init__.py │ ├── _typing.py │ └── provider.py ├── docs ├── assets │ ├── ClientAI.png │ ├── benav_labs_banner.png │ └── logo.png ├── en │ ├── advanced │ │ ├── agent │ │ │ └── creating_run.md │ │ ├── client │ │ │ ├── groq_specific.md │ │ │ ├── ollama_specific.md │ │ │ ├── openai_specific.md │ │ │ └── replicate_specific.md │ │ ├── error_handling.md │ │ └── overview.md │ ├── api │ │ ├── agent │ │ │ ├── core │ │ │ │ ├── agent.md │ │ │ │ ├── context.md │ │ │ │ ├── execution.md │ │ │ │ └── workflow.md │ │ │ ├── steps │ │ │ │ ├── decorators.md │ │ │ │ ├── step.md │ │ │ │ └── types.md │ │ │ └── tools │ │ │ │ ├── registry.md │ │ │ │ ├── selector.md │ │ │ │ └── tool.md │ │ ├── client │ │ │ ├── ai_provider.md │ │ │ ├── clientai.md │ │ │ ├── ollama_manager │ │ │ │ ├── ollama_manager.md │ │ │ │ └── ollama_server_config.md │ │ │ └── specific_providers │ │ │ │ ├── groq_provider.md │ │ │ │ ├── ollama_provider.md │ │ │ │ ├── openai_provider.md │ │ │ │ └── replicate_provider.md │ │ └── overview.md │ ├── community │ │ ├── CODE_OF_CONDUCT.md │ │ ├── CONTRIBUTING.md │ │ ├── LICENSE.md │ │ ├── overview.md │ │ └── showcase_submission.md │ ├── examples │ │ ├── agent │ │ │ ├── code_analyzer.md │ │ │ ├── simple_qa.md │ │ │ ├── task_planner.md │ │ │ └── writing_assistant.md │ │ ├── client │ │ │ ├── ai_dungeon_master.md │ │ │ ├── simple_qa.md │ │ │ └── translator.md │ │ └── overview.md │ ├── learn │ │ └── overview.md │ └── usage │ │ ├── agent │ │ ├── context.md │ │ ├── creating_agents.md │ │ ├── tools.md │ │ ├── validation.md │ │ └── workflow_steps.md │ │ ├── client │ │ ├── chat_functionality.md │ │ ├── error_handling.md │ │ ├── initialization.md │ │ ├── multiple_providers.md │ │ └── text_generation.md │ │ ├── ollama_manager.md │ │ └── overview.md ├── index.md ├── pt │ ├── index.md │ ├── 
showcase.md │ └── usage │ │ ├── agent │ │ ├── context.md │ │ ├── creating_agents.md │ │ ├── tools.md │ │ ├── validation.md │ │ └── workflow_steps.md │ │ ├── client │ │ ├── chat_functionality.md │ │ ├── error_handling.md │ │ ├── initialization.md │ │ ├── multiple_providers.md │ │ └── text_generation.md │ │ ├── ollama_manager.md │ │ └── overview.md ├── showcase.md └── stylesheets │ └── extra.css ├── mkdocs.yml ├── mypy.ini ├── pyproject.toml └── tests ├── __init__.py ├── agent ├── __init__.py ├── test_agent.py ├── test_context.py ├── test_factory.py ├── test_tools.py └── test_workflow.py ├── groq ├── __init__.py ├── test_exceptions.py └── test_provider.py ├── ollama ├── __init__.py ├── test_exceptions.py ├── test_manager.py ├── test_ollama_server.py └── test_provider.py ├── openai ├── __init__.py ├── test_exceptions.py └── test_provider.py └── replicate ├── __init__.py ├── test_exceptions.py └── test_provider.py /.github/ISSUE_TEMPLATE/feature-request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: ClientAI Feature Request 3 | about: Suggest a new feature or improvement for ClientAI 4 | title: '[Feature]: ' 5 | labels: 'enhancement' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Feature Description** 11 | A clear and concise description of the feature you'd like to see 12 | 13 | **Use Case** 14 | Explain when and how you'd use this feature. Include example code if relevant: 15 | ```python 16 | # Example of how you'd like to use the feature 17 | ``` 18 | 19 | **Alternatives Considered** 20 | Have you considered any alternative solutions or workarounds? 21 | 22 | **Additional Context** 23 | Add any other context, mockups, or examples about the feature request here -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/issue.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: ClientAI Issue 3 | about: Report a bug 4 | title: '[Bug]: ' 5 | labels: 'bug' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Description** 11 | Describe the bug. Explain what happened and what you expected to happen. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the issue: 15 | ```python 16 | # Your code here 17 | ``` 18 | 19 | **Environment** 20 | - ClientAI Version: 21 | - AI Provider: 22 | - Python Version: 23 | - OS: 24 | 25 | **Additional Context** 26 | Add any other context, screenshots, or details about the issue here. -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/showcase-submission.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: ClientAI Project Showcase 3 | about: Submit your project to be featured in the ClientAI showcase 4 | title: 'Showcase: ' 5 | labels: 'showcase' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Project Name** 11 | The name of your project 12 | 13 | **Category** 14 | Choose one: Tutorial, Open Source Project, Application, Commercial Service 15 | 16 | **Description** 17 | Brief description of what your project does (2-3 sentences) 18 | 19 | **Author** 20 | Your name or GitHub handle 21 | 22 | **AI Providers Used** 23 | List the providers your project uses (OpenAI, Groq, Replicate, Ollama, etc.) 24 | 25 | **Project Links** 26 | Relevant links based on your project type (Tutorial URL, Repository, Product URL, etc.) 27 | 28 | **ClientAI Features** 29 | What ClientAI features does your project use? (chat functionality, agent framework, etc.) 
30 | 31 | **Additional Information** 32 | Any other details you'd like to share about your project -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | # Pull Request Template for ClientAI 2 | 3 | ## Description 4 | Please provide a clear and concise description of what your pull request is about. 5 | 6 | ## Changes 7 | Briefly list the changes you've made. If applicable, also link any relevant issues or pull requests. 8 | 9 | ## Tests 10 | Describe the tests you added or modified to cover your changes, if applicable. 11 | 12 | ## Checklist 13 | - [ ] I have read the [CONTRIBUTING](CONTRIBUTING.md) document. 14 | - [ ] My code follows the code style of this project. 15 | - [ ] I have added necessary documentation (if appropriate). 16 | - [ ] I have added tests that cover my changes (if applicable). 17 | - [ ] All new and existing tests passed. 18 | 19 | ## Additional Notes 20 | Include any additional information that you think is important for reviewers to know. 21 | -------------------------------------------------------------------------------- /.github/workflows/linting.yml: -------------------------------------------------------------------------------- 1 | name: Linting 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | lint: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - uses: actions/checkout@v3 10 | 11 | - name: Set up Python 12 | uses: actions/setup-python@v4 13 | with: 14 | python-version: 3.12 15 | 16 | - name: Install Poetry 17 | run: pip install poetry 18 | 19 | - name: Install dependencies 20 | run: poetry install --with dev 21 | 22 | - name: Run Ruff 23 | run: poetry run ruff check clientai -------------------------------------------------------------------------------- /.github/workflows/python-versions.yml: -------------------------------------------------------------------------------- 1 | name: Python Versions 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | build: 7 | runs-on: ubuntu-latest 8 | strategy: 9 | matrix: 10 | python-version: ['3.9', '3.10', '3.11', '3.12'] 11 | 12 | steps: 13 | - uses: actions/checkout@v3 14 | - name: Set up Python ${{ matrix.python-version }} 15 | uses: actions/setup-python@v4 16 | with: 17 | python-version: ${{ matrix.python-version }} 18 | 19 | - name: Install Poetry 20 | run: pip install poetry 21 | 22 | - name: Install dependencies 23 | run: poetry install --with dev 24 | 25 | - name: Run tests 26 | run: poetry run pytest -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | tests: 7 | runs-on: ubuntu-latest 8 | 9 | steps: 10 | - uses: actions/checkout@v3 11 | 12 | - name: Set up Python 13 | uses: actions/setup-python@v4 14 | with: 15 | python-version: 3.11 16 | 17 | - name: Install Poetry 18 | run: pip install poetry 19 | 20 | - name: Install dependencies 21 | run: poetry install --with dev 22 | 23 | - name: Run tests 24 | run: poetry run pytest -------------------------------------------------------------------------------- /.github/workflows/type-check.yml: -------------------------------------------------------------------------------- 1 | name: Type Checking 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | type-check: 7 | runs-on: ubuntu-latest 8 | 9 | steps: 10 | - uses: 
actions/checkout@v3 11 | 12 | - name: Set up Python 13 | uses: actions/setup-python@v4 14 | with: 15 | python-version: 3.11 16 | 17 | - name: Install Poetry 18 | run: pip install poetry 19 | 20 | - name: Install dependencies 21 | run: poetry install --with dev 22 | 23 | - name: Install mypy 1.9.0 24 | run: poetry add mypy==1.9.0 --dev 25 | 26 | - name: Run mypy 27 | run: poetry run mypy clientai -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | poetry.lock 112 | src/poetry.lock 113 | 114 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 115 | __pypackages__/ 116 | 117 | # Celery stuff 118 | celerybeat-schedule 119 | celerybeat.pid 120 | 121 | # SageMath parsed files 122 | *.sage.py 123 | 124 | # Environments 125 | .env 126 | .venv 127 | env/ 128 | venv/ 129 | ENV/ 130 | env.bak/ 131 | venv.bak/ 132 | 133 | # Spyder project settings 134 | .spyderproject 135 | .spyproject 136 | 137 | # Rope project settings 138 | .ropeproject 139 | 140 | # mkdocs documentation 141 | /site 142 | 143 | # mypy 144 | .mypy_cache/ 145 | .dmypy.json 146 | dmypy.json 147 | 148 | # Pyre type checker 149 | .pyre/ 150 | 151 | # pytype static type analyzer 152 | .pytype/ 153 | 154 | # Cython debug symbols 155 | cython_debug/ 156 | 157 | # PyCharm 158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 160 | # and can be added to the global gitignore or merged into this file. For a more nuclear 161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 162 | #.idea/ 163 | 164 | # Macos 165 | .DS_Store 166 | 167 | .ruff_cache 168 | 169 | # Notebooks 170 | *.ipynb -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, religion, or sexual identity 10 | and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 
45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | igor.magalhaes.r@gmail.com. 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series 86 | of actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. Violating these terms may lead to a temporary or 93 | permanent ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within 113 | the community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.0, available at 119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 
120 | 121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 122 | enforcement ladder](https://github.com/mozilla/diversity). 123 | 124 | [homepage]: https://www.contributor-covenant.org 125 | 126 | For answers to common questions about this code of conduct, see the FAQ at 127 | https://www.contributor-covenant.org/faq. Translations are available at 128 | https://www.contributor-covenant.org/translations. 129 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to ClientAI 2 | 3 | Thank you for your interest in contributing to ClientAI! This guide is meant to make it easy for you to get started. 4 | 5 | ## Setting Up Your Development Environment 6 | 7 | ### Cloning the Repository 8 | Start by forking and cloning the ClientAI repository: 9 | 10 | ```sh 11 | git clone https://github.com/YOUR-GITHUB-USERNAME/clientai.git 12 | ``` 13 | 14 | ### Using Poetry for Dependency Management 15 | ClientAI uses Poetry for managing dependencies. If you don't have Poetry installed, follow the instructions on the [official Poetry website](https://python-poetry.org/docs/). 16 | 17 | Once Poetry is installed, navigate to the cloned repository and install the dependencies: 18 | ```sh 19 | cd clientai 20 | poetry install 21 | ``` 22 | 23 | ### Activating the Virtual Environment 24 | Poetry creates a virtual environment for your project. Activate it using: 25 | 26 | ```sh 27 | poetry shell 28 | ``` 29 | 30 | ## Making Contributions 31 | 32 | ### Coding Standards 33 | - Follow PEP 8 guidelines. 34 | - Write meaningful tests for new features or bug fixes. 35 | 36 | ### Testing with Pytest 37 | ClientAI uses pytest for testing. Run tests using: 38 | ```sh 39 | poetry run pytest 40 | ``` 41 | 42 | ### Linting 43 | Use mypy for type checking: 44 | ```sh 45 | mypy clientai 46 | ``` 47 | 48 | Use ruff for style: 49 | ```sh 50 | ruff check --fix 51 | ruff format 52 | ``` 53 | 54 | Ensure your code passes linting before submitting. 55 | 56 | ## Submitting Your Contributions 57 | 58 | ### Creating a Pull Request 59 | After making your changes: 60 | 61 | - Push your changes to your fork. 62 | - Open a pull request with a clear description of your changes. 63 | - Update the README.md if necessary. 64 | 65 | 66 | ### Code Reviews 67 | - Address any feedback from code reviews. 68 | - Once approved, your contributions will be merged into the main branch. 69 | 70 | ## Code of Conduct 71 | Please adhere to our [Code of Conduct](CODE_OF_CONDUCT.md) to maintain a welcoming and inclusive environment. 
72 | 73 | Thank you for contributing to ClientAI🚀 74 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Igor Benav 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /assets/ClientAI.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/benavlabs/clientai/b5010727492fba38fecc160398f96173bad608da/assets/ClientAI.png -------------------------------------------------------------------------------- /build_docs.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | 4 | from dotenv import load_dotenv 5 | 6 | 7 | def build_docs(): 8 | load_dotenv() 9 | 10 | if not os.getenv("GOOGLE_ANALYTICS_KEY"): 11 | raise ValueError( 12 | "GOOGLE_ANALYTICS_KEY environment variable is not set" 13 | ) 14 | 15 | subprocess.run(["mkdocs", "build"], check=True) 16 | 17 | 18 | if __name__ == "__main__": 19 | build_docs() 20 | -------------------------------------------------------------------------------- /clientai/__init__.py: -------------------------------------------------------------------------------- 1 | from ._constants import ( 2 | GROQ_INSTALLED, 3 | OLLAMA_INSTALLED, 4 | OPENAI_INSTALLED, 5 | REPLICATE_INSTALLED, 6 | ) 7 | from .agent import create_agent 8 | from .client_ai import ClientAI 9 | 10 | __all__ = [ 11 | "ClientAI", 12 | "create_agent", 13 | "OPENAI_INSTALLED", 14 | "REPLICATE_INSTALLED", 15 | "OLLAMA_INSTALLED", 16 | "GROQ_INSTALLED", 17 | ] 18 | __version__ = "0.3.3" 19 | -------------------------------------------------------------------------------- /clientai/_common_types.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from collections.abc import Iterator 4 | from typing import Any, Dict, TypeVar, Union 5 | 6 | JsonDict = Dict[str, Any] 7 | Message = Dict[str, str] 8 | 9 | T = TypeVar("T", covariant=True) 10 | S = TypeVar("S", covariant=True) 11 | R = TypeVar("R", covariant=True) 12 | 13 | GenericResponse = Union[R, T, Iterator[Union[R, S]]] 14 | -------------------------------------------------------------------------------- /clientai/_constants.py: 
-------------------------------------------------------------------------------- 1 | from importlib.util import find_spec 2 | 3 | OPENAI_INSTALLED = find_spec("openai") is not None 4 | REPLICATE_INSTALLED = find_spec("replicate") is not None 5 | OLLAMA_INSTALLED = find_spec("ollama") is not None 6 | GROQ_INSTALLED = find_spec("groq") is not None 7 | -------------------------------------------------------------------------------- /clientai/_typing.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Generic, List, Protocol, TypeVar, Union 2 | 3 | from ._common_types import GenericResponse, Message, R, S, T 4 | from .ollama._typing import ( 5 | OllamaChatResponse, 6 | OllamaResponse, 7 | OllamaStreamResponse, 8 | ) 9 | from .openai._typing import OpenAIResponse, OpenAIStreamResponse 10 | from .replicate._typing import ReplicateResponse, ReplicateStreamResponse 11 | 12 | ProviderResponse = Union[ 13 | OpenAIResponse, ReplicateResponse, OllamaResponse, OllamaChatResponse 14 | ] 15 | 16 | ProviderStreamResponse = Union[ 17 | OpenAIStreamResponse, ReplicateStreamResponse, OllamaStreamResponse 18 | ] 19 | 20 | 21 | class AIProviderProtocol(Protocol, Generic[R, T, S]): 22 | def generate_text( 23 | self, 24 | prompt: str, 25 | model: str, 26 | return_full_response: bool = False, 27 | stream: bool = False, 28 | **kwargs: Any, 29 | ) -> GenericResponse[R, T, S]: ... 30 | 31 | def chat( 32 | self, 33 | messages: List[Message], 34 | model: str, 35 | return_full_response: bool = False, 36 | stream: bool = False, 37 | **kwargs: Any, 38 | ) -> GenericResponse[R, T, S]: ... 39 | 40 | 41 | P = TypeVar("P", bound=AIProviderProtocol) 42 | 43 | AIGenericResponse = GenericResponse[ 44 | str, ProviderResponse, ProviderStreamResponse 45 | ] 46 | -------------------------------------------------------------------------------- /clientai/agent/__init__.py: -------------------------------------------------------------------------------- 1 | from .config.models import ModelConfig 2 | from .config.steps import StepConfig 3 | from .config.tools import ToolConfig 4 | from .core import Agent, AgentContext, create_agent 5 | from .steps import act, observe, run, synthesize, think 6 | from .tools import tool 7 | from .tools.selection.config import ToolSelectionConfig 8 | from .types import ToolScope 9 | 10 | __all__ = [ 11 | # Core 12 | "Agent", 13 | "AgentContext", 14 | "create_agent", 15 | # Configuration 16 | "ModelConfig", 17 | "StepConfig", 18 | "ToolConfig", 19 | "ToolSelectionConfig", 20 | # Steps 21 | "think", 22 | "act", 23 | "observe", 24 | "synthesize", 25 | "run", 26 | # Tools 27 | "tool", 28 | "ToolScope", 29 | ] 30 | -------------------------------------------------------------------------------- /clientai/agent/config/__init__.py: -------------------------------------------------------------------------------- 1 | from .models import ModelConfig 2 | from .steps import StepConfig 3 | from .tools import ToolConfig 4 | 5 | __all__ = [ 6 | "ModelConfig", 7 | "StepConfig", 8 | "ToolConfig", 9 | ] 10 | -------------------------------------------------------------------------------- /clientai/agent/config/defaults.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict 2 | 3 | from ..steps.types import StepType 4 | 5 | DEFAULT_STEP_CONFIGS: Dict[str, Dict[str, Any]] = { 6 | "think": { 7 | "temperature": 0.7, 8 | "top_p": 0.9, 9 | "step_type": StepType.THINK, 10 | }, 11 | "act": { 12 | 
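        # "act" runs cooler than "think": the lower temperature and top_p
        # below keep action steps decisive rather than exploratory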
"temperature": 0.2, 13 | "top_p": 0.8, 14 | "step_type": StepType.ACT, 15 | }, 16 | "observe": { 17 | "temperature": 0.1, 18 | "top_p": 0.5, 19 | "step_type": StepType.OBSERVE, 20 | }, 21 | "synthesize": { 22 | "temperature": 0.4, 23 | "top_p": 0.7, 24 | "step_type": StepType.SYNTHESIZE, 25 | }, 26 | } 27 | """Default configurations for different step types. 28 | 29 | A mapping of step types to their default configuration parameters. 30 | Each step type has optimized settings for its specific purpose: 31 | 32 | - think: Higher temperature for creative analysis 33 | - act: Lower temperature for decisive actions 34 | - observe: Lowest temperature for accurate observations 35 | - synthesize: Moderate temperature for balanced summarization 36 | 37 | Example: 38 | Apply default configuration: 39 | ```python 40 | step_type = "think" 41 | config = DEFAULT_STEP_CONFIGS[step_type] 42 | print(config["temperature"]) # Output: 0.7 43 | print(config["top_p"]) # Output: 0.9 44 | ``` 45 | 46 | Notes: 47 | - Temperature controls randomness (0.0-1.0) 48 | - Top_p controls nucleus sampling (0.0-1.0) 49 | - Each type has settings optimized for its purpose 50 | - These defaults can be overridden in step configuration 51 | """ 52 | 53 | STEP_TYPE_DEFAULTS = { 54 | StepType.THINK: { 55 | "temperature": 0.7, 56 | "top_p": 0.9, 57 | }, 58 | StepType.ACT: { 59 | "temperature": 0.2, 60 | "top_p": 0.8, 61 | }, 62 | StepType.OBSERVE: { 63 | "temperature": 0.1, 64 | "top_p": 0.5, 65 | }, 66 | StepType.SYNTHESIZE: { 67 | "temperature": 0.4, 68 | "top_p": 0.7, 69 | }, 70 | } 71 | """Default parameters for each StepType enum value. 72 | 73 | Similar to DEFAULT_STEP_CONFIGS but keyed by StepType enum values instead 74 | of strings. Used internally when working directly with StepType enums. 75 | 76 | Example: 77 | Access defaults by step type: 78 | ```python 79 | from clientai.agent.steps.types import StepType 80 | 81 | config = STEP_TYPE_DEFAULTS[StepType.THINK] 82 | print(config["temperature"]) # Output: 0.7 83 | print(config["top_p"]) # Output: 0.9 84 | ``` 85 | 86 | Notes: 87 | - Matches DEFAULT_STEP_CONFIGS values 88 | - Used with StepType enum values 89 | - Provides type-safe access to defaults 90 | - Does not include step_type field (redundant with key) 91 | """ 92 | 93 | __all__ = ["DEFAULT_STEP_CONFIGS", "STEP_TYPE_DEFAULTS"] 94 | -------------------------------------------------------------------------------- /clientai/agent/config/steps.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import Optional 3 | 4 | 5 | @dataclass 6 | class StepConfig: 7 | """Configuration settings for workflow step execution behavior. 8 | 9 | Controls how individual workflow steps are executed, 10 | including retry behavior, error handling, and result passing. 
11 | 12 | Attributes: 13 | enabled: Whether the step is active and should be executed 14 | retry_count: Number of times to retry failed step execution 15 | timeout: Optional timeout in seconds for step execution 16 | required: Whether step failure should halt workflow execution 17 | pass_result: Whether step result should be passed to next step 18 | use_internal_retry: Whether to use internal retry mechanism 19 | 20 | Example: 21 | Create step configuration: 22 | ```python 23 | # Basic configuration 24 | config = StepConfig( 25 | enabled=True, 26 | retry_count=3, 27 | timeout=30.0, 28 | required=True 29 | ) 30 | 31 | # Configuration for optional step 32 | optional_config = StepConfig( 33 | enabled=True, 34 | required=False, 35 | pass_result=False 36 | ) 37 | 38 | # Configuration with retries disabled 39 | no_retry_config = StepConfig( 40 | retry_count=0, 41 | use_internal_retry=False 42 | ) 43 | ``` 44 | """ 45 | 46 | enabled: bool = True 47 | retry_count: int = 0 48 | timeout: Optional[float] = None 49 | required: bool = True 50 | pass_result: bool = True 51 | use_internal_retry: bool = True 52 | 53 | @classmethod 54 | def from_dict(cls, config: dict) -> "StepConfig": # pragma: no cover 55 | """Create step configuration from dictionary. 56 | 57 | Args: 58 | config: Dictionary containing configuration parameters. 59 | Only recognized parameters are used. 60 | 61 | Returns: 62 | StepConfig: New configuration instance 63 | with parameters from dictionary. 64 | 65 | Example: 66 | Create from dictionary: 67 | ```python 68 | config = StepConfig.from_dict({ 69 | "enabled": True, 70 | "retry_count": 3, 71 | "timeout": 30.0, 72 | "required": True, 73 | "unknown_param": "ignored" # This will be ignored 74 | }) 75 | ``` 76 | """ 77 | return cls( 78 | **{ 79 | k: v 80 | for k, v in config.items() 81 | if k in cls.__dataclass_fields__ 82 | } 83 | ) 84 | -------------------------------------------------------------------------------- /clientai/agent/config/tools.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass, field 2 | from typing import Callable, FrozenSet, Optional, Union 3 | 4 | from ..tools.types import ToolScope 5 | from ..types import ToolProtocol 6 | 7 | 8 | @dataclass 9 | class ToolConfig: 10 | """Configuration for registering tools with an agent. 11 | 12 | Defines how a tool should be registered and used 13 | within an agent's workflow, including its availability 14 | in different workflow scopes and metadata. 
15 | 16 | Attributes: 17 | tool: The callable function or tool protocol instance to register 18 | scopes: Set of workflow scopes where the tool can be used 19 | name: Optional custom name for the tool 20 | description: Optional description of tool's functionality 21 | 22 | Example: 23 | Basic tool configuration: 24 | ```python 25 | def add(x: int, y: int) -> int: 26 | return x + y 27 | 28 | # Configure tool for all scopes 29 | config = ToolConfig( 30 | tool=add, 31 | name="Calculator", 32 | description="Adds two numbers" 33 | ) 34 | 35 | # Configure tool for specific scopes 36 | config = ToolConfig( 37 | tool=add, 38 | scopes=["think", "act"], 39 | name="Calculator" 40 | ) 41 | ``` 42 | 43 | Notes: 44 | - If no scopes are specified, defaults to ["all"] 45 | - Scopes can be strings or ToolScope enum values 46 | - Name defaults to function name if not provided 47 | """ 48 | 49 | tool: Union[Callable, ToolProtocol] 50 | scopes: FrozenSet[ToolScope] = field( 51 | default_factory=lambda: frozenset({ToolScope.ALL}) 52 | ) 53 | name: Optional[str] = None 54 | description: Optional[str] = None 55 | 56 | def __post_init__(self) -> None: 57 | """Validate and convert scope specifications. 58 | 59 | Converts string scope specifications to ToolScope enum values and 60 | ensures they are stored in a frozenset for immutability. 61 | 62 | Example: 63 | Scope conversion: 64 | ```python 65 | # Using string scopes 66 | config = ToolConfig(tool=func, scopes=["think", "act"]) 67 | 68 | # Using enum scopes 69 | config = ToolConfig( 70 | tool=func, 71 | scopes=[ToolScope.THINK, ToolScope.ACT] 72 | ) 73 | ``` 74 | """ 75 | if isinstance(self.scopes, (list, set)): # noqa: UP038 76 | self.scopes = frozenset( 77 | ToolScope.from_str(s.strip()) if isinstance(s, str) else s 78 | for s in self.scopes 79 | ) 80 | -------------------------------------------------------------------------------- /clientai/agent/core/__init__.py: -------------------------------------------------------------------------------- 1 | from clientai.agent.core.agent import Agent 2 | from clientai.agent.core.context import AgentContext 3 | from clientai.agent.core.execution import StepExecutionEngine 4 | from clientai.agent.core.factory import create_agent 5 | from clientai.agent.core.workflow import WorkflowManager 6 | 7 | __all__ = [ 8 | "Agent", 9 | "AgentContext", 10 | "StepExecutionEngine", 11 | "WorkflowManager", 12 | "create_agent", 13 | ] 14 | -------------------------------------------------------------------------------- /clientai/agent/exceptions.py: -------------------------------------------------------------------------------- 1 | from clientai.exceptions import ClientAIError 2 | 3 | 4 | class AgentError(ClientAIError): 5 | """Base exception class for agent-related errors. 6 | 7 | This exception is raised when there are general errors in agent operations, 8 | such as initialization failures or invalid configurations. 9 | """ 10 | 11 | pass 12 | 13 | 14 | class StepError(AgentError): 15 | """Exception raised for errors in step execution. 16 | 17 | This exception is raised when there are errors in executing workflow steps, 18 | such as invalid step configurations or execution failures. 19 | """ 20 | 21 | pass 22 | 23 | 24 | class WorkflowError(AgentError): 25 | """Exception raised for errors in workflow execution. 26 | 27 | This exception is raised when there are errors in managing 28 | or executing the overall workflow, such as invalid step 29 | sequences or workflow state issues. 
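    Example:
        A sketch of handling a workflow failure (hypothetical usage; the
        agent instance and its run() entry point are illustrative
        stand-ins, not APIs defined in this module):
        ```python
        try:
            result = agent.run("summarize the quarterly report")
        except WorkflowError as e:
            # The workflow state may be inconsistent at this point, so
            # report the failure rather than continuing with partial results
            print(f"Workflow failed: {e}")
        ```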
30 | """ 31 | 32 | pass 33 | 34 | 35 | class ToolError(AgentError): 36 | """Exception raised for errors in tool execution. 37 | 38 | This exception is raised when there are errors in tool operations, 39 | such as invalid tool configurations, registration failures, or 40 | execution errors. 41 | """ 42 | 43 | pass 44 | -------------------------------------------------------------------------------- /clientai/agent/formatting/__init__.py: -------------------------------------------------------------------------------- 1 | from .formatter import AgentFormatter, FormatOptions 2 | 3 | __all__ = ["AgentFormatter", "FormatOptions"] 4 | -------------------------------------------------------------------------------- /clientai/agent/formatting/options.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import Dict, Optional 3 | 4 | 5 | @dataclass 6 | class FormatOptions: 7 | """Configuration options for formatting agent string representations. 8 | 9 | Controls how agent information is formatted into strings, 10 | including indentation, line drawing characters, and text 11 | formatting constraints. 12 | 13 | Attributes: 14 | max_description_length: Maximum length for truncating descriptions 15 | indent: String to use for each level of indentation 16 | tree_chars: Optional mapping of tree drawing characters for formatting. 17 | Supported keys: 18 | - "vertical": Vertical line character (│) 19 | - "horizontal": Horizontal line character (─) 20 | - "branch": Branch character (├) 21 | - "corner": Corner character (└) 22 | - "top": Top junction character (╭) 23 | - "bottom": Bottom junction character (╰) 24 | 25 | Example: 26 | Basic formatting options: 27 | ```python 28 | # Default options 29 | options = FormatOptions() 30 | 31 | # Custom formatting 32 | options = FormatOptions( 33 | max_description_length=80, 34 | indent=" ", 35 | tree_chars={ 36 | "vertical": "│", 37 | "horizontal": "─", 38 | "branch": "├", 39 | "corner": "└", 40 | "top": "╭", 41 | "bottom": "╰" 42 | } 43 | ) 44 | ``` 45 | 46 | Notes: 47 | - If tree_chars is not provided, defaults to standard 48 | box-drawing characters 49 | - Description truncation adds "..." when exceeding 50 | max_description_length 51 | """ 52 | 53 | max_description_length: int = 60 54 | indent: str = " " 55 | tree_chars: Optional[Dict[str, str]] = None 56 | 57 | def __post_init__(self): 58 | """Initialize default tree drawing characters if not provided. 
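        A user-supplied tree_chars mapping replaces the defaults
        wholesale; missing keys are not filled in from the default set.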
59 | 60 | Example: 61 | Default initialization: 62 | ```python 63 | options = FormatOptions() 64 | print(options.tree_chars["vertical"]) # Output: │ 65 | 66 | # A custom mapping replaces the defaults wholesale (no per-key merge) 67 | options = FormatOptions(tree_chars={"vertical": "|"}) 68 | print(options.tree_chars["vertical"]) # Output: | 69 | print("horizontal" in options.tree_chars) # Output: False 70 | ``` 71 | """ 72 | default_chars = { 73 | "vertical": "│", 74 | "horizontal": "─", 75 | "branch": "├", 76 | "corner": "└", 77 | "top": "╭", 78 | "bottom": "╰", 79 | } 80 | self.tree_chars = self.tree_chars or default_chars 81 | -------------------------------------------------------------------------------- /clientai/agent/steps/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import Step 2 | from .decorators import act, observe, run, synthesize, think 3 | from .types import StepType 4 | 5 | __all__ = [ 6 | "think", 7 | "act", 8 | "observe", 9 | "synthesize", 10 | "run", 11 | "StepType", 12 | "Step", 13 | ] 14 | -------------------------------------------------------------------------------- /clientai/agent/steps/registry.py: -------------------------------------------------------------------------------- 1 | from collections import defaultdict 2 | from typing import Dict, List, Optional, Set 3 | 4 | from .base import Step 5 | from .types import StepType 6 | 7 | 8 | class StepRegistry: 9 | """Registry for managing and organizing workflow steps. 10 | 11 | Maintains a registry of workflow steps with support for dependency 12 | tracking, type indexing, and step validation. Ensures steps are 13 | properly organized and can be executed in the correct order. 14 | 15 | Attributes: 16 | _steps: Dictionary mapping step names to their Step instances 17 | _type_index: Index mapping step types to sets of step names 18 | _dependency_graph: Graph tracking dependencies between steps 19 | 20 | Example: 21 | Basic registry usage: 22 | ```python 23 | registry = StepRegistry() 24 | 25 | # Register a step 26 | registry.register(analyze_step) 27 | 28 | # Get steps by type 29 | think_steps = registry.get_by_type(StepType.THINK) 30 | 31 | # Get step dependencies 32 | deps = registry.get_dependencies("process_step") 33 | print(deps) # Output: {"analyze_step"} 34 | ``` 35 | 36 | Notes: 37 | - Steps are stored with their dependencies for workflow ordering 38 | - Type indexing enables efficient step retrieval by type 39 | - Steps must have unique names within the registry 40 | """ 41 | 42 | def __init__(self) -> None: 43 | """ 44 | Initialize an empty step registry. 45 | 46 | Creates empty storage for steps and initializes the type index and 47 | dependency graph used for step lookup and ordering. 48 | """ 49 | self._steps: Dict[str, Step] = {} 50 | self._type_index: Dict[StepType, Set[str]] = defaultdict(set) 51 | self._dependency_graph: Dict[str, Set[str]] = defaultdict(set) 52 | 53 | def register(self, step: Step) -> None: 54 | """Register a step in the registry. 
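        Registering a step also updates the dependency graph, checking
        the new step's compatibility against every previously registered
        step.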
55 | 56 | Args: 57 | step: The step instance to register 58 | 59 | Raises: 60 | ValueError: If a step with the same name is already registered 61 | 62 | Example: 63 | Register a step: 64 | ```python 65 | registry = StepRegistry() 66 | try: 67 | registry.register(analyze_step) 68 | print("Step registered successfully") 69 | except ValueError as e: 70 | print(f"Registration failed: {e}") 71 | ``` 72 | """ 73 | if step.name in self._steps: 74 | raise ValueError(f"Step '{step.name}' already registered") 75 | 76 | self._steps[step.name] = step 77 | self._type_index[step.step_type].add(step.name) 78 | self._update_dependencies(step) 79 | 80 | def _update_dependencies(self, step: Step) -> None: 81 | """ 82 | Update step dependencies based on compatibility. 83 | 84 | Analyzes the new step's compatibility with existing steps and updates 85 | the dependency graph accordingly. 86 | 87 | Args: 88 | step: The step whose dependencies need to be updated. 89 | """ 90 | for existing_step in self._steps.values(): 91 | if step.is_compatible_with(existing_step): 92 | self._dependency_graph[step.name].add(existing_step.name) 93 | 94 | def get(self, name: str) -> Optional[Step]: 95 | """Retrieve a step by its name. 96 | 97 | Args: 98 | name: The name of the step to retrieve 99 | 100 | Returns: 101 | Optional[Step]: The requested step if found, None otherwise 102 | 103 | Example: 104 | Retrieve a step: 105 | ```python 106 | step = registry.get("analyze_step") 107 | if step: 108 | print(f"Found step: {step.name}") 109 | else: 110 | print("Step not found") 111 | ``` 112 | """ 113 | return self._steps.get(name) 114 | 115 | def get_by_type(self, step_type: StepType) -> List[Step]: 116 | """Retrieve all steps of a specific type. 117 | 118 | Args: 119 | step_type: The type of steps to retrieve 120 | 121 | Returns: 122 | List[Step]: List of steps matching the specified type 123 | 124 | Example: 125 | Get steps by type: 126 | ```python 127 | think_steps = registry.get_by_type(StepType.THINK) 128 | print(f"Found {len(think_steps)} thinking steps:") 129 | for step in think_steps: 130 | print(f"- {step.name}") 131 | ``` 132 | """ 133 | step_names = self._type_index[step_type] 134 | return [self._steps[name] for name in step_names] 135 | 136 | def get_dependencies(self, step_name: str) -> Set[str]: 137 | """Get names of steps that a step depends on. 138 | 139 | Args: 140 | step_name: The name of the step 141 | 142 | Returns: 143 | Set[str]: Set of step names this step depends on 144 | 145 | Example: 146 | Check dependencies: 147 | ```python 148 | deps = registry.get_dependencies("final_step") 149 | print(f"Dependencies: {', '.join(deps)}") # Output: "step1, step2" 150 | ``` 151 | """ 152 | return self._dependency_graph.get(step_name, set()) 153 | 154 | def remove(self, name: str) -> None: 155 | """Remove a step from the registry. 156 | 157 | Args: 158 | name: The name of the step to remove 159 | 160 | Example: 161 | Remove a step: 162 | ```python 163 | registry.remove("old_step") 164 | print(registry.get("old_step")) # Output: None 165 | ``` 166 | """ 167 | if name not in self._steps: 168 | return 169 | 170 | step = self._steps[name] 171 | del self._steps[name] 172 | self._type_index[step.step_type].discard(name) 173 | self._dependency_graph.pop(name, None)  # pop(): the entry may not exist if the step never acquired dependencies 174 | for deps in self._dependency_graph.values(): 175 | deps.discard(name) 176 | 177 | def clear(self) -> None: 178 | """Clear all registered steps and indexes. 
179 | 180 | Example: 181 | Clear registry: 182 | ```python 183 | registry.clear() 184 | print(len(registry.get_by_type(StepType.THINK))) # Output: 0 185 | ``` 186 | """ 187 | self._steps.clear() 188 | self._type_index.clear() 189 | self._dependency_graph.clear() 190 | -------------------------------------------------------------------------------- /clientai/agent/steps/types.py: -------------------------------------------------------------------------------- 1 | from enum import Enum, auto 2 | from typing import Any, Protocol 3 | 4 | 5 | class StepType(Enum): 6 | """Type classification for workflow steps. 7 | 8 | Defines the different types of steps that can exist 9 | in a workflow, each representing a different kind 10 | of operation or phase in the agent's processing. 11 | 12 | Attributes: 13 | THINK: Analysis and reasoning steps that process information 14 | ACT: Decision-making and action steps that perform operations 15 | OBSERVE: Data collection and observation steps that gather information 16 | SYNTHESIZE: Integration steps that combine or summarize information 17 | 18 | Example: 19 | Using step types: 20 | ```python 21 | # Reference step types 22 | step_type = StepType.THINK 23 | print(step_type.name) # Output: "THINK" 24 | 25 | # Use in step decoration 26 | @think("analyze") # Uses StepType.THINK internally 27 | def analyze_data(self, input_data: str) -> str: 28 | return f"Analysis of {input_data}" 29 | 30 | # Compare step types 31 | if step.step_type == StepType.ACT: 32 | print("This is an action step") 33 | ``` 34 | 35 | Notes: 36 | - Each step type has default configurations (temperature, etc.) 37 | - Step types influence tool availability through scoping 38 | - Custom steps typically default to ACT type behavior 39 | """ 40 | 41 | THINK = auto() 42 | ACT = auto() 43 | OBSERVE = auto() 44 | SYNTHESIZE = auto() 45 | 46 | 47 | class StepFunction(Protocol): 48 | """Protocol defining the interface for step functions. 49 | 50 | Defines the required signature for callable objects that can serve as 51 | workflow steps, ensuring type safety and consistent behavior. 52 | 53 | Methods: 54 | __call__: Execute the step with provided input data 55 | 56 | Example: 57 | Implementing the protocol: 58 | ```python 59 | class MyStep: 60 | def __call__(self, input_data: Any) -> str: 61 | return f"Processed: {input_data}" 62 | 63 | # Function conforming to protocol 64 | def example_step(input_data: Any) -> str: 65 | return f"Handled: {input_data}" 66 | 67 | # Using with type checking 68 | def register_step(step: StepFunction) -> None: 69 | result = step("test input") 70 | print(result) 71 | ``` 72 | 73 | Notes: 74 | - Step functions must return strings 75 | - Input can be any type but should be documented 76 | - Protocol enables static type checking 77 | """ 78 | 79 | def __call__(self, input_data: Any) -> str: 80 | """Execute the step's processing logic. 81 | 82 | Args: 83 | input_data: Data to be processed by the step 84 | 85 | Returns: 86 | str: Result of step processing 87 | """ 88 | ... 
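# A minimal sketch of the structural typing StepFunction enables: any plain
# function with a matching signature satisfies the protocol, with no
# inheritance or registration required. (Hypothetical names below, not part
# of the library API.)
def _uppercase_step(input_data: Any) -> str:
    # Coerce the input to a string and upper-case it
    return str(input_data).upper()


_typed_step: StepFunction = _uppercase_step  # accepted by static type checkers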
89 | -------------------------------------------------------------------------------- /clientai/agent/tools/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import Tool 2 | from .decorators import tool 3 | from .selection import ToolCallDecision, ToolSelectionConfig, ToolSelector 4 | 5 | __all__ = [ 6 | "Tool", 7 | "tool", 8 | "ToolCallDecision", 9 | "ToolSelectionConfig", 10 | "ToolSelector", 11 | ] 12 | -------------------------------------------------------------------------------- /clientai/agent/tools/decorators.py: -------------------------------------------------------------------------------- 1 | from typing import Callable, Optional, TypeVar 2 | 3 | from .base import Tool 4 | 5 | T = TypeVar("T") 6 | 7 | 8 | def tool( 9 | name: Optional[str] = None, 10 | description: Optional[str] = None, 11 | ) -> Callable[[Callable[..., T]], Tool]: 12 | """Decorator that converts a function into a Tool instance. 13 | 14 | Creates a Tool with optional custom name and description. If not provided, 15 | uses the function's name and docstring. The decorated function must have 16 | proper type hints. 17 | 18 | Args: 19 | name: Optional custom name for the tool. Defaults to function name. 20 | description: Optional custom description. Defaults to docstring. 21 | 22 | Returns: 23 | A decorator function that creates a Tool instance. 24 | 25 | Raises: 26 | ValueError: If the function lacks required type hints 27 | or has invalid signature. 28 | 29 | Example: 30 | Basic usage with automatic name and description: 31 | ```python 32 | @tool 33 | def calculate(x: int, y: int) -> int: 34 | '''Add two numbers together.''' 35 | return x + y 36 | ``` 37 | 38 | Custom name and description: 39 | ```python 40 | @tool( 41 | name="Calculator", 42 | description="Performs addition of two integers" 43 | ) 44 | def add_numbers(x: int, y: int) -> int: 45 | return x + y 46 | ``` 47 | 48 | Using the decorated tool: 49 | ```python 50 | result = calculate(5, 3) # Returns 8 51 | print(calculate.name) # Output: "calculate" 52 | ``` 53 | 54 | Notes: 55 | - Decorated function must have type hints for all parameters 56 | - Return type hint is required 57 | - Description defaults to function docstring if not provided 58 | - Tool inherits all function attributes and metadata 59 | """ 60 | 61 | if callable(name): 62 | return Tool.create(func=name) 63 | 64 | def decorator(func: Callable[..., T]) -> Tool: 65 | """Create a Tool instance from the decorated function. 66 | 67 | Args: 68 | func: The function to convert into a tool. 69 | 70 | Returns: 71 | A Tool instance wrapping the original function. 72 | 73 | Raises: 74 | ValueError: If function signature validation fails. 75 | """ 76 | return Tool.create( 77 | func=func, 78 | name=name, 79 | description=description, 80 | ) 81 | 82 | return decorator 83 | -------------------------------------------------------------------------------- /clientai/agent/tools/registry.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, List, Optional, Set 2 | 3 | from ..config.tools import ToolConfig 4 | from .base import Tool 5 | from .types import ToolScope 6 | 7 | 8 | class ToolRegistry: 9 | """Registry for managing and organizing tools by name and scope. 10 | 11 | A centralized registry that maintains a collection of tools with 12 | efficient lookup by name and scope. 
It ensures unique tool names 13 | and proper scope indexing for quick access to tools available in 14 | different execution contexts. 15 | 16 | Attributes: 17 | _tools: Dictionary mapping tool names to Tool instances. 18 | _scope_index: Dictionary mapping scopes to sets of tool names. 19 | 20 | Example: 21 | ```python 22 | registry = ToolRegistry() 23 | 24 | # Register a tool with configuration 25 | config = ToolConfig( 26 | tool=calculator_func, 27 | scopes=["think", "act"], 28 | name="Calculator" 29 | ) 30 | registry.register(config) 31 | 32 | # Get tools for a scope 33 | think_tools = registry.get_for_scope("think") 34 | 35 | # Check if tool exists 36 | if "Calculator" in registry: 37 | tool = registry.get("Calculator") 38 | ``` 39 | """ 40 | 41 | def __init__(self) -> None: 42 | """ 43 | Initialize an empty tool registry. 44 | 45 | Creates empty storage for tools and initializes scope indexing for 46 | all available tool scopes. 47 | """ 48 | self._tools: Dict[str, Tool] = {} 49 | self._scope_index: Dict[ToolScope, Set[str]] = { 50 | scope: set() for scope in ToolScope 51 | } 52 | 53 | def register(self, tool_config: ToolConfig) -> None: 54 | """Register a new tool with the registry. 55 | 56 | Creates a Tool instance if needed and adds it to the registry with 57 | proper scope indexing. Handles scope inheritance for tools marked 58 | as available in all scopes. 59 | 60 | Args: 61 | tool_config: Configuration specifying the tool and its properties. 62 | 63 | Raises: 64 | ValueError: If a tool with the same name is already registered. 65 | 66 | Example: 67 | ```python 68 | registry = ToolRegistry() 69 | registry.register(ToolConfig( 70 | tool=my_tool, 71 | scopes=["think"], 72 | name="MyTool" 73 | )) 74 | ``` 75 | """ 76 | tool = ( 77 | tool_config.tool 78 | if isinstance(tool_config.tool, Tool) 79 | else Tool.create( 80 | func=tool_config.tool, 81 | name=tool_config.name, 82 | description=tool_config.description, 83 | ) 84 | ) 85 | 86 | if tool.name in self._tools: 87 | raise ValueError(f"Tool '{tool.name}' already registered") 88 | 89 | self._tools[tool.name] = tool 90 | 91 | for scope in tool_config.scopes: 92 | self._scope_index[scope].add(tool.name) 93 | if scope == ToolScope.ALL: 94 | for s in ToolScope: 95 | self._scope_index[s].add(tool.name) 96 | 97 | def get(self, name: str) -> Optional[Tool]: 98 | """Retrieve a tool by its name. 99 | 100 | Args: 101 | name: The name of the tool to retrieve. 102 | 103 | Returns: 104 | The requested Tool instance, or None if not found. 105 | 106 | Example: 107 | ```python 108 | tool = registry.get("Calculator") 109 | if tool: 110 | result = tool(5, 3) 111 | ``` 112 | """ 113 | return self._tools.get(name) 114 | 115 | def get_for_scope(self, scope: Optional[str] = None) -> List[Tool]: 116 | """Get all tools available in a specific scope. 117 | 118 | Args: 119 | scope: The scope to filter tools by. If None, returns all tools. 120 | 121 | Returns: 122 | List of Tool instances available in the specified scope. 123 | 124 | Raises: 125 | ValueError: If the specified scope is invalid. 126 | 127 | Example: 128 | ```python 129 | think_tools = registry.get_for_scope("think") 130 | all_tools = registry.get_for_scope(None) 131 | ``` 132 | """ 133 | if scope is None: 134 | return list(self._tools.values()) 135 | 136 | tool_scope = ToolScope.from_str(scope) 137 | return [self._tools[name] for name in self._scope_index[tool_scope]] 138 | 139 | def __contains__(self, name: str) -> bool: 140 | """ 141 | Check if a tool is registered by name. 
142 | 143 | Args: 144 | name: The name of the tool to check. 145 | 146 | Returns: 147 | True if the tool is registered, False otherwise. 148 | 149 | Example: 150 | ```python 151 | if "Calculator" in registry: 152 | tool = registry.get("Calculator") 153 | ``` 154 | """ 155 | return name in self._tools 156 | 157 | def __len__(self) -> int: 158 | """ 159 | Get the total number of registered tools. 160 | 161 | Returns: 162 | Number of tools in the registry. 163 | 164 | Example: 165 | ```python 166 | print(f"Registry contains {len(registry)} tools") 167 | ``` 168 | """ 169 | return len(self._tools) 170 | 171 | 172 | def is_tool(obj: Any) -> bool: 173 | """Check if an object is a Tool instance. 174 | 175 | Args: 176 | obj: The object to check. 177 | 178 | Returns: 179 | True if the object is a Tool instance, False otherwise. 180 | 181 | Example: 182 | ```python 183 | if is_tool(obj): 184 | result = obj(5, 3) # We know obj is a Tool 185 | ``` 186 | """ 187 | return isinstance(obj, Tool) 188 | -------------------------------------------------------------------------------- /clientai/agent/tools/selection/__init__.py: -------------------------------------------------------------------------------- 1 | from .config import ToolCallDecision, ToolSelectionConfig 2 | from .selector import ToolSelector 3 | 4 | __all__ = [ 5 | "ToolCallDecision", 6 | "ToolSelectionConfig", 7 | "ToolSelector", 8 | ] 9 | -------------------------------------------------------------------------------- /clientai/agent/types/__init__.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from typing import Any, Callable, Protocol, TypeVar 3 | 4 | AgentInput = TypeVar("AgentInput") 5 | AgentOutput = TypeVar("AgentOutput") 6 | StepResult = TypeVar("StepResult") 7 | 8 | 9 | class ToolScope(str, Enum): 10 | THINK = "think" 11 | ACT = "act" 12 | OBSERVE = "observe" 13 | SYNTHESIZE = "synthesize" 14 | ALL = "all" 15 | 16 | @classmethod 17 | def from_str(cls, scope: str) -> "ToolScope": 18 | try: 19 | return cls[scope.upper()] 20 | except KeyError: 21 | valid = [s.value for s in cls] 22 | raise ValueError( 23 | f"Invalid scope: '{scope}'. Must be one of: {', '.join(valid)}" 24 | ) 25 | 26 | def __str__(self) -> str: 27 | return self.value 28 | 29 | 30 | class ToolProtocol(Protocol): 31 | func: Callable[..., Any] 32 | name: str 33 | description: str 34 | 35 | def __call__(self, *args: Any, **kwargs: Any) -> Any: ... 36 | 37 | 38 | __all__ = [ 39 | "AgentInput", 40 | "AgentOutput", 41 | "StepResult", 42 | "ToolScope", 43 | "ToolProtocol", 44 | ] 45 | -------------------------------------------------------------------------------- /clientai/agent/types/common.py: -------------------------------------------------------------------------------- 1 | """Type variables for agent input/output and results. 2 | 3 | This module defines generic type variables used throughout the agent system 4 | to provide type safety and clarity for agent operations, step executions, 5 | tool usage, and model interactions. 6 | """ 7 | 8 | from typing import Any, TypeVar 9 | 10 | AgentInput = TypeVar("AgentInput", bound=Any, contravariant=True) 11 | """Type variable representing input accepted by an agent. 12 | 13 | This is a contravariant type variable that can represent any type of input 14 | data that an agent can process. 15 | """ 16 | 17 | AgentOutput = TypeVar("AgentOutput", bound=Any, covariant=True) 18 | """Type variable representing output produced by an agent. 
19 | 20 | This is a covariant type variable that can represent any type of output 21 | data that an agent can produce. 22 | """ 23 | 24 | StepResult = TypeVar("StepResult", bound=Any, covariant=True) 25 | """Type variable representing results from workflow steps. 26 | 27 | This is a covariant type variable that can represent any type of result 28 | produced by executing a workflow step. 29 | """ 30 | 31 | ToolResult = TypeVar("ToolResult", bound=Any, covariant=True) 32 | """Type variable representing results from tool executions. 33 | 34 | This is a covariant type variable that can represent any type of result 35 | produced by executing a tool. 36 | """ 37 | 38 | ModelResult = TypeVar("ModelResult", bound=Any) 39 | """Type variable representing results from model executions. 40 | 41 | This type variable can represent any type of result produced by 42 | executing a language model. 43 | """ 44 | -------------------------------------------------------------------------------- /clientai/agent/types/models.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, TypedDict, Union 2 | 3 | 4 | class ModelParameters(TypedDict, total=False): 5 | """Configuration parameters for language model execution. 6 | 7 | A TypedDict class defining the standard configuration parameters 8 | accepted by language models in the system. All fields are optional. 9 | 10 | Attributes: 11 | name: The name/identifier of the model. 12 | temperature: Sampling temperature between 0 and 1. 13 | max_tokens: Maximum number of tokens to generate. 14 | top_p: Nucleus sampling parameter between 0 and 1. 15 | frequency_penalty: Penalty for token frequency between -2.0 and 2.0. 16 | presence_penalty: Penalty for token presence between -2.0 and 2.0. 17 | stop: Stop sequences for generation, either a single string or list. 18 | stream: Whether to stream responses or return complete. 19 | extra: Additional model-specific parameters. 20 | 21 | Example: 22 | ```python 23 | params: ModelParameters = { 24 | "name": "gpt-4", 25 | "temperature": 0.7, 26 | "max_tokens": 100, 27 | "stream": True 28 | } 29 | ``` 30 | 31 | Notes: 32 | - All fields are optional (total=False) 33 | - temperature and top_p affect randomness of outputs 34 | - frequency_penalty and presence_penalty affect repetition 35 | - stop can be either a single string or list of strings 36 | - extra allows for model-specific parameters not in standard config 37 | """ 38 | 39 | name: str 40 | temperature: float 41 | max_tokens: int 42 | top_p: float 43 | frequency_penalty: float 44 | presence_penalty: float 45 | stop: Union[str, list[str]] 46 | stream: bool 47 | extra: Dict[str, Any] 48 | -------------------------------------------------------------------------------- /clientai/agent/types/protocols.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Callable, List, Optional, Protocol, Union 2 | 3 | from ..steps.base import Step 4 | from ..tools.base import Tool 5 | from .common import AgentInput, AgentOutput, StepResult, ToolResult 6 | 7 | 8 | class StepProtocol(Protocol[StepResult]): 9 | """Protocol defining the interface for step execution. 10 | 11 | A protocol that defines the required methods for step execution, including 12 | validation and execution of step logic. 13 | 14 | Type Args: 15 | StepResult: The type of result returned by the step. 16 | 17 | Methods: 18 | execute: Execute the step with provided input data. 
19 | validate: Validate that input data meets step requirements. 20 | """ 21 | 22 | def execute(self, input_data: Any) -> StepResult: 23 | """Execute the step with provided input data. 24 | 25 | Args: 26 | input_data: Input data for step execution. 27 | 28 | Returns: 29 | StepResult: The result of step execution. 30 | """ 31 | ... 32 | 33 | def validate(self, input_data: Any) -> bool: 34 | """Validate input data meets step requirements. 35 | 36 | Args: 37 | input_data: Input data to validate. 38 | 39 | Returns: 40 | bool: True if input is valid, False otherwise. 41 | """ 42 | ... 43 | 44 | 45 | class ToolProtocol(Protocol[ToolResult]): 46 | """Protocol defining the interface for tool execution. 47 | 48 | A protocol that defines the required methods for tool execution, including 49 | validation and calling of tool functions. 50 | 51 | Type Args: 52 | ToolResult: The type of result returned by the tool. 53 | 54 | Methods: 55 | __call__: Execute the tool with provided arguments. 56 | validate: Validate that arguments meet tool requirements. 57 | """ 58 | 59 | def __call__(self, *args: Any, **kwargs: Any) -> ToolResult: 60 | """Execute the tool with provided arguments. 61 | 62 | Args: 63 | *args: Positional arguments for tool execution. 64 | **kwargs: Keyword arguments for tool execution. 65 | 66 | Returns: 67 | ToolResult: The result of tool execution. 68 | """ 69 | ... 70 | 71 | def validate(self, *args: Any, **kwargs: Any) -> bool: 72 | """Validate arguments meet tool requirements. 73 | 74 | Args: 75 | *args: Positional arguments to validate. 76 | **kwargs: Keyword arguments to validate. 77 | 78 | Returns: 79 | bool: True if arguments are valid, False otherwise. 80 | """ 81 | ... 82 | 83 | 84 | class AgentProtocol(Protocol[AgentInput, AgentOutput]): 85 | """Protocol defining the interface for agent execution. 86 | 87 | A protocol that defines the required methods for agent execution, including 88 | running workflows, managing tools, and maintaining state. 89 | 90 | Type Args: 91 | AgentInput: The type of input accepted by the agent. 92 | AgentOutput: The type of output produced by the agent. 93 | 94 | Methods: 95 | run: Execute the agent's workflow with provided input. 96 | register_tool: Register a new tool with the agent. 97 | get_tools: Retrieve registered tools, optionally filtered by scope. 98 | reset: Reset agent state. 99 | """ 100 | 101 | def run(self, input_data: AgentInput) -> AgentOutput: 102 | """Execute the agent's workflow with provided input. 103 | 104 | Args: 105 | input_data: Input data for workflow execution. 106 | 107 | Returns: 108 | AgentOutput: The result of workflow execution. 109 | """ 110 | ... 111 | 112 | def register_tool( 113 | self, 114 | tool: Union[Callable[..., Any], Tool, ToolProtocol[Any]], 115 | *, 116 | name: Optional[str] = None, 117 | description: Optional[str] = None, 118 | scopes: Union[List[str], str] = "all", 119 | ) -> Tool: 120 | """Register a new tool with the agent. 121 | 122 | Args: 123 | tool: The tool to register, either as a callable or Tool instance. 124 | name: Optional name for the tool. 125 | description: Optional description of the tool. 126 | scopes: Scopes where the tool can be used. 127 | 128 | Returns: 129 | Tool: The registered tool instance. 130 | """ 131 | ... 132 | 133 | def get_tools(self, scope: Optional[str] = None) -> List[Tool]: 134 | """Retrieve registered tools, optionally filtered by scope. 135 | 136 | Args: 137 | scope: Optional scope to filter tools by. 138 | 139 | Returns: 140 | List[Tool]: List of matching tools. 
141 | """ 142 | ... 143 | 144 | def reset(self) -> None: 145 | """Reset agent state. 146 | 147 | Clears all internal state and returns agent to initial configuration. 148 | """ 149 | ... 150 | 151 | 152 | class StepExecutionProtocol(Protocol): 153 | """Protocol defining the interface for step execution engines. 154 | 155 | A protocol that defines the required methods for executing workflow steps, 156 | including handling arguments and results. 157 | """ 158 | 159 | def execute_step(self, step: Step, *args: Any, **kwargs: Any) -> Any: 160 | """Execute a workflow step. 161 | 162 | Args: 163 | step: The step to execute. 164 | *args: Additional positional arguments. 165 | **kwargs: Additional keyword arguments. 166 | 167 | Returns: 168 | Any: The result of step execution. 169 | """ 170 | ... 171 | -------------------------------------------------------------------------------- /clientai/agent/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .exceptions import AgentError, StepError, ToolError 2 | from .validation import validate_step, validate_tool 3 | 4 | __all__ = [ 5 | "AgentError", 6 | "StepError", 7 | "ToolError", 8 | "validate_step", 9 | "validate_tool", 10 | ] 11 | -------------------------------------------------------------------------------- /clientai/agent/utils/exceptions.py: -------------------------------------------------------------------------------- 1 | class AgentError(Exception): 2 | """Base exception class for agent-related errors. 3 | 4 | This exception serves as the base class for agent-specific errors 5 | in the utils module. All other agent utility exceptions inherit from this. 6 | """ 7 | 8 | 9 | class StepError(AgentError): 10 | """Exception raised for errors in step validation and execution. 11 | 12 | This exception is raised during step validation or execution in utility 13 | functions, such as when validating step signatures or handling step data. 14 | """ 15 | 16 | 17 | class ToolError(AgentError): 18 | """Exception raised for errors in tool validation and execution. 19 | 20 | This exception is raised during tool validation or execution in utility 21 | functions, such as when validating tool signatures or processing tool data. 22 | """ 23 | -------------------------------------------------------------------------------- /clientai/agent/utils/validation.py: -------------------------------------------------------------------------------- 1 | from typing import Callable 2 | 3 | from ..steps.base import Step 4 | from ..tools.base import Tool 5 | from .exceptions import StepError, ToolError 6 | 7 | 8 | def validate_step(step: Step) -> None: 9 | """Validate step configuration and metadata. 10 | 11 | Checks that the step has a valid callable 12 | function with proper configuration. 13 | 14 | Args: 15 | step: The Step instance to validate. 16 | 17 | Raises: 18 | StepError: If the step has no function or the function is not callable. 19 | """ 20 | if step.func is None: # pragma: no cover 21 | raise StepError(f"Step {step.name} has no function") 22 | 23 | if not callable(step.func): # pragma: no cover 24 | raise StepError(f"Step {step.name} function is not callable") 25 | 26 | 27 | def validate_tool(tool: Tool) -> None: 28 | """Validate tool configuration and signature. 29 | 30 | Checks that the tool has a valid callable function 31 | with proper configuration. 32 | 33 | Args: 34 | tool: The Tool instance to validate. 35 | 36 | Raises: 37 | ToolError: If the tool has no function or the function is not callable. 
38 | """ 39 | if tool.func is None: # pragma: no cover 40 | raise ToolError(f"Tool {tool.name} has no function") 41 | 42 | if not callable(tool.func): # pragma: no cover 43 | raise ToolError(f"Tool {tool.name} function is not callable") 44 | 45 | 46 | def validate_callable(func: Callable) -> None: 47 | """Validate that a callable has proper type hints. 48 | 49 | Checks that the function has complete type annotations 50 | including return type. 51 | 52 | Args: 53 | func: The function to validate. 54 | 55 | Raises: 56 | ValueError: If the function is missing type annotations or return type. 57 | 58 | Example: 59 | ```python 60 | def valid_func(x: int) -> str: 61 | return str(x) 62 | 63 | validate_callable(valid_func) # Passes 64 | 65 | def invalid_func(x): # No type hints 66 | return str(x) 67 | 68 | validate_callable(invalid_func) # Raises ValueError 69 | ``` 70 | """ 71 | if not callable(func): # pragma: no cover 72 | raise ValueError(f"Object {func} is not callable") 73 | -------------------------------------------------------------------------------- /clientai/agent/validation/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import ModelValidator, StepValidator 2 | from .exceptions import ( 3 | SchemaValidationError, 4 | ValidationError, 5 | ) 6 | from .result import ValidationResult 7 | from .types import OutputFormat, ValidatorContext 8 | 9 | __all__ = [ 10 | "ModelValidator", 11 | "StepValidator", 12 | "OutputFormat", 13 | "ValidatorContext", 14 | "ValidationResult", 15 | "ValidationError", 16 | "SchemaValidationError", 17 | ] 18 | -------------------------------------------------------------------------------- /clientai/agent/validation/exceptions.py: -------------------------------------------------------------------------------- 1 | from ..exceptions import AgentError 2 | 3 | 4 | class ValidationError(AgentError): 5 | """Base exception for validation errors.""" 6 | 7 | pass 8 | 9 | 10 | class SchemaValidationError(ValidationError): 11 | """Exception raised when schema validation fails.""" 12 | 13 | pass 14 | -------------------------------------------------------------------------------- /clientai/agent/validation/result.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass, field 2 | from typing import Any, Dict, Generic, TypeVar 3 | 4 | T = TypeVar("T") 5 | 6 | 7 | @dataclass 8 | class ValidationResult(Generic[T]): 9 | """Result of validation operation.""" 10 | 11 | data: T 12 | """The validated data.""" 13 | 14 | is_partial: bool = False 15 | """Whether this is a partial validation result.""" 16 | 17 | errors: Dict[str, Any] = field(default_factory=dict) 18 | """Any validation errors encountered.""" 19 | 20 | warnings: Dict[str, Any] = field(default_factory=dict) 21 | """Any validation warnings generated.""" 22 | 23 | metadata: Dict[str, Any] = field(default_factory=dict) 24 | """Additional metadata about the validation.""" 25 | 26 | @property 27 | def is_valid(self) -> bool: 28 | """Whether validation was successful.""" 29 | return not bool(self.errors) # pragma: no cover 30 | 31 | @property 32 | def is_complete(self) -> bool: 33 | """Whether this is a complete validation result.""" 34 | return not self.is_partial # pragma: no cover 35 | 36 | def __bool__(self) -> bool: 37 | """Boolean representation of validation success.""" 38 | return self.is_valid # pragma: no cover 39 | 
-------------------------------------------------------------------------------- /clientai/agent/validation/types.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from typing import Any, Dict, Generic, Optional, Protocol, TypeVar 3 | 4 | T = TypeVar("T") 5 | T_co = TypeVar("T_co", covariant=True) 6 | 7 | 8 | class OutputFormat(str, Enum): 9 | """Format of the output to be validated.""" 10 | 11 | STRING = "string" 12 | JSON = "json" 13 | 14 | 15 | class Validator(Protocol[T_co]): 16 | """Protocol defining the interface for validators.""" 17 | 18 | def validate(self, data: str, partial: bool = False) -> T_co: 19 | """Validate output data. 20 | 21 | Args: 22 | data: The data to validate 23 | partial: Whether to allow partial validation for streaming 24 | 25 | Returns: 26 | Validated data of type T 27 | 28 | Raises: 29 | ValidationError: If validation fails 30 | """ 31 | ... 32 | 33 | 34 | class ValidatorContext(Generic[T]): 35 | """Context for validation operations.""" 36 | 37 | data: Any 38 | format: OutputFormat 39 | partial: bool 40 | metadata: Optional[Dict[str, Any]] = None 41 | 42 | def __init__( 43 | self, 44 | data: Any, 45 | format: OutputFormat = OutputFormat.STRING, 46 | partial: bool = False, 47 | metadata: Optional[Dict[str, Any]] = None, 48 | ) -> None: 49 | self.data = data 50 | self.format = format 51 | self.partial = partial 52 | self.metadata = metadata or {} 53 | -------------------------------------------------------------------------------- /clientai/ai_provider.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any, List, Optional 3 | 4 | from ._common_types import GenericResponse, Message 5 | 6 | 7 | class AIProvider(ABC): 8 | """ 9 | Abstract base class for AI providers. 10 | """ 11 | 12 | @abstractmethod 13 | def generate_text( 14 | self, 15 | prompt: str, 16 | model: str, 17 | system_prompt: Optional[str] = None, 18 | return_full_response: bool = False, 19 | stream: bool = False, 20 | json_output: bool = False, 21 | temperature: Optional[float] = None, 22 | top_p: Optional[float] = None, 23 | **kwargs: Any, 24 | ) -> GenericResponse: 25 | """ 26 | Generate text based on a given prompt. 27 | 28 | Args: 29 | prompt: The input prompt for text generation. 30 | model: The name or identifier of the AI model to use. 31 | system_prompt: Optional system prompt to guide model behavior. 32 | return_full_response: If True, return the full response object 33 | instead of just the generated text. 34 | stream: If True, return an iterator for streaming responses. 35 | json_output: If True, format the response as valid JSON. 36 | Each provider uses its native JSON support mechanism. 37 | temperature: Optional temperature value controlling randomness. 38 | Usually between 0.0 and 2.0, with lower values making 39 | the output more focused and deterministic, and higher 40 | values making it more creative and variable. 41 | top_p: Optional nucleus sampling parameter controlling diversity. 42 | Usually between 0.0 and 1.0, with lower values making the 43 | output more focused on likely tokens, and higher values 44 | allowing more diverse selections. 45 | **kwargs: Additional keyword arguments specific to 46 | the provider's API. 47 | 48 | Returns: 49 | GenericResponse: 50 | The generated text response, full response object, 51 | or an iterator for streaming responses. 
52 | 53 | Note: 54 | When json_output is True: 55 | - OpenAI/Groq use response_format={"type": "json_object"} 56 | - Replicate adds output="json" to input parameters 57 | - Ollama uses format="json" parameter 58 | 59 | Temperature ranges: 60 | - OpenAI: 0.0 to 2.0 (default: 1.0) 61 | - Ollama: 0.0 to 2.0 (default: 0.8) 62 | - Replicate: Model-dependent 63 | - Groq: 0.0 to 2.0 (default: 1.0) 64 | 65 | Top-p ranges: 66 | - OpenAI: 0.0 to 1.0 (default: 1.0) 67 | - Ollama: 0.0 to 1.0 (default: 0.9) 68 | - Replicate: Model-dependent 69 | - Groq: 0.0 to 1.0 (default: 1.0) 70 | """ 71 | pass # pragma: no cover 72 | 73 | @abstractmethod 74 | def chat( 75 | self, 76 | messages: List[Message], 77 | model: str, 78 | system_prompt: Optional[str] = None, 79 | return_full_response: bool = False, 80 | stream: bool = False, 81 | json_output: bool = False, 82 | temperature: Optional[float] = None, 83 | top_p: Optional[float] = None, 84 | **kwargs: Any, 85 | ) -> GenericResponse: 86 | """ 87 | Engage in a chat conversation. 88 | 89 | Args: 90 | messages: A list of message dictionaries, each containing 91 | 'role' and 'content'. 92 | model: The name or identifier of the AI model to use. 93 | system_prompt: Optional system prompt to guide model behavior. 94 | return_full_response: If True, return the full response object 95 | instead of just the chat content. 96 | stream: If True, return an iterator for streaming responses. 97 | json_output: If True, format the response as valid JSON. 98 | Each provider uses its native JSON support mechanism. 99 | temperature: Optional temperature value controlling randomness. 100 | Usually between 0.0 and 2.0, with lower values making 101 | the output more focused and deterministic, and higher 102 | values making it more creative and variable. 103 | top_p: Optional nucleus sampling parameter controlling diversity. 104 | Usually between 0.0 and 1.0, with lower values making the 105 | output more focused on likely tokens, and higher values 106 | allowing more diverse selections. 107 | **kwargs: Additional keyword arguments specific to 108 | the provider's API. 109 | 110 | Returns: 111 | GenericResponse: 112 | The chat response, either as a string, a dictionary, 113 | or an iterator for streaming responses. 
114 | 115 | Note: 116 | When json_output is True: 117 | - OpenAI/Groq use response_format={"type": "json_object"} 118 | - Replicate adds output="json" to input parameters 119 | - Ollama uses format="json" parameter 120 | 121 | Temperature ranges: 122 | - OpenAI: 0.0 to 2.0 (default: 1.0) 123 | - Ollama: 0.0 to 2.0 (default: 0.8) 124 | - Replicate: Model-dependent 125 | - Groq: 0.0 to 2.0 (default: 1.0) 126 | 127 | Top-p ranges: 128 | - OpenAI: 0.0 to 1.0 (default: 1.0) 129 | - Ollama: 0.0 to 1.0 (default: 0.9) 130 | - Replicate: Model-dependent 131 | - Groq: 0.0 to 1.0 (default: 1.0) 132 | """ 133 | pass # pragma: no cover 134 | -------------------------------------------------------------------------------- /clientai/exceptions.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Type 2 | 3 | 4 | class ClientAIError(Exception): 5 | """Base exception class for ClientAI errors.""" 6 | 7 | def __init__( 8 | self, 9 | message: str, 10 | status_code: Optional[int] = None, 11 | original_error: Optional[Exception] = None, 12 | ): 13 | super().__init__(message) 14 | self.status_code = status_code 15 | self.original_error = original_error 16 | 17 | def __str__(self): 18 | error_msg = super().__str__() 19 | if self.status_code: 20 | error_msg = f"[{self.status_code}] {error_msg}" 21 | return error_msg 22 | 23 | @property 24 | def original_exception(self) -> Optional[Exception]: 25 | """Returns the original exception object if available.""" 26 | return self.original_error 27 | 28 | 29 | class AuthenticationError(ClientAIError): 30 | """Raised when there's an authentication problem with the AI provider.""" 31 | 32 | 33 | class APIError(ClientAIError): 34 | """Raised when there's an API-related error from the AI provider.""" 35 | 36 | 37 | class RateLimitError(ClientAIError): 38 | """Raised when the AI provider's rate limit is exceeded.""" 39 | 40 | 41 | class InvalidRequestError(ClientAIError): 42 | """Raised when the request to the AI provider is invalid.""" 43 | 44 | 45 | class ModelError(ClientAIError): 46 | """Raised when there's an issue with the specified model.""" 47 | 48 | 49 | class ProviderNotInstalledError(ClientAIError): 50 | """Raised when the required provider package is not installed.""" 51 | 52 | 53 | class TimeoutError(ClientAIError): 54 | """Raised when a request to the AI provider times out.""" 55 | 56 | 57 | def map_status_code_to_exception( 58 | status_code: int, 59 | ) -> Type[ClientAIError]: # pragma: no cover 60 | """ 61 | Maps an HTTP status code to the appropriate ClientAI exception class. 62 | 63 | Args: 64 | status_code (int): The HTTP status code. 65 | 66 | Returns: 67 | Type[ClientAIError]: The appropriate ClientAI exception class. 68 | """ 69 | if status_code == 401: 70 | return AuthenticationError 71 | elif status_code == 429: 72 | return RateLimitError 73 | elif status_code == 400: 74 | return InvalidRequestError 75 | elif status_code == 404: 76 | return ModelError 77 | elif status_code == 408: 78 | return TimeoutError 79 | elif status_code >= 500: 80 | return APIError 81 | else: 82 | return APIError 83 | 84 | 85 | def raise_clientai_error( 86 | status_code: int, message: str, original_error: Optional[Exception] = None 87 | ) -> None: 88 | """ 89 | Raises the appropriate ClientAI exception based on the status code. 90 | 91 | Args: 92 | status_code (int): The HTTP status code. 93 | message (str): The error message. 94 | original_error (Exception, optional): The original exception caught. 
95 | 96 | Raises: 97 | ClientAIError: The appropriate ClientAI exception. 98 | """ 99 | exception_class = map_status_code_to_exception( 100 | status_code, 101 | ) 102 | raise exception_class(message, status_code, original_error) 103 | -------------------------------------------------------------------------------- /clientai/groq/__init__.py: -------------------------------------------------------------------------------- 1 | from .._constants import GROQ_INSTALLED 2 | from .provider import Provider 3 | 4 | __all__ = [ 5 | "Provider", 6 | "GROQ_INSTALLED", 7 | ] 8 | -------------------------------------------------------------------------------- /clientai/groq/_typing.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from collections.abc import Iterator 4 | from dataclasses import dataclass 5 | from typing import ( 6 | Any, 7 | List, 8 | Literal, 9 | Optional, 10 | Protocol, 11 | Union, 12 | ) 13 | 14 | from .._common_types import GenericResponse 15 | 16 | 17 | @dataclass 18 | class Message: 19 | role: Literal["system", "user", "assistant"] 20 | content: str 21 | 22 | 23 | @dataclass 24 | class GroqChoice: 25 | index: int 26 | message: Message 27 | logprobs: Optional[Any] 28 | finish_reason: Optional[str] 29 | 30 | 31 | @dataclass 32 | class GroqUsage: 33 | queue_time: float 34 | prompt_tokens: int 35 | prompt_time: float 36 | completion_tokens: int 37 | completion_time: float 38 | total_tokens: int 39 | total_time: float 40 | 41 | 42 | @dataclass 43 | class GroqMetadata: 44 | id: str 45 | 46 | 47 | @dataclass 48 | class GroqResponse: 49 | id: str 50 | object: str 51 | created: int 52 | model: str 53 | choices: List[GroqChoice] 54 | usage: GroqUsage 55 | system_fingerprint: str 56 | x_groq: GroqMetadata 57 | 58 | 59 | @dataclass 60 | class GroqStreamDelta: 61 | role: Optional[Literal["system", "user", "assistant"]] = None 62 | content: Optional[str] = None 63 | 64 | 65 | @dataclass 66 | class GroqStreamChoice: 67 | index: int 68 | delta: GroqStreamDelta 69 | finish_reason: Optional[str] 70 | 71 | 72 | @dataclass 73 | class GroqStreamResponse: 74 | id: str 75 | object: str 76 | created: int 77 | model: str 78 | choices: List[GroqStreamChoice] 79 | system_fingerprint: str 80 | x_groq: GroqMetadata 81 | 82 | 83 | class GroqChatCompletionProtocol(Protocol): 84 | def create( 85 | self, 86 | *, 87 | messages: List[dict[str, str]], 88 | model: str, 89 | stream: bool = False, 90 | **kwargs: Any, 91 | ) -> Union[GroqResponse, Iterator[GroqStreamResponse]]: ... 
92 | 93 | 94 | class GroqChatProtocol(Protocol): 95 | completions: GroqChatCompletionProtocol 96 | 97 | 98 | class GroqClientProtocol(Protocol): 99 | chat: GroqChatProtocol 100 | 101 | 102 | GroqProvider = Any 103 | GroqFullResponse = Union[GroqResponse, GroqStreamResponse] 104 | GroqStreamChunk = Union[str, GroqStreamResponse] 105 | 106 | GroqGenericResponse = GenericResponse[str, GroqFullResponse, GroqStreamChunk] 107 | 108 | Client = "groq.Groq" 109 | -------------------------------------------------------------------------------- /clientai/ollama/__init__.py: -------------------------------------------------------------------------------- 1 | from .._constants import OLLAMA_INSTALLED 2 | from .manager import OllamaManager, OllamaServerConfig 3 | from .provider import Provider 4 | 5 | __all__ = [ 6 | "Provider", 7 | "OLLAMA_INSTALLED", 8 | "OllamaManager", 9 | "OllamaServerConfig", 10 | ] 11 | -------------------------------------------------------------------------------- /clientai/ollama/_typing.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from collections.abc import Iterator 4 | from typing import Any, List, Optional, Protocol, TypedDict, Union 5 | 6 | from .._common_types import GenericResponse, Message 7 | 8 | 9 | class OllamaResponse(TypedDict): 10 | model: str 11 | created_at: str 12 | response: str 13 | done: bool 14 | context: Optional[List[int]] 15 | total_duration: Optional[int] 16 | load_duration: Optional[int] 17 | prompt_eval_count: Optional[int] 18 | prompt_eval_duration: Optional[int] 19 | eval_count: Optional[int] 20 | eval_duration: Optional[int] 21 | done_reason: Optional[str] 22 | 23 | 24 | class OllamaStreamResponse(TypedDict): 25 | model: str 26 | created_at: str 27 | response: str 28 | done: bool 29 | 30 | 31 | class OllamaChatResponse(TypedDict): 32 | model: str 33 | created_at: str 34 | message: Message 35 | done: bool 36 | total_duration: Optional[int] 37 | load_duration: Optional[int] 38 | prompt_eval_count: Optional[int] 39 | prompt_eval_duration: Optional[int] 40 | eval_count: Optional[int] 41 | eval_duration: Optional[int] 42 | 43 | 44 | OllamaProvider = Any 45 | OllamaFullResponse = Union[OllamaResponse, OllamaChatResponse] 46 | OllamaStreamChunk = Union[str, OllamaStreamResponse] 47 | 48 | OllamaGenericResponse = GenericResponse[ 49 | str, OllamaFullResponse, OllamaStreamChunk 50 | ] 51 | 52 | 53 | class OllamaClientProtocol(Protocol): 54 | def generate( 55 | self, 56 | model: str, 57 | prompt: str, 58 | stream: bool = False, 59 | temperature: Optional[float] = None, 60 | top_p: Optional[float] = None, 61 | **kwargs: Any, 62 | ) -> Union[OllamaResponse, Iterator[OllamaStreamResponse]]: ... 63 | 64 | def chat( 65 | self, 66 | model: str, 67 | messages: List[Message], 68 | stream: bool = False, 69 | temperature: Optional[float] = None, 70 | top_p: Optional[float] = None, 71 | **kwargs: Any, 72 | ) -> Union[OllamaChatResponse, Iterator[OllamaStreamResponse]]: ... 
73 | 74 | 75 | Client = "ollama.Client" 76 | -------------------------------------------------------------------------------- /clientai/ollama/manager/__init__.py: -------------------------------------------------------------------------------- 1 | from .config import OllamaServerConfig 2 | from .core import OllamaManager 3 | from .exceptions import OllamaManagerError 4 | from .platform_info import GPUVendor, Platform 5 | 6 | __all__ = [ 7 | "OllamaManager", 8 | "OllamaServerConfig", 9 | "Platform", 10 | "GPUVendor", 11 | "OllamaManagerError", 12 | ] 13 | -------------------------------------------------------------------------------- /clientai/ollama/manager/exceptions.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Type 2 | 3 | 4 | class OllamaManagerError(Exception): 5 | """Base exception class for Ollama manager errors.""" 6 | 7 | def __init__( 8 | self, 9 | message: str, 10 | original_error: Optional[Exception] = None, 11 | ): 12 | super().__init__(message) 13 | self.original_error = original_error 14 | 15 | def __str__(self) -> str: 16 | """Format the error message.""" 17 | error_msg = super().__str__() 18 | if self.original_error: 19 | error_msg = f"{error_msg}\nCaused by: {str(self.original_error)}" 20 | return error_msg 21 | 22 | @property 23 | def original_exception(self) -> Optional[Exception]: 24 | """Returns the original exception that caused this error, if any.""" 25 | return self.original_error 26 | 27 | 28 | class ExecutableNotFoundError(OllamaManagerError): 29 | """ 30 | Raised when the Ollama executable cannot be found. 31 | 32 | This typically means Ollama is not installed or not in the system PATH. 33 | """ 34 | 35 | 36 | class ServerStartupError(OllamaManagerError): 37 | """ 38 | Raised when the Ollama server fails to start. 39 | 40 | This can happen due to: 41 | - Port already in use 42 | - Insufficient permissions 43 | - Invalid configuration 44 | - System resource constraints 45 | """ 46 | 47 | 48 | class ServerShutdownError(OllamaManagerError): 49 | """ 50 | Raised when there's an error stopping the Ollama server. 51 | 52 | This can happen when: 53 | - The server process cannot be terminated 54 | - The server is in an inconsistent state 55 | - The system prevents process termination 56 | """ 57 | 58 | 59 | class ServerTimeoutError(OllamaManagerError): 60 | """ 61 | Raised when the server operation times out. 62 | 63 | This can happen during: 64 | - Server startup 65 | - Health checks 66 | - Server shutdown 67 | 68 | The timeout duration is configurable through OllamaServerConfig. 69 | """ 70 | 71 | 72 | class UnsupportedPlatformError(OllamaManagerError): 73 | """ 74 | Raised when running on an unsupported platform or configuration. 75 | 76 | This can happen when: 77 | - The operating system is not supported (e.g., BSD, Solaris) 78 | - Required system features are missing 79 | - GPU configuration is incompatible 80 | """ 81 | 82 | 83 | class ResourceError(OllamaManagerError): 84 | """ 85 | Raised when there are issues with system resources. 86 | 87 | This can happen due to: 88 | - Insufficient memory 89 | - GPU memory allocation failures 90 | - CPU thread allocation issues 91 | - Disk space constraints 92 | """ 93 | 94 | 95 | class ConfigurationError(OllamaManagerError): 96 | """ 97 | Raised when there are issues with the Ollama configuration. 
98 | 99 | This can happen when: 100 | - Invalid configuration values are provided 101 | - Incompatible settings are combined 102 | - Required configuration is missing 103 | - Platform-specific settings are invalid 104 | """ 105 | 106 | 107 | def raise_ollama_error( 108 | error_class: Type[OllamaManagerError], 109 | message: str, 110 | original_error: Optional[Exception] = None, 111 | ) -> None: 112 | """ 113 | Helper function to raise Ollama manager errors with consistent formatting. 114 | 115 | Args: 116 | error_class: The specific error class to raise 117 | message: The error message 118 | original_error: The original exception that caused this error, if any 119 | 120 | Raises: 121 | OllamaManagerError: The specified error class with formatted message 122 | """ 123 | raise error_class(message, original_error) 124 | -------------------------------------------------------------------------------- /clientai/openai/__init__.py: -------------------------------------------------------------------------------- 1 | from .._constants import OPENAI_INSTALLED 2 | from .provider import Provider 3 | 4 | __all__ = [ 5 | "Provider", 6 | "OPENAI_INSTALLED", 7 | ] 8 | -------------------------------------------------------------------------------- /clientai/openai/_typing.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from collections.abc import Iterator 4 | from dataclasses import dataclass 5 | from typing import ( 6 | Any, 7 | Dict, 8 | List, 9 | Literal, 10 | Optional, 11 | Protocol, 12 | TypedDict, 13 | Union, 14 | ) 15 | 16 | from .._common_types import GenericResponse 17 | 18 | 19 | @dataclass 20 | class Message: 21 | role: Literal["system", "user", "assistant", "function"] 22 | content: str 23 | 24 | 25 | @dataclass 26 | class OpenAIChoice: 27 | index: int 28 | message: Message 29 | finish_reason: Optional[str] 30 | 31 | 32 | class OpenAIUsage(TypedDict): 33 | prompt_tokens: int 34 | completion_tokens: int 35 | total_tokens: int 36 | 37 | 38 | @dataclass 39 | class OpenAIResponse: 40 | id: str 41 | object: str 42 | created: int 43 | model: str 44 | choices: List[OpenAIChoice] 45 | usage: OpenAIUsage 46 | 47 | 48 | @dataclass 49 | class OpenAIStreamDelta: 50 | role: Optional[Literal["system", "user", "assistant", "function"]] 51 | content: Optional[str] 52 | function_call: Optional[Dict[str, Any]] 53 | 54 | 55 | @dataclass 56 | class OpenAIStreamChoice: 57 | index: int 58 | delta: OpenAIStreamDelta 59 | finish_reason: Optional[str] 60 | 61 | 62 | @dataclass 63 | class OpenAIStreamResponse: 64 | id: str 65 | object: str 66 | created: int 67 | model: str 68 | choices: List[OpenAIStreamChoice] 69 | 70 | 71 | class OpenAIChatCompletionProtocol(Protocol): 72 | def create( 73 | self, **kwargs: Any 74 | ) -> Union[OpenAIResponse, Iterator[OpenAIStreamResponse]]: ... 75 | 76 | 77 | class OpenAIChatProtocol(Protocol): 78 | completions: OpenAIChatCompletionProtocol 79 | 80 | 81 | class OpenAIClientProtocol(Protocol): 82 | chat: OpenAIChatProtocol 83 | 84 | 85 | class OpenAIChatCompletions(Protocol): 86 | def create( 87 | self, 88 | model: str, 89 | messages: List[Message], 90 | stream: bool = False, 91 | **kwargs: Any, 92 | ) -> Union[OpenAIResponse, OpenAIStreamResponse]: ... 
93 | 94 | 95 | OpenAIProvider = Any 96 | OpenAIFullResponse = Union[OpenAIResponse, OpenAIStreamResponse] 97 | OpenAIStreamChunk = Union[str, OpenAIStreamResponse] 98 | 99 | OpenAIGenericResponse = GenericResponse[ 100 | str, OpenAIFullResponse, OpenAIStreamChunk 101 | ] 102 | 103 | Client = "openai.OpenAI" 104 | -------------------------------------------------------------------------------- /clientai/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/benavlabs/clientai/b5010727492fba38fecc160398f96173bad608da/clientai/py.typed -------------------------------------------------------------------------------- /clientai/replicate/__init__.py: -------------------------------------------------------------------------------- 1 | from .._constants import REPLICATE_INSTALLED 2 | from .provider import Provider 3 | 4 | __all__ = [ 5 | "Provider", 6 | "REPLICATE_INSTALLED", 7 | ] 8 | -------------------------------------------------------------------------------- /clientai/replicate/_typing.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from collections.abc import Iterator 4 | from typing import Any, Dict, Optional, Protocol, TypedDict, Union 5 | 6 | from .._common_types import GenericResponse 7 | 8 | 9 | class ReplicatePredictionProtocol(Protocol): 10 | id: str 11 | status: str 12 | error: Optional[str] 13 | output: Any 14 | 15 | def stream(self) -> Iterator[Any]: ... 16 | 17 | 18 | ReplicatePrediction = ReplicatePredictionProtocol 19 | 20 | 21 | class ReplicateMetrics(TypedDict): 22 | batch_size: float 23 | input_token_count: int 24 | output_token_count: int 25 | predict_time: float 26 | predict_time_share: float 27 | time_to_first_token: float 28 | tokens_per_second: float 29 | 30 | 31 | class ReplicateUrls(TypedDict): 32 | cancel: str 33 | get: str 34 | stream: str 35 | 36 | 37 | class ReplicateResponse(TypedDict): 38 | id: str 39 | model: str 40 | version: str 41 | status: str 42 | input: Dict[str, Any] 43 | output: str 44 | logs: str 45 | error: Optional[Any] 46 | metrics: Optional[ReplicateMetrics] 47 | created_at: str 48 | started_at: Optional[str] 49 | completed_at: Optional[str] 50 | urls: ReplicateUrls 51 | 52 | 53 | ReplicateStreamResponse = ReplicateResponse 54 | 55 | ReplicateProvider = Any 56 | ReplicateFullResponse = ReplicateResponse 57 | ReplicateStreamChunk = Union[str, ReplicateStreamResponse] 58 | 59 | 60 | class ReplicatePredictionsProtocol(Protocol): 61 | @staticmethod 62 | def create(**kwargs: Any) -> ReplicatePredictionProtocol: ... 63 | 64 | @staticmethod 65 | def get(id: str) -> ReplicatePredictionProtocol: ... 
66 | 67 | 68 | class ReplicateClientProtocol(Protocol): 69 | predictions: ReplicatePredictionsProtocol 70 | 71 | 72 | ReplicateGenericResponse = GenericResponse[ 73 | str, ReplicateFullResponse, ReplicateStreamChunk 74 | ] 75 | 76 | Client = "replicate.Client" 77 | -------------------------------------------------------------------------------- /docs/assets/ClientAI.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/benavlabs/clientai/b5010727492fba38fecc160398f96173bad608da/docs/assets/ClientAI.png -------------------------------------------------------------------------------- /docs/assets/benav_labs_banner.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/benavlabs/clientai/b5010727492fba38fecc160398f96173bad608da/docs/assets/benav_labs_banner.png -------------------------------------------------------------------------------- /docs/assets/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/benavlabs/clientai/b5010727492fba38fecc160398f96173bad608da/docs/assets/logo.png -------------------------------------------------------------------------------- /docs/en/advanced/client/replicate_specific.md: -------------------------------------------------------------------------------- 1 | # Replicate-Specific Parameters in ClientAI 2 | 3 | This guide covers the Replicate-specific parameters that can be passed to ClientAI's `generate_text` and `chat` methods. These parameters are passed as additional keyword arguments to customize Replicate's behavior. 4 | 5 | ## generate_text Method 6 | 7 | ### Basic Structure 8 | ```python 9 | from clientai import ClientAI 10 | 11 | client = ClientAI('replicate', api_key="your-replicate-api-key") 12 | response = client.generate_text( 13 | prompt="Your prompt here", # Required 14 | model="owner/name:version", # Required 15 | webhook="https://...", # Replicate-specific 16 | webhook_completed="https://...",# Replicate-specific 17 | webhook_events_filter=[...], # Replicate-specific 18 | stream=False, # Optional 19 | wait=True # Replicate-specific 20 | ) 21 | ``` 22 | 23 | ### Replicate-Specific Parameters 24 | 25 | #### `webhook: Optional[str]` 26 | - URL to receive POST requests with prediction updates 27 | ```python 28 | response = client.generate_text( 29 | prompt="Write a story", 30 | model="stability-ai/stable-diffusion:db21e45d3f7023abc2a46ee38a23973f6dce16bb082a930b0c49861f96d1e5bf", 31 | webhook="https://your-server.com/webhook" 32 | ) 33 | ``` 34 | 35 | #### `webhook_completed: Optional[str]` 36 | - URL for receiving completion notifications 37 | ```python 38 | response = client.generate_text( 39 | prompt="Generate text", 40 | model="meta/llama-2-70b:latest", 41 | webhook_completed="https://your-server.com/completed" 42 | ) 43 | ``` 44 | 45 | #### `webhook_events_filter: Optional[List[str]]` 46 | - List of events that trigger webhooks 47 | - Common events: `"completed"`, `"output"` 48 | ```python 49 | response = client.generate_text( 50 | prompt="Analyze text", 51 | model="meta/llama-2-70b:latest", 52 | webhook_events_filter=["completed", "output"] 53 | ) 54 | ``` 55 | 56 | #### `wait: Optional[Union[int, bool]]` 57 | - Controls request blocking behavior 58 | - True: keeps request open up to 60 seconds 59 | - int: specifies seconds to hold request (1-60) 60 | - False: doesn't wait (default) 61 | ```python 62 | response = client.generate_text( 63 | 
prompt="Complex analysis", 64 | model="meta/llama-2-70b:latest", 65 | wait=30 # Wait for 30 seconds 66 | ) 67 | ``` 68 | 69 | #### `stream: bool` 70 | - Enables token streaming for supported models 71 | ```python 72 | for chunk in client.generate_text( 73 | prompt="Write a story", 74 | model="meta/llama-2-70b:latest", 75 | stream=True 76 | ): 77 | print(chunk, end="") 78 | ``` 79 | 80 | ## chat Method 81 | 82 | ### Basic Structure 83 | ```python 84 | response = client.chat( 85 | model="meta/llama-2-70b:latest", # Required 86 | messages=[...], # Required 87 | webhook="https://...", # Replicate-specific 88 | webhook_completed="https://...", # Replicate-specific 89 | webhook_events_filter=[...], # Replicate-specific 90 | wait=True # Replicate-specific 91 | ) 92 | ``` 93 | 94 | ### Message Formatting 95 | Replicate formats chat messages into a single prompt: 96 | ```python 97 | prompt = "\n".join([f"{m['role']}: {m['content']}" for m in messages]) 98 | prompt += "\nassistant: " 99 | ``` 100 | 101 | ## Training Parameters 102 | 103 | When using Replicate's training capabilities: 104 | 105 | ```python 106 | response = client.train( 107 | model="stability-ai/sdxl", 108 | version="39ed52f2a78e934b3ba6e2a89f5b1c712de7dfea535525255b1aa35c5565e08b", 109 | input={ 110 | "input_images": "https://domain/images.zip", 111 | "token_string": "TOK", 112 | "caption_prefix": "a photo of TOK", 113 | "max_train_steps": 1000, 114 | "use_face_detection_instead": False 115 | }, 116 | destination="username/model-name" 117 | ) 118 | ``` 119 | 120 | ## Complete Examples 121 | 122 | ### Example 1: Generation with Webhooks 123 | ```python 124 | response = client.generate_text( 125 | prompt="Write a scientific paper summary", 126 | model="meta/llama-2-70b:latest", 127 | webhook="https://your-server.com/updates", 128 | webhook_completed="https://your-server.com/completed", 129 | webhook_events_filter=["completed"], 130 | wait=True 131 | ) 132 | ``` 133 | 134 | ### Example 2: Chat with Streaming 135 | ```python 136 | messages = [ 137 | {"role": "system", "content": "You are a helpful assistant"}, 138 | {"role": "user", "content": "Write a haiku about coding"} 139 | ] 140 | 141 | for chunk in client.chat( 142 | messages=messages, 143 | model="meta/llama-2-70b:latest", 144 | stream=True 145 | ): 146 | print(chunk, end="") 147 | ``` 148 | 149 | ### Example 3: Image Generation 150 | ```python 151 | response = client.generate_text( 152 | prompt="A portrait of a wombat gentleman", 153 | model="stability-ai/stable-diffusion:27b93a2413e7f36cd83da926f3656280b2931564ff050bf9575f1fdf9bcd7478", 154 | wait=60 155 | ) 156 | ``` 157 | 158 | ## Error Handling 159 | 160 | ClientAI maps Replicate's exceptions to its own error types: 161 | ```python 162 | try: 163 | response = client.generate_text( 164 | prompt="Test prompt", 165 | model="meta/llama-2-70b:latest", 166 | wait=True 167 | ) 168 | except ClientAIError as e: 169 | print(f"Error: {e}") 170 | ``` 171 | 172 | Error mappings: 173 | - `AuthenticationError`: API key issues 174 | - `RateLimitError`: Rate limit exceeded 175 | - `ModelError`: Model not found or failed 176 | - `InvalidRequestError`: Invalid parameters 177 | - `TimeoutError`: Request timeout (default 300s) 178 | - `APIError`: Other server errors 179 | 180 | ## Parameter Validation Notes 181 | 182 | 1. Both `model` and `prompt`/`messages` are required 183 | 2. Model string format: `"owner/name:version"` or `"owner/name"` for latest version 184 | 3. `wait` must be boolean or integer 1-60 185 | 4. 
Webhook URLs must be valid HTTP/HTTPS URLs
186 | 5. `webhook_events_filter` must contain valid event types
187 | 6. Some models may not support streaming
188 | 7. File inputs can be URLs or local file paths
189 | 
190 | These parameters allow you to leverage Replicate's features through ClientAI, including model management, webhook notifications, and streaming capabilities.
-------------------------------------------------------------------------------- /docs/en/advanced/overview.md: --------------------------------------------------------------------------------
1 | # Advanced Overview
2 | 
3 | This section provides in-depth guides on leveraging specific features of ClientAI and provider-specific functionalities. Each topic delves into a particular aspect of usage or focuses on a specific provider's unique capabilities.
4 | 
5 | ## Provider-Specific Parameters
6 | 
7 | Different AI providers offer unique parameters and features. Understanding these can help you fine-tune your AI interactions for optimal results.
8 | 
9 | 1. **Ollama Specific Guide**: Learn about Ollama's unique parameters, including context handling, streaming options, and custom templates.
10 |     - [Ollama Specific Guide](client/ollama_specific.md)
11 | 
12 | 2. **OpenAI Specific Guide**: Explore OpenAI's advanced features, such as logit bias and model-specific parameters.
13 |     - [OpenAI Specific Guide](client/openai_specific.md)
14 | 
15 | 3. **Replicate Specific Guide**: Discover Replicate's distinctive offerings, including model versioning and custom deployment options.
16 |     - [Replicate Specific Guide](client/replicate_specific.md)
17 | 
18 | 4. **Groq Specific Guide**: Review Groq's specific settings and parameters.
19 |     - [Groq Specific Guide](client/groq_specific.md)
20 | 
21 | ## Custom Run Workflows
22 | 
23 | 5. **Custom Run Methods**: Learn how to replace default execution with custom workflows through a detailed code review assistant example.
24 |     - [Creating Custom Run Guide](agent/creating_run.md#understanding-custom-run-methods)
25 | 
26 | ## Complex Workflows
27 | 
28 | 6. **Multi-Step Decision Making**: Advanced patterns for complex decision workflows.
29 |     - `🚧 Coming Soon`
30 | 
31 | 7. **Parallel Step Execution**: Implementing concurrent step execution patterns.
32 |     - `🚧 Coming Soon`
33 | 
34 | 8. **Conditional Workflows**: Creating dynamic, condition-based workflows.
35 |     - `🚧 Coming Soon`
36 | 
37 | ## Advanced Tool Patterns
38 | 
39 | 9. **Tool Chaining**: Techniques for combining multiple tools effectively.
40 |     - `🚧 Coming Soon`
41 | 
42 | 10. **Tool Result Caching**: Optimizing tool execution with intelligent caching.
43 |     - `🚧 Coming Soon`
44 | 
45 | 11. **Tool Fallback Strategies**: Implementing robust tool execution patterns.
46 |     - `🚧 Coming Soon`
47 | 
48 | ## Advanced Integration Topics
49 | 
50 | 12. **External Service Integration**: Patterns for integrating with external services.
51 |     - `🚧 Coming Soon`
52 | 
53 | 13. **Database Integration**: Implementing persistent storage in agent workflows.
54 |     - `🚧 Coming Soon`
55 | 
56 | 14. **Event Systems**: Building event-driven agent architectures.
57 |     - `🚧 Coming Soon`
58 | 
59 | ## Testing and Monitoring
60 | 
61 | 15. **Testing Strategies**: Comprehensive testing approaches for agent systems.
62 |     - `🚧 Coming Soon`
63 | 
64 | 16. **Performance Monitoring**: Monitoring and optimizing agent performance.
65 |     - `🚧 Coming Soon`
66 | 
67 | 17. **Error Handling and Retry Strategies**: Best practices for error handling.
68 | - [Error Handling and Retry Strategies](error_handling.md) 69 | 70 | Each guide in this section is designed to provide you with a deeper understanding of ClientAI's capabilities and how to leverage them effectively in your projects. -------------------------------------------------------------------------------- /docs/en/api/agent/core/agent.md: -------------------------------------------------------------------------------- 1 | # Agent Class API Reference 2 | 3 | The `Agent` class is the foundation for creating AI-powered agents with automated tool selection and workflow management capabilities. It provides a flexible framework for building agents that can execute multi-step workflows, manage tools, and interact with language models. 4 | 5 | ## Class Definition 6 | 7 | ::: clientai.agent.core.Agent 8 | rendering: 9 | show_if_no_docstring: true -------------------------------------------------------------------------------- /docs/en/api/agent/core/context.md: -------------------------------------------------------------------------------- 1 | # AgentContext Class API Reference 2 | 3 | The `AgentContext` class maintains state, memory, and results across agent workflow steps. It provides a structured way to store and access data during agent execution. 4 | 5 | ## Class Definition 6 | 7 | ::: clientai.agent.core.AgentContext 8 | rendering: 9 | show_if_no_docstring: true -------------------------------------------------------------------------------- /docs/en/api/agent/core/execution.md: -------------------------------------------------------------------------------- 1 | # StepExecutionEngine Class API Reference 2 | 3 | The `StepExecutionEngine` class manages the execution of individual workflow steps, handling tool selection and LLM interactions. 4 | 5 | ## Class Definition 6 | 7 | ::: clientai.agent.core.StepExecutionEngine 8 | rendering: 9 | show_if_no_docstring: true -------------------------------------------------------------------------------- /docs/en/api/agent/core/workflow.md: -------------------------------------------------------------------------------- 1 | # WorkflowManager Class API Reference 2 | 3 | The `WorkflowManager` class handles the registration and execution of workflow steps. It manages step ordering, dependencies, and execution flow. 4 | 5 | ## Class Definition 6 | 7 | ::: clientai.agent.core.WorkflowManager 8 | rendering: 9 | show_if_no_docstring: true -------------------------------------------------------------------------------- /docs/en/api/agent/steps/decorators.md: -------------------------------------------------------------------------------- 1 | # Step Decorators API Reference 2 | 3 | The step decorators provide a way to define workflow steps using Python decorators like @think, @act, @observe, and @synthesize. 4 | 5 | ## Function Definitions 6 | 7 | ::: clientai.agent.steps.decorators 8 | rendering: 9 | show_if_no_docstring: true -------------------------------------------------------------------------------- /docs/en/api/agent/steps/step.md: -------------------------------------------------------------------------------- 1 | # Step Class API Reference 2 | 3 | The `Step` class represents a single step in an agent's workflow, encapsulating its function, type, and configuration. 
4 | 5 | ## Class Definition 6 | 7 | ::: clientai.agent.steps.Step 8 | rendering: 9 | show_if_no_docstring: true -------------------------------------------------------------------------------- /docs/en/api/agent/steps/types.md: -------------------------------------------------------------------------------- 1 | # StepTypes API Reference 2 | 3 | The `StepTypes` module defines the types of steps available in an agent's workflow, such as THINK, ACT, OBSERVE, and SYNTHESIZE. 4 | 5 | ## Class Definition 6 | 7 | ::: clientai.agent.steps.types.StepType 8 | rendering: 9 | show_if_no_docstring: true -------------------------------------------------------------------------------- /docs/en/api/agent/tools/registry.md: -------------------------------------------------------------------------------- 1 | # ToolRegistry Class API Reference 2 | 3 | The `ToolRegistry` class manages the registration and organization of tools, maintaining indices by name and scope. 4 | 5 | ## Class Definition 6 | 7 | ::: clientai.agent.tools.registry.ToolRegistry 8 | rendering: 9 | show_if_no_docstring: true -------------------------------------------------------------------------------- /docs/en/api/agent/tools/selector.md: -------------------------------------------------------------------------------- 1 | # ToolSelector Class API Reference 2 | 3 | The `ToolSelector` class handles the automatic selection and execution of tools using LLM-based decision making. 4 | 5 | ## Class Definition 6 | 7 | ::: clientai.agent.tools.selection.ToolSelector 8 | rendering: 9 | show_if_no_docstring: true -------------------------------------------------------------------------------- /docs/en/api/agent/tools/tool.md: -------------------------------------------------------------------------------- 1 | # Tool Class API Reference 2 | 3 | The `Tool` class represents a callable tool with associated metadata that can be used by agents in their workflows. 4 | 5 | ## Class Definition 6 | 7 | ::: clientai.agent.tools.Tool 8 | rendering: 9 | show_if_no_docstring: true -------------------------------------------------------------------------------- /docs/en/api/client/ai_provider.md: -------------------------------------------------------------------------------- 1 | # AIProvider Class API Reference 2 | 3 | The `AIProvider` class is an abstract base class that defines the interface for all AI provider implementations in ClientAI. It ensures consistency across different providers. 4 | 5 | ## Class Definition 6 | 7 | ::: clientai.ai_provider.AIProvider 8 | rendering: 9 | show_if_no_docstring: true -------------------------------------------------------------------------------- /docs/en/api/client/clientai.md: -------------------------------------------------------------------------------- 1 | # ClientAI Class API Reference 2 | 3 | The `ClientAI` class is the primary interface for interacting with various AI providers in a unified manner. It provides methods for text generation and chat functionality across different AI services. 4 | 5 | ## Class Definition 6 | 7 | ::: clientai.ClientAI 8 | rendering: 9 | show_if_no_docstring: true -------------------------------------------------------------------------------- /docs/en/api/client/ollama_manager/ollama_manager.md: -------------------------------------------------------------------------------- 1 | # OllamaManager Class API Reference 2 | 3 | The `OllamaManager` class is a utility class that manages the lifecycle of a local Ollama server instance. 
It handles server process startup, monitoring, and shutdown while respecting platform-specific requirements and custom configurations. The manager supports configurable GPU acceleration, CPU thread allocation, and memory limits through `OllamaServerConfig`. It provides both context manager and manual management interfaces for controlling the server process. 4 | 5 | ## Class Definition 6 | 7 | ::: clientai.ollama.OllamaManager 8 | rendering: 9 | show_if_no_docstring: true -------------------------------------------------------------------------------- /docs/en/api/client/ollama_manager/ollama_server_config.md: -------------------------------------------------------------------------------- 1 | # OllamaServerConfig Class API Reference 2 | 3 | The `OllamaServerConfig` class is a configuration container that defines the runtime parameters for an Ollama server instance. It allows users to specify network settings (host/port), hardware utilization options (GPU layers, CPU threads, memory limits), and environment variables. The class provides sensible defaults while allowing fine-grained control over server behavior through optional configuration parameters. 4 | 5 | ## Class Definition 6 | 7 | ::: clientai.ollama.OllamaServerConfig 8 | rendering: 9 | show_if_no_docstring: true -------------------------------------------------------------------------------- /docs/en/api/client/specific_providers/groq_provider.md: -------------------------------------------------------------------------------- 1 | # Groq Provider API Reference 2 | 3 | The `GroqProvider` class implements the `AIProvider` interface for the Groq service. It provides methods for text generation and chat functionality using Groq's models. 4 | 5 | ## Class Definition 6 | 7 | ::: clientai.groq.Provider 8 | rendering: 9 | show_if_no_docstring: true -------------------------------------------------------------------------------- /docs/en/api/client/specific_providers/ollama_provider.md: -------------------------------------------------------------------------------- 1 | # Ollama Provider API Reference 2 | 3 | The `OllamaProvider` class implements the `AIProvider` interface for the Ollama service. It provides methods for text generation and chat functionality using locally hosted models through Ollama. 4 | 5 | ## Class Definition 6 | 7 | ::: clientai.ollama.Provider 8 | rendering: 9 | show_if_no_docstring: true -------------------------------------------------------------------------------- /docs/en/api/client/specific_providers/openai_provider.md: -------------------------------------------------------------------------------- 1 | # OpenAI Provider API Reference 2 | 3 | The `OpenAIProvider` class implements the `AIProvider` interface for the OpenAI service. It provides methods for text generation and chat functionality using OpenAI's models. 4 | 5 | ## Class Definition 6 | 7 | ::: clientai.openai.Provider 8 | rendering: 9 | show_if_no_docstring: true -------------------------------------------------------------------------------- /docs/en/api/client/specific_providers/replicate_provider.md: -------------------------------------------------------------------------------- 1 | # Replicate Provider API Reference 2 | 3 | The `ReplicateProvider` class implements the `AIProvider` interface for the Replicate service. It provides methods for text generation and chat functionality using models hosted on Replicate. 
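In application code the provider is typically reached through the `ClientAI` facade rather than instantiated directly; a minimal example (the model identifier follows Replicate's `owner/name:tag` form used in the usage guide):

```python
from clientai import ClientAI

# The Replicate provider is selected by name; the facade routes calls to it
client = ClientAI("replicate", api_key="your-replicate-api-key")

response = client.generate_text(
    "Generate a haiku about mountains",
    model="meta/llama-2-70b-chat:latest",
)
print(response)
```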
4 | 5 | ## Class Definition 6 | 7 | ::: clientai.replicate.Provider 8 | rendering: 9 | show_if_no_docstring: true -------------------------------------------------------------------------------- /docs/en/api/overview.md: -------------------------------------------------------------------------------- 1 | # API Reference Overview 2 | 3 | Welcome to the API Reference section of ClientAI documentation. This section provides detailed information about the various classes, functions, and modules that make up ClientAI. Whether you're looking to integrate ClientAI into your project, extend its functionality, or simply explore its capabilities, this section will guide you through the intricacies of our codebase. 4 | 5 | ## Key Components 6 | 7 | ClientAI's API is comprised of several key components, each serving a specific purpose: 8 | 9 | 1. **ClientAI Class**: This is the main class of our library. It provides a unified interface for interacting with different AI providers and is the primary entry point for using ClientAI. 10 | 11 | - [ClientAI Class Reference](client/clientai.md) 12 | 13 | 2. **AIProvider Class**: An abstract base class that defines the interface for all AI provider implementations. It ensures consistency across different providers. 14 | 15 | - [AIProvider Class Reference](client/ai_provider.md) 16 | 17 | 3. **Provider-Specific Classes**: These classes implement the AIProvider interface for each supported AI service (Ollama, OpenAI, Replicate, Groq). 18 | 19 | - [Ollama Provider Reference](client/specific_providers/ollama_provider.md) 20 | - [OpenAI Provider Reference](client/specific_providers/openai_provider.md) 21 | - [Replicate Provider Reference](client/specific_providers/replicate_provider.md) 22 | - [Groq Provider Reference](client/specific_providers/groq_provider.md) 23 | 24 | 4. **Ollama Manager**: These classes handle the local Ollama server configuration and lifecycle management. 25 | 26 | - [OllamaManager Class Reference](client/ollama_manager/ollama_manager.md) 27 | - [OllamaServerConfig Class Reference](client/ollama_manager/ollama_server_config.md) 28 | 29 | ## Usage 30 | 31 | Each component is documented with its own dedicated page, where you can find detailed information about its methods, parameters, return types, and usage examples. These pages are designed to provide you with all the information you need to understand and work with ClientAI effectively. 32 | 33 | ### Basic Usage Example 34 | 35 | Here's a quick example of how to use the main ClientAI class: 36 | 37 | ```python 38 | from clientai import ClientAI 39 | 40 | # Initialize the client 41 | client = ClientAI('openai', api_key="your-openai-api-key") 42 | 43 | # Generate text 44 | response = client.generate_text( 45 | "Explain quantum computing", 46 | model="gpt-3.5-turbo" 47 | ) 48 | 49 | print(response) 50 | ``` 51 | 52 | For more detailed usage instructions and examples, please refer to the [Usage Guide](../usage/overview.md). 53 | 54 | ## Contribution 55 | 56 | We welcome contributions to ClientAI! If you're interested in contributing, please refer to our [Contributing Guidelines](../community/CONTRIBUTING.md). Contributions can range from bug fixes and documentation improvements to adding support for new AI providers. 57 | 58 | ## Feedback 59 | 60 | Your feedback is crucial in helping us improve ClientAI and its documentation. If you have any suggestions, corrections, or queries, please don't hesitate to reach out to us via GitHub issues. 
-------------------------------------------------------------------------------- /docs/en/community/CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | --8<-- "CODE_OF_CONDUCT.md" -------------------------------------------------------------------------------- /docs/en/community/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | --8<-- "CONTRIBUTING.md" -------------------------------------------------------------------------------- /docs/en/community/LICENSE.md: -------------------------------------------------------------------------------- 1 | --8<-- "LICENSE" -------------------------------------------------------------------------------- /docs/en/community/overview.md: -------------------------------------------------------------------------------- 1 | # Community Overview 2 | 3 | Welcome to the project's community hub. Here, you'll find essential resources and guidelines that are crucial for contributing to and participating in the project. Please take the time to familiarize yourself with the following documents: 4 | 5 | ## Table of Contents 6 | - [Contributing](#contributing) 7 | - [Code of Conduct](#code-of-conduct) 8 | - [License](#license) 9 | 10 | --- 11 | 12 | ## Contributing 13 | [View the Contributing Guidelines](CONTRIBUTING.md) 14 | 15 | Interested in contributing to the project? Great! The contributing guidelines will provide you with all the information you need to get started. This includes how to submit issues, propose changes, and the process for submitting pull requests. 16 | 17 | --- 18 | 19 | ## Code of Conduct 20 | [View the Code of Conduct](CODE_OF_CONDUCT.md) 21 | 22 | The Code of Conduct outlines the standards and behaviors expected of our community members. It's crucial to ensure a welcoming and inclusive environment for everyone. Please take the time to read and adhere to these guidelines. 23 | 24 | --- 25 | 26 | ## License 27 | [View the License](LICENSE.md) 28 | 29 | The license document outlines the terms under which our project can be used, modified, and distributed. Understanding the licensing is important for both users and contributors of the project. 30 | 31 | --- 32 | 33 | Thank you for being a part of our community and for contributing to our project's success! -------------------------------------------------------------------------------- /docs/en/community/showcase_submission.md: -------------------------------------------------------------------------------- 1 | # Submit Your Project 2 | 3 | !!! tip "Share Your Work" 4 | Have you built something with ClientAI? We'd love to feature it in our showcase! 5 | 6 | ## Project Categories 7 | 8 | !!! example "What You Can Submit" 9 | - **Tutorials**: Step-by-step guides teaching others how to build with ClientAI 10 | - **Open Source Projects**: Libraries, tools, or applications others can use and learn from 11 | - **Applications**: Web apps, desktop tools, or services built with ClientAI 12 | - **Commercial Services**: Products or services powered by ClientAI 13 | 14 | ## How to Submit 15 | 16 | !!! info "Submission Steps" 17 | 1. Create a new issue using our [Showcase Submission Template](https://github.com/igorbenav/clientai/issues/new?assignees=&labels=showcase&projects=&template=showcase-submission.md&title=Showcase%3A+) 18 | 2. Fill in the relevant information for your project type 19 | 3. We'll review your submission and add it to the showcase! 20 | 21 | ## Requirements by Category 22 | 23 | !!! 
success "What We Look For" 24 | **For Tutorials:** 25 | 26 | - Clear step-by-step instructions 27 | - Working code examples 28 | - Explanation of concepts used 29 | 30 | **For Open Source Projects:** 31 | 32 | - Public repository 33 | - Basic documentation 34 | - Installation/usage instructions 35 | 36 | **For Applications/Services:** 37 | 38 | - Public demo or screenshots 39 | - Description of ClientAI features used 40 | - Link to live service (if applicable) 41 | 42 | ## Need Help? 43 | 44 | !!! question "Questions?" 45 | Need help with your submission? We're here to help! 46 | 47 | - Open a discussion on [GitHub](https://github.com/igorbenav/clientai/discussions) -------------------------------------------------------------------------------- /docs/en/examples/agent/simple_qa.md: -------------------------------------------------------------------------------- 1 | # Building a Simple Q&A Bot with ClientAI 2 | 3 | Let's build a straightforward Q&A bot using ClientAI's `create_agent` function. This approach gives us powerful features like context management and response streaming while keeping the code minimal and easy to understand. 4 | 5 | ## Setting Up 6 | 7 | Before we start coding, you'll need to install ClientAI with OpenAI support. Open your terminal and run: 8 | 9 | ```bash 10 | pip install clientai[openai] 11 | ``` 12 | 13 | You'll also need an OpenAI API key. Create a `.env` file in your project directory and add your key: 14 | 15 | ```plaintext 16 | OPENAI_API_KEY=your_openai_api_key_here 17 | ``` 18 | 19 | ## Creating the Bot 20 | 21 | Let's create our bot in a file called `qa_bot.py`. We'll break down each part of the code and understand what it does. 22 | 23 | First, let's import what we need: 24 | 25 | ```python 26 | from clientai import ClientAI 27 | from clientai.agent import create_agent 28 | from typing import Iterator, Union 29 | ``` 30 | 31 | Now let's write the function that creates our bot: 32 | 33 | ```python 34 | def create_bot(api_key: str = None): 35 | """Create a simple Q&A bot.""" 36 | # Initialize the AI client 37 | client = ClientAI('openai', api_key=api_key) 38 | 39 | # Create an agent with a helpful personality 40 | system_prompt = """ 41 | You are a friendly and helpful assistant. Your role is to: 42 | - Answer questions clearly and concisely 43 | - Maintain a conversational tone 44 | - Ask for clarification when needed 45 | """ 46 | 47 | return create_agent( 48 | client=client, 49 | role="assistant", 50 | system_prompt=system_prompt, 51 | model="gpt-4", # Or use "gpt-3.5-turbo" for a more economical option 52 | stream=True, # Enable real-time response streaming 53 | temperature=0.7 # Add some creativity to responses 54 | ) 55 | ``` 56 | 57 | The `create_bot` function does two important things. First, it sets up a connection to OpenAI through ClientAI. Then it creates an agent with a specific personality defined in the system prompt. The agent will use GPT-4 (though you can switch to GPT-3.5 to save costs), stream its responses in real-time, and use a moderate temperature setting to balance creativity and accuracy. 58 | 59 | Next, we need a way to display the bot's responses. 
Since we're using streaming, we need to handle both regular and streaming responses: 60 | 61 | ```python 62 | def display_response(response: Union[str, Iterator[str]]): 63 | """Display the bot's response, handling both streaming and non-streaming.""" 64 | if isinstance(response, str): 65 | print(response) 66 | else: 67 | for chunk in response: 68 | print(chunk, end="", flush=True) 69 | print() 70 | ``` 71 | 72 | This function checks whether it received a complete string or a stream of text chunks. For streams, it prints each chunk as it arrives, creating that nice "thinking in real-time" effect. 73 | 74 | Finally, let's create the main interaction loop: 75 | 76 | ```python 77 | def main(): 78 | # Create our bot 79 | bot = create_bot() 80 | 81 | print("Simple Q&A Bot (type 'quit' to exit, 'clear' to reset)") 82 | print("Watch the bot think in real-time!\n") 83 | 84 | while True: 85 | # Get user input 86 | question = input("\nYou: ").strip() 87 | 88 | # Handle commands 89 | if question.lower() == 'quit': 90 | break 91 | elif question.lower() == 'clear': 92 | bot.reset_context() 93 | print("Memory cleared!") 94 | continue 95 | 96 | # Get and display response 97 | print("\nBot: ", end="") 98 | response = bot.run(question) 99 | display_response(response) 100 | 101 | if __name__ == "__main__": 102 | main() 103 | ``` 104 | 105 | The main loop creates a simple command-line interface where users can ask questions, clear the conversation history, or quit the program. When a question is asked, it runs it through the agent and displays the response in real-time. 106 | 107 | ## Using Your Bot 108 | 109 | Running the bot is as simple as executing the Python file: 110 | 111 | ```bash 112 | python qa_bot.py 113 | ``` 114 | 115 | When you run it, you'll see a welcome message and a prompt for your first question. The bot will maintain context between questions, so you can have natural back-and-forth conversations. If you want to start fresh, just type 'clear'. 116 | 117 | ## Making It Your Own 118 | 119 | The bot is quite flexible and can be customized in several ways. Want a more creative bot? Increase the temperature to 0.9. Need more precise answers? Lower it to 0.2. You can even change the bot's personality by modifying the system prompt - make it funny, professional, or anything in between. 120 | 121 | If you're watching costs, switch to "gpt-3.5-turbo" instead of "gpt-4". And if you prefer instant complete responses rather than streaming, just set `stream=False` in the create_agent call. 122 | 123 | ## Taking It Further 124 | 125 | This simple bot can grow with your needs. You might want to add error handling for when the API has issues, or save conversations to files for later reference. You could even create a web interface or add support for different AI providers. The foundation we've built here makes all of these enhancements straightforward to add. -------------------------------------------------------------------------------- /docs/en/examples/overview.md: -------------------------------------------------------------------------------- 1 | # Examples Overview 2 | 3 | Welcome to the Examples section of the ClientAI documentation. We provide both complete example applications and core usage patterns to help you get started. 4 | 5 | ## Example Applications 6 | 7 | ### Client-Based Examples 8 | 1. [**Simple Q&A Bot**](client/simple_qa.md): Basic question-answering bot showing provider initialization, prompt handling, and core text generation/chat methods. 9 | 10 | 2. 
[**Multi-Provider Translator**](client/translator.md): Translation comparator demonstrating simultaneous usage of multiple providers, configurations, and response handling. 11 | 12 | 3. [**AI Dungeon Master**](client/ai_dungeon_master.md): Text-based RPG orchestrating multiple providers for game state management and dynamic narrative generation. 13 | 14 | ### Agent-Based Examples 15 | 1. [**Simple Q&A Bot**](agent/simple_qa.md): Q&A Bot implementation with Agent, introducing basic agent features. 16 | 17 | 2. [**Task Planner**](agent/task_planner.md): Basic agent that breaks down goals into steps, introducing create_agent and simple tool creation. 18 | 19 | 3. [**Writing Assistant**](agent/writing_assistant.md): Multi-step writing improvement agent showcasing workflow steps with think/act/synthesize, decorator configurations, and tool integration. 20 | 21 | 4. [**Code Analyzer**](agent/code_analyzer.md): Code analysis assistant showcasing custom workflows. 22 | 23 | ## Core Usage Patterns 24 | 25 | ### Working with Providers 26 | 27 | ```python 28 | from clientai import ClientAI 29 | 30 | # Initialize with your preferred provider 31 | client = ClientAI('openai', api_key="your-openai-key") 32 | # Or: ClientAI('groq', api_key="your-groq-key") 33 | # Or: ClientAI('replicate', api_key="your-replicate-key") 34 | # Or: ClientAI('ollama', host="your-ollama-host") 35 | 36 | # Basic text generation 37 | response = client.generate_text( 38 | "Tell me a joke", 39 | model="gpt-3.5-turbo", 40 | ) 41 | print(response) 42 | 43 | # Chat functionality 44 | messages = [ 45 | {"role": "user", "content": "What is the capital of France?"}, 46 | {"role": "assistant", "content": "Paris."}, 47 | {"role": "user", "content": "What is its population?"} 48 | ] 49 | 50 | response = client.chat( 51 | messages, 52 | model="gpt-3.5-turbo", 53 | ) 54 | print(response) 55 | ``` 56 | 57 | ### Working with Agents 58 | 59 | #### Quick-Start Agent 60 | ```python 61 | from clientai import client 62 | from clientai.agent import create_agent, tool 63 | 64 | @tool(name="add", description="Add two numbers together") 65 | def add(x: int, y: int) -> int: 66 | return x + y 67 | 68 | @tool(name="multiply") 69 | def multiply(x: int, y: int) -> int: 70 | """Multiply two numbers and return their product.""" 71 | return x * y 72 | 73 | # Create a simple calculator agent 74 | calculator = create_agent( 75 | client=client("groq", api_key="your-groq-key"), 76 | role="calculator", 77 | system_prompt="You are a helpful calculator assistant.", 78 | model="llama-3.2-3b-preview", 79 | tools=[add, multiply] 80 | ) 81 | 82 | result = calculator.run("What is 5 plus 3, then multiplied by 2?") 83 | print(result) 84 | ``` 85 | 86 | #### Custom Workflow Agent 87 | ```python 88 | from clientai import client, Agent, think, act, tool 89 | 90 | @tool(name="calculator") 91 | def calculate_average(numbers: list[float]) -> float: 92 | """Calculate the arithmetic mean of a list of numbers.""" 93 | return sum(numbers) / len(numbers) 94 | 95 | class DataAnalyzer(Agent): 96 | @think("analyze") 97 | def analyze_data(self, input_data: str) -> str: 98 | """Analyze sales data by calculating key metrics.""" 99 | return f""" 100 | Please analyze these sales figures: 101 | 102 | {input_data} 103 | 104 | Calculate the average using the calculator tool 105 | and identify the trend. 106 | """ 107 | 108 | @act 109 | def summarize(self, analysis: str) -> str: 110 | """Create a brief summary of the analysis.""" 111 | return """ 112 | Create a brief summary that includes: 113 | 1.
The average sales figure 114 | 2. Whether sales are trending up or down 115 | 3. One key recommendation 116 | """ 117 | 118 | # Initialize with the tool 119 | analyzer = DataAnalyzer( 120 | client=client("replicate", api_key="your-replicate-key"), 121 | default_model="meta/meta-llama-3-70b-instruct", 122 | tool_confidence=0.8, 123 | tools=[calculate_average] 124 | ) 125 | 126 | result = analyzer.run("Monthly sales: [1000, 1200, 950, 1100]") 127 | print(result) 128 | ``` 129 | 130 | ## Best Practices 131 | 132 | 1. **Handle API Keys Securely**: Never hardcode API keys in your source code 133 | 2. **Use Type Hints**: Take advantage of ClientAI's type system for better IDE support 134 | 3. **Implement Error Handling**: Add appropriate try/catch blocks for API calls 135 | 4. **Monitor Usage**: Keep track of API calls and token usage across providers 136 | 137 | ## Contributing 138 | 139 | Have you built something interesting with ClientAI? We'd love to feature it! Check our [Contributing Guidelines](../community/CONTRIBUTING.md) for information on how to submit your examples. 140 | 141 | ## Next Steps 142 | 143 | - Explore the [Usage Guide](../usage/overview.md) for detailed documentation 144 | - Review the [API Reference](../api/overview.md) for complete API details 145 | - Join our community to share your experiences and get help -------------------------------------------------------------------------------- /docs/en/learn/overview.md: -------------------------------------------------------------------------------- 1 | # Learning Guide Overview 2 | 3 | Welcome to the ClientAI Learning Guide! This comprehensive resource is designed to teach you the fundamentals of Large Language Models (LLMs), AI agents, Retrieval-Augmented Generation (RAG), and Machine Learning Engineering using practical examples with ClientAI. 4 | 5 | ## 🚧 Under Construction 6 | 7 | This learning guide is currently under active development. While we're working hard to create comprehensive content, some sections may be incomplete or pending. We appreciate your patience and encourage you to check back regularly for updates. 8 | 9 | ## What You'll Learn 10 | 11 | This guide will take you through: 12 | 13 | - **LLM Fundamentals**: Understanding how large language models work, their capabilities, and how to effectively use them through ClientAI 14 | - **Agent Development**: Learning to build intelligent AI agents that can reason, use tools, and solve complex tasks 15 | - **Retrieval Systems**: Implementing RAG systems to give your applications access to custom knowledge 16 | - **Production Engineering**: Deploying and managing LLM applications in production environments 17 | 18 | !!! TIP 19 | **Looking for Quick Start Instructions?** 20 | This Learning Guide focuses on building a comprehensive understanding of theoretical concepts and practices; if you just want to get up and running with ClientAI, check our [Usage Guide](../usage/overview.md). 21 | 22 | ## Prerequisites 23 | 24 | To get the most out of this guide, you should have: 25 | 26 | - Basic Python programming knowledge 27 | - ClientAI installed in your environment 28 | - Basic understanding of machine learning concepts (helpful but not required) 29 | 30 | ## Complete Curriculum 31 | 32 | ### Fundamentals 33 | 34 | 1. #### LLM Basics `🚧 Coming Soon` 35 | 36 | - Learn the fundamentals of Large Language Models, including their architecture, capabilities, and key concepts through hands-on practice with ClientAI. 37 | 38 | 2.
#### Prompt Engineering `🚧 Coming Soon` 39 | 40 | - Master effective prompt design techniques, from basic principles to advanced templates, with practical ClientAI implementation examples. 41 | 42 | ### Working with Agents 43 | 44 | 1. #### Agent Fundamentals `🚧 Coming Soon` 45 | 46 | - Understand AI agents and their architectures while building your first reasoning agents using ClientAI's framework. 47 | 48 | 2. #### Tools and Reasoning `🚧 Coming Soon` 49 | 50 | - Create sophisticated tool-using agents with automated selection and custom tools for complex problem-solving tasks. 51 | 52 | ### Retrieval and Knowledge 53 | 54 | 1. #### Introduction to RAG `🚧 Coming Soon` 55 | 56 | - Explore the basics of Retrieval-Augmented Generation and build your first RAG system with vector databases and embeddings. 57 | 58 | 2. #### Advanced RAG `🚧 Coming Soon` 59 | 60 | - Master advanced retrieval strategies and optimization techniques for building sophisticated document processing systems. 61 | 62 | ### MLOps and Engineering 63 | 64 | 1. #### Deployment Patterns `🚧 Coming Soon` 65 | 66 | - Learn essential deployment architectures and strategies for scaling LLM applications in production environments. 67 | 68 | 2. #### Best Practices `🚧 Coming Soon` 69 | 70 | - Master production-grade practices for testing, security, and optimization of LLM applications. 71 | 72 | ### Applied Projects 73 | 74 | 1. #### Building a Production Chatbot `🚧 Coming Soon` 75 | 76 | - Build a complete, production-ready chatbot from design to deployment with advanced context management. 77 | 78 | 2. #### Document Assistant `🚧 Coming Soon` 79 | 80 | - Create a comprehensive document Q&A system using RAG, with optimized retrieval and tool integration. 81 | 82 | ## How to Use This Guide 83 | 84 | The guide is structured progressively, building from fundamental concepts to advanced implementations. To learn effectively: 85 | 86 | 1. Read the theoretical explanations and see how they translate to code 87 | 2. Run the practical examples, experiment with them, and modify parameters 88 | 3. Consult the [Usage Guide](../usage/overview.md) or [API documentation](../api/overview.md) to understand features in depth 89 | 4. Review the [Examples](../examples/overview.md) for additional learning material 90 | 5. **Create your own projects!** Building and breaking things is the best way to learn 91 | 92 | 93 | ## Contributing 94 | 95 | This guide is open to community contributions! If you'd like to help improve or expand the learning materials: 96 | 97 | - Check our [Contributing Guidelines](../community/CONTRIBUTING.md) 98 | - Submit issues or pull requests for improvements 99 | - Share your feedback and suggestions with the community 100 | 101 | You'll soon be able to start your learning journey by heading to the LLM Basics section! -------------------------------------------------------------------------------- /docs/en/usage/client/chat_functionality.md: -------------------------------------------------------------------------------- 1 | # Chat Functionality in ClientAI 2 | 3 | This guide covers how to leverage ClientAI's chat functionality. You'll learn about creating chat conversations, managing context, and handling chat-specific features across supported providers. 4 | 5 | ## Table of Contents 6 | 7 | 1. [Basic Chat Interaction](#basic-chat-interaction) 8 | 2. [Managing Conversation Context](#managing-conversation-context) 9 | 3. [Advanced Chat Features](#advanced-chat-features) 10 | 4.
[Provider-Specific Chat Capabilities](#provider-specific-chat-capabilities) 11 | 5. [Best Practices](#best-practices) 12 | 13 | ## Basic Chat Interaction 14 | 15 | To use the chat functionality in ClientAI, use the `chat` method: 16 | 17 | ```python 18 | from clientai import ClientAI 19 | 20 | client = ClientAI('openai', api_key="your-openai-api-key") 21 | 22 | messages = [ 23 | {"role": "user", "content": "Hello, who are you?"} 24 | ] 25 | 26 | response = client.chat(messages, model="gpt-3.5-turbo") 27 | print(response) 28 | 29 | # Continue the conversation 30 | messages.append({"role": "assistant", "content": response}) 31 | messages.append({"role": "user", "content": "What can you help me with?"}) 32 | 33 | response = client.chat(messages, model="gpt-3.5-turbo") 34 | print(response) 35 | ``` 36 | 37 | This example demonstrates a simple back-and-forth conversation. 38 | 39 | ## Managing Conversation Context 40 | 41 | Effective context management is crucial for coherent conversations: 42 | 43 | ```python 44 | conversation = [ 45 | {"role": "system", "content": "You are a helpful assistant specializing in Python programming."}, 46 | {"role": "user", "content": "How do I use list comprehensions in Python?"} 47 | ] 48 | 49 | response = client.chat(conversation, model="gpt-3.5-turbo") 50 | print(response) 51 | 52 | conversation.append({"role": "assistant", "content": response}) 53 | conversation.append({"role": "user", "content": "Can you give an example?"}) 54 | 55 | response = client.chat(conversation, model="gpt-3.5-turbo") 56 | print(response) 57 | ``` 58 | 59 | This example shows how to maintain context across multiple exchanges, including a system message to set the assistant's role. 60 | 61 | ## Advanced Chat Features 62 | 63 | ### Streaming Chat Responses 64 | 65 | For real-time conversation, you can stream chat responses: 66 | 67 | ```python 68 | conversation = [ 69 | {"role": "user", "content": "Tell me a long story about space exploration"} 70 | ] 71 | 72 | for chunk in client.chat(conversation, model="gpt-3.5-turbo", stream=True): 73 | print(chunk, end="", flush=True) 74 | ``` 75 | 76 | ### Temperature and Top-p Sampling 77 | 78 | Adjust the creativity and randomness of responses: 79 | 80 | ```python 81 | response = client.chat( 82 | conversation, 83 | model="gpt-3.5-turbo", 84 | temperature=0.7, 85 | top_p=0.9 86 | ) 87 | ``` 88 | 89 | ## Provider-Specific Chat Capabilities 90 | 91 | Different providers may offer unique chat features: 92 | 93 | ### OpenAI 94 | 95 | ```python 96 | openai_client = ClientAI('openai', api_key="your-openai-api-key") 97 | 98 | response = openai_client.chat( 99 | [{"role": "user", "content": "Translate 'Hello, world!' to Japanese"}], 100 | model="gpt-4" 101 | ) 102 | ``` 103 | 104 | ### Replicate 105 | 106 | ```python 107 | replicate_client = ClientAI('replicate', api_key="your-replicate-api-key") 108 | 109 | response = replicate_client.chat( 110 | [{"role": "user", "content": "Explain quantum computing"}], 111 | model="meta/llama-2-70b-chat:latest" 112 | ) 113 | ``` 114 | 115 | ### Ollama 116 | 117 | ```python 118 | ollama_client = ClientAI('ollama', host="http://localhost:11434") 119 | 120 | response = ollama_client.chat( 121 | [{"role": "user", "content": "What are the three laws of robotics?"}], 122 | model="llama2" 123 | ) 124 | ``` 125 | 126 | ## Best Practices 127 | 128 | 1. **Context Management**: Keep track of the conversation history, but be mindful of token limits. 
129 | 130 | ```python 131 | max_context_length = 10 132 | if len(conversation) > max_context_length: 133 | conversation = conversation[-max_context_length:] 134 | ``` 135 | 136 | 2. **Error Handling**: Implement robust error handling for chat interactions: 137 | 138 | ```python 139 | try: 140 | response = client.chat(conversation, model="gpt-3.5-turbo") 141 | except Exception as e: 142 | print(f"An error occurred during chat: {e}") 143 | response = "I'm sorry, I encountered an error. Could you please try again?" 144 | ``` 145 | 146 | 3. **User Input Validation**: Validate and sanitize user inputs to prevent potential issues: 147 | 148 | ```python 149 | def sanitize_input(user_input): 150 | # Implement appropriate sanitization logic 151 | return user_input.strip() 152 | 153 | user_message = sanitize_input(input("Your message: ")) 154 | conversation.append({"role": "user", "content": user_message}) 155 | ``` 156 | 157 | 4. **Graceful Fallbacks**: Implement fallback mechanisms for when the AI doesn't understand or can't provide a suitable response: 158 | 159 | ```python 160 | if not response or response.lower() == "i don't know": 161 | response = "I'm not sure about that. Could you please rephrase or ask something else?" 162 | ``` 163 | 164 | 5. **Model Selection**: Choose appropriate models based on the complexity of your chat application: 165 | 166 | ```python 167 | model = "gpt-4" if complex_conversation else "gpt-3.5-turbo" 168 | response = client.chat(conversation, model=model) 169 | ``` 170 | 171 | 6. **Conversation Resetting**: Provide options to reset or start new conversations: 172 | 173 | ```python 174 | def reset_conversation(): 175 | return [{"role": "system", "content": "You are a helpful assistant."}] 176 | 177 | # Usage 178 | conversation = reset_conversation() 179 | ``` 180 | 181 | By following these guidelines and exploring the various features available, you can create sophisticated chat applications using ClientAI across different AI providers. -------------------------------------------------------------------------------- /docs/en/usage/client/initialization.md: -------------------------------------------------------------------------------- 1 | # Initializing ClientAI 2 | 3 | This guide covers the process of initializing ClientAI with different AI providers. You'll learn how to set up ClientAI for use with OpenAI, Replicate, Groq, and Ollama. 4 | 5 | ## Table of Contents 6 | 7 | 1. [Prerequisites](#prerequisites) 8 | 2. [OpenAI Initialization](#openai-initialization) 9 | 3. [Replicate Initialization](#replicate-initialization) 10 | 4. [Ollama Initialization](#ollama-initialization) 11 | 5. [Groq Initialization](#groq-initialization) 6. [Multiple Provider Initialization](#multiple-provider-initialization) 12 | 7. [Best Practices](#best-practices) 13 | 14 | ## Prerequisites 15 | 16 | Before initializing ClientAI, ensure you have: 17 | 18 | 1. Installed ClientAI: `pip install clientai[all]` 19 | 2. Obtained necessary API keys for the providers you plan to use 20 | 3. Basic understanding of Python and asynchronous programming 21 | 22 | ## OpenAI Initialization 23 | 24 | To initialize ClientAI with OpenAI: 25 | 26 | ```python 27 | from clientai import ClientAI 28 | 29 | openai_client = ClientAI('openai', api_key="your-openai-api-key") 30 | ``` 31 | 32 | Replace `"your-openai-api-key"` with your actual OpenAI API key.
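To confirm the client is wired up correctly, a one-line generation call makes a quick smoke test (the model name here is just an example):

```python
# Quick smoke test: prints a short completion if the key and client are valid
print(openai_client.generate_text("Say hello!", model="gpt-3.5-turbo"))
```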
33 | 34 | ## Replicate Initialization 35 | 36 | To initialize ClientAI with Replicate: 37 | 38 | ```python 39 | from clientai import ClientAI 40 | 41 | replicate_client = ClientAI('replicate', api_key="your-replicate-api-key") 42 | ``` 43 | 44 | Replace `"your-replicate-api-key"` with your actual Replicate API key. 45 | 46 | ## Ollama Initialization 47 | 48 | To initialize ClientAI with Ollama: 49 | 50 | ```python 51 | from clientai import ClientAI 52 | 53 | ollama_client = ClientAI('ollama', host="http://localhost:11434") 54 | ``` 55 | 56 | Ensure that you have Ollama running locally on the specified host. 57 | 58 | ## Groq Initialization 59 | 60 | To initialize ClientAI with Groq: 61 | 62 | ```python 63 | from clientai import ClientAI 64 | 65 | groq_client = ClientAI('groq', api_key="your-groq-api-key") 66 | ``` 67 | Replace `"your-groq-api-key"` with your actual Groq API key. 68 | ## Multiple Provider Initialization 69 | 70 | You can initialize multiple providers in the same script: 71 | 72 | ```python 73 | from clientai import ClientAI 74 | 75 | openai_client = ClientAI('openai', api_key="your-openai-api-key") 76 | replicate_client = ClientAI('replicate', api_key="your-replicate-api-key") 77 | groq_client = ClientAI('groq', api_key="your-groq-api-key") 78 | ollama_client = ClientAI('ollama', host="http://localhost:11434") 79 | ``` 80 | 81 | ## Best Practices 82 | 83 | 1. **Environment Variables**: Store API keys in environment variables instead of hardcoding them in your script: 84 | 85 | ```python 86 | import os 87 | from clientai import ClientAI 88 | 89 | openai_client = ClientAI('openai', api_key=os.getenv('OPENAI_API_KEY')) 90 | ``` 91 | 92 | 2. **Error Handling**: Wrap initialization in a try-except block to handle potential errors: 93 | 94 | ```python 95 | try: 96 | client = ClientAI('openai', api_key="your-openai-api-key") 97 | except ValueError as e: 98 | print(f"Error initializing ClientAI: {e}") 99 | ``` 100 | 101 | 3. **Configuration Files**: For projects with multiple providers, consider using a configuration file: 102 | 103 | ```python 104 | import json 105 | from clientai import ClientAI 106 | 107 | with open('config.json') as f: 108 | config = json.load(f) 109 | 110 | openai_client = ClientAI('openai', **config['openai']) 111 | replicate_client = ClientAI('replicate', **config['replicate']) 112 | ``` 113 | 114 | 4. **Lazy Initialization**: If you're not sure which provider you'll use, initialize clients only when needed: 115 | 116 | ```python 117 | def get_client(provider): 118 | if provider == 'openai': 119 | return ClientAI('openai', api_key="your-openai-api-key") 120 | elif provider == 'replicate': 121 | return ClientAI('replicate', api_key="your-replicate-api-key") 122 | # ... other providers ... 123 | 124 | # Use the client when needed 125 | client = get_client('openai') 126 | ``` 127 | 128 | By following these initialization guidelines, you'll be well-prepared to start using ClientAI with various AI providers in your projects. -------------------------------------------------------------------------------- /docs/en/usage/client/text_generation.md: -------------------------------------------------------------------------------- 1 | # Text Generation with ClientAI 2 | 3 | This guide explores how to use ClientAI for text generation tasks across different AI providers. You'll learn about the various options and parameters available for generating text. 4 | 5 | ## Table of Contents 6 | 7 | 1. [Basic Text Generation](#basic-text-generation) 8 | 2. [Advanced Parameters](#advanced-parameters) 9 | 3.
[Streaming Responses](#streaming-responses) 10 | 4. [Provider-Specific Features](#provider-specific-features) 11 | 5. [Best Practices](#best-practices) 12 | 13 | ## Basic Text Generation 14 | 15 | To generate text using ClientAI, use the `generate_text` method: 16 | 17 | ```python 18 | from clientai import ClientAI 19 | 20 | client = ClientAI('openai', api_key="your-openai-api-key") 21 | 22 | response = client.generate_text( 23 | "Write a short story about a robot learning to paint.", 24 | model="gpt-3.5-turbo" 25 | ) 26 | 27 | print(response) 28 | ``` 29 | 30 | This will generate a short story based on the given prompt. 31 | 32 | ## Advanced Parameters 33 | 34 | ClientAI supports various parameters to fine-tune text generation: 35 | 36 | ```python 37 | response = client.generate_text( 38 | "Explain the theory of relativity", 39 | model="gpt-4", 40 | max_tokens=150, 41 | temperature=0.7, 42 | top_p=0.9, 43 | presence_penalty=0.1, 44 | frequency_penalty=0.1 45 | ) 46 | ``` 47 | 48 | - `max_tokens`: Maximum number of tokens to generate 49 | - `temperature`: Controls randomness (0.0 to 1.0) 50 | - `top_p`: Nucleus sampling parameter 51 | - `presence_penalty`: Penalizes new tokens based on their presence in the text so far 52 | - `frequency_penalty`: Penalizes new tokens based on their frequency in the text so far 53 | 54 | Note: Available parameters may vary depending on the provider. 55 | 56 | ## Streaming Responses 57 | 58 | For long-form content, you can use streaming to get partial responses as they're generated: 59 | 60 | ```python 61 | for chunk in client.generate_text( 62 | "Write a comprehensive essay on climate change", 63 | model="gpt-3.5-turbo", 64 | stream=True 65 | ): 66 | print(chunk, end="", flush=True) 67 | ``` 68 | 69 | This allows for real-time display of generated text, which can be useful for user interfaces or long-running generations. 70 | 71 | ## Provider-Specific Features 72 | 73 | Different providers may offer unique features. Here are some examples: 74 | 75 | ### OpenAI 76 | 77 | ```python 78 | response = openai_client.generate_text( 79 | "Translate the following to French: 'Hello, how are you?'", 80 | model="gpt-3.5-turbo" 81 | ) 82 | ``` 83 | 84 | ### Replicate 85 | 86 | ```python 87 | response = replicate_client.generate_text( 88 | "Generate a haiku about mountains", 89 | model="meta/llama-2-70b-chat:latest" 90 | ) 91 | ``` 92 | 93 | ### Ollama 94 | 95 | ```python 96 | response = ollama_client.generate_text( 97 | "Explain the concept of neural networks", 98 | model="llama2" 99 | ) 100 | ``` 101 | 102 | ## Best Practices 103 | 104 | 1. **Prompt Engineering**: Craft clear and specific prompts for better results. 105 | 106 | ```python 107 | good_prompt = "Write a detailed description of a futuristic city, focusing on transportation and architecture." 108 | ``` 109 | 110 | 2. **Model Selection**: Choose appropriate models based on your task complexity and requirements. 111 | 112 | 3. **Error Handling**: Always handle potential errors in text generation: 113 | 114 | ```python 115 | try: 116 | response = client.generate_text("Your prompt here", model="gpt-3.5-turbo") 117 | except Exception as e: 118 | print(f"An error occurred: {e}") 119 | ``` 120 | 121 | 4. **Rate Limiting**: Be mindful of rate limits imposed by providers. Implement appropriate delays or queuing mechanisms for high-volume applications. 122 | 123 | 5. **Content Filtering**: Implement content filtering or moderation for user-facing applications to ensure appropriate outputs. 124 | 125 | 6. 
**Consistency**: For applications requiring consistent outputs, consider using lower temperature values or implementing your own post-processing. 126 | 127 | By following these guidelines and exploring the various parameters and features available, you can effectively leverage ClientAI for a wide range of text generation tasks across different AI providers. -------------------------------------------------------------------------------- /docs/en/usage/overview.md: -------------------------------------------------------------------------------- 1 | # Usage Overview 2 | 3 | This Usage section provides comprehensive guides on how to effectively use ClientAI's two main components: the Client for direct AI provider interactions and the Agent for building autonomous AI workflows. Each topic focuses on specific aspects, ensuring you have all the information needed to leverage the full potential of ClientAI in your projects. 4 | 5 | ## Client Features 6 | 7 | Learn how to initialize and use ClientAI with different AI providers. These guides cover the fundamentals of direct AI interaction: 8 | 9 | - [Initialization Guide](client/initialization.md) 10 | - [Text Generation Guide](client/text_generation.md) 11 | - [Chat Functionality Guide](client/chat_functionality.md) 12 | - [Multiple Providers Guide](client/multiple_providers.md) 13 | - [Ollama Manager Guide](ollama_manager.md) 14 | 15 | ## Agent Features 16 | 17 | Discover how to create and customize AI agents for autonomous workflows: 18 | 19 | - [Creating Agents Guide](agent/creating_agents.md) 20 | - [Workflow Steps Guide](agent/workflow_steps.md) 21 | - [Tools and Tool Selection](agent/tools.md) 22 | - [Context Management](agent/context.md) 23 | - [Validation Guide](agent/validation.md) 24 | 25 | ## Getting Started 26 | 27 | ### Quick Start with Client 28 | 29 | Here's a simple example using the basic client for direct AI interaction: 30 | 31 | ```python 32 | from clientai import ClientAI 33 | 34 | # Initialize the client 35 | client = ClientAI('openai', api_key="your-openai-api-key") 36 | 37 | # Generate text 38 | response = client.generate_text( 39 | "Explain the concept of machine learning in simple terms.", 40 | model="gpt-3.5-turbo" 41 | ) 42 | 43 | print(response) 44 | ``` 45 | 46 | ### Quick Start with Agent 47 | 48 | Here's how to create a simple agent with tools: 49 | 50 | ```python 51 | from clientai import ClientAI, create_agent, tool 52 | 53 | client = ClientAI('openai', api_key="your-openai-api-key") 54 | 55 | # Create a calculator tool 56 | @tool(name="Calculator", description="Performs basic math operations") 57 | def calculate(x: int, y: int) -> int: 58 | return x + y 59 | 60 | # Create an agent with the calculator tool 61 | agent = create_agent( 62 | client=client, 63 | role="math_helper", 64 | system_prompt="You are a helpful math assistant.", 65 | model="gpt-4", 66 | tools=[calculate] 67 | ) 68 | 69 | # Run the agent 70 | result = agent.run("What is 5 plus 3?") 71 | print(result) 72 | ``` 73 | 74 | ### Quick Start with Validation 75 | 76 | Here's how to create an agent with validated outputs: 77 | 78 | ```python 79 | from pydantic import BaseModel, Field 80 | from typing import List 81 | from clientai import Agent, think 82 | class Analysis(BaseModel): 83 | summary: str = Field(min_length=10) 84 | sentiment: str = Field(pattern="^(positive|negative|neutral)$") 85 | key_points: List[str] = Field(min_items=1) 86 | 87 | class AnalysisAgent(Agent): 88 | @think( 89 | name="analyze", 90 | json_output=True, 91 | return_type=Analysis 92 | ) 93 | def analyze_text(self,
text: str) -> Analysis: 94 | return """ 95 | Analyze this text and return: 96 | - summary (10+ chars) 97 | - sentiment (positive/negative/neutral) 98 | - key_points (non-empty list) 99 | 100 | Text: {text} 101 | """ 102 | 103 | agent = AnalysisAgent(client=client, default_model="gpt-4") 104 | result = agent.run("Great product, highly recommend!") 105 | print(f"Sentiment: {result.sentiment}") 106 | print(f"Key points: {result.key_points}") 107 | ``` 108 | 109 | ## Advanced Usage 110 | 111 | ### Streaming with Client 112 | 113 | The client supports streaming responses: 114 | 115 | ```python 116 | for chunk in client.generate_text( 117 | "Tell me a story about space exploration", 118 | model="gpt-3.5-turbo", 119 | stream=True 120 | ): 121 | print(chunk, end="", flush=True) 122 | ``` 123 | 124 | ### Multi-Step Agent Workflows 125 | 126 | Create agents with multiple processing steps: 127 | 128 | ```python 129 | class AnalysisAgent(Agent): 130 | @think("analyze") 131 | def analyze_data(self, input_data: str) -> str: 132 | return f"Analyze this data: {input_data}" 133 | 134 | @act("process") 135 | def process_results(self, analysis: str) -> str: 136 | return f"Based on the analysis: {analysis}" 137 | 138 | agent = AnalysisAgent( 139 | client=client, 140 | default_model="gpt-4", 141 | tool_confidence=0.8 142 | ) 143 | ``` 144 | 145 | ## Best Practices 146 | 147 | ### Client Best Practices 148 | 149 | 1. **API Key Management**: Store API keys securely as environment variables 150 | 2. **Error Handling**: Implement proper error handling for API failures 151 | 3. **Model Selection**: Choose models based on task requirements and budget 152 | 4. **Context Management**: Manage conversation context efficiently 153 | 154 | ### Agent Best Practices 155 | 156 | 1. **Validation**: Use appropriate validation levels: 157 | - Plain text for simple responses 158 | - JSON output for basic structure 159 | - Pydantic models for strict validation 160 | 2. **Error Handling**: Always wrap validated calls in try/except blocks 161 | 3. **Tools**: Choose tool confidence thresholds based on task criticality 162 | 4. **Context**: Use context to share state between steps effectively 163 | 164 | ## Contribution 165 | 166 | If you have suggestions or contributions to these guides, please refer to our [Contributing Guidelines](../community/CONTRIBUTING.md). We appreciate your input in improving our documentation and making ClientAI more accessible to all users. -------------------------------------------------------------------------------- /docs/pt/showcase.md: -------------------------------------------------------------------------------- 1 | # Showcase 2 | 3 | !!! example "**Categories**" 4 | Browse by type: [Applications](#applications) · [Tutorials](#tutorials) 5 | 6 | ## Applications 7 | 8 | !!! tip "Be the First!" 9 | No applications yet. Have you built something with ClientAI? We'd love to feature it here! 10 | 11 | **[Submit Your Project](community/showcase_submission.md)** 12 | 13 | ## Tutorials 14 | 15 | !!! note "Simple Q&A Bot" 16 | **By ClientAI Team** · [View Tutorial](examples/client/simple_qa.md) 17 | 18 | Learn the basics of ClientAI by building a Q&A bot with chat functionality, context management, and real-time streaming responses. 19 | 20 | `OpenAI` `Chat` `Beginner` 21 | 22 | !!! 
note "Multi-Provider Translator" 23 | **By ClientAI Team** · [View Tutorial](examples/client/translator.md) 24 | 25 | Build a translator that compares outputs from different AI providers, with performance metrics and parallel processing. 26 | 27 | `OpenAI` `Groq` `Replicate` `Intermediate` 28 | 29 | !!! note "AI Dungeon Master" 30 | **By ClientAI Team** · [View Tutorial](examples/client/ai_dungeon_master.md) 31 | 32 | Create an AI-powered text adventure game using multiple providers, with dynamic storytelling and game state management. 33 | 34 | `OpenAI` `Replicate` `Ollama` `Advanced` 35 | 36 | !!! note "Simple Q&A Bot (Agent)" 37 | **By ClientAI Team** · [View Tutorial](examples/agent/simple_qa.md) 38 | 39 | Build a Q&A bot using ClientAI's agent framework, demonstrating core agent features and context management. 40 | 41 | `OpenAI` `Agent` `Beginner` 42 | 43 | !!! note "Task Planner" 44 | **By ClientAI Team** · [View Tutorial](examples/agent/task_planner.md) 45 | 46 | Develop a local task planning system that breaks down goals into actionable steps with realistic timelines. 47 | 48 | `Ollama` `Planning` `Intermediate` 49 | 50 | !!! note "Writing Assistant" 51 | **By ClientAI Team** · [View Tutorial](examples/agent/writing_assistant.md) 52 | 53 | Create a sophisticated writing assistant that analyzes text, suggests improvements, and rewrites content while maintaining context. 54 | 55 | `Groq` `Writing` `Intermediate` 56 | 57 | !!! note "Code Analyzer" 58 | **By ClientAI Team** · [View Tutorial](examples/agent/code_analyzer.md) 59 | 60 | Build a code analysis assistant that examines code structure, identifies potential issues, and suggests improvements. 61 | 62 | `Ollama` `Development` `Advanced` 63 | 64 | --- 65 | 66 | !!! question "Add Your Project" 67 | Built something with ClientAI? We'd love to showcase it! 68 | 69 | **[Submit Your Project](community/showcase_submission.md)** -------------------------------------------------------------------------------- /docs/pt/usage/client/chat_functionality.md: -------------------------------------------------------------------------------- 1 | # Chat Functionality in ClientAI 2 | 3 | This guide covers how to leverage ClientAI's chat functionality. You'll learn about creating chat conversations, managing context, and handling chat-specific features across supported providers. 4 | 5 | ## Table of Contents 6 | 7 | 1. [Basic Chat Interaction](#basic-chat-interaction) 8 | 2. [Managing Conversation Context](#managing-conversation-context) 9 | 3. [Advanced Chat Features](#advanced-chat-features) 10 | 4. [Provider-Specific Chat Capabilities](#provider-specific-chat-capabilities) 11 | 5. [Best Practices](#best-practices) 12 | 13 | ## Basic Chat Interaction 14 | 15 | To use the chat functionality in ClientAI, use the `chat` method: 16 | 17 | ```python 18 | from clientai import ClientAI 19 | 20 | client = ClientAI('openai', api_key="your-openai-api-key") 21 | 22 | messages = [ 23 | {"role": "user", "content": "Hello, who are you?"} 24 | ] 25 | 26 | response = client.chat(messages, model="gpt-3.5-turbo") 27 | print(response) 28 | 29 | # Continue the conversation 30 | messages.append({"role": "assistant", "content": response}) 31 | messages.append({"role": "user", "content": "What can you help me with?"}) 32 | 33 | response = client.chat(messages, model="gpt-3.5-turbo") 34 | print(response) 35 | ``` 36 | 37 | This example demonstrates a simple back-and-forth conversation. 
38 | 39 | ## Managing Conversation Context 40 | 41 | Effective context management is crucial for coherent conversations: 42 | 43 | ```python 44 | conversation = [ 45 | {"role": "system", "content": "You are a helpful assistant specializing in Python programming."}, 46 | {"role": "user", "content": "How do I use list comprehensions in Python?"} 47 | ] 48 | 49 | response = client.chat(conversation, model="gpt-3.5-turbo") 50 | print(response) 51 | 52 | conversation.append({"role": "assistant", "content": response}) 53 | conversation.append({"role": "user", "content": "Can you give an example?"}) 54 | 55 | response = client.chat(conversation, model="gpt-3.5-turbo") 56 | print(response) 57 | ``` 58 | 59 | This example shows how to maintain context across multiple exchanges, including a system message to set the assistant's role. 60 | 61 | ## Advanced Chat Features 62 | 63 | ### Streaming Chat Responses 64 | 65 | For real-time conversation, you can stream chat responses: 66 | 67 | ```python 68 | conversation = [ 69 | {"role": "user", "content": "Tell me a long story about space exploration"} 70 | ] 71 | 72 | for chunk in client.chat(conversation, model="gpt-3.5-turbo", stream=True): 73 | print(chunk, end="", flush=True) 74 | ``` 75 | 76 | ### Temperature and Top-p Sampling 77 | 78 | Adjust the creativity and randomness of responses: 79 | 80 | ```python 81 | response = client.chat( 82 | conversation, 83 | model="gpt-3.5-turbo", 84 | temperature=0.7, 85 | top_p=0.9 86 | ) 87 | ``` 88 | 89 | ## Provider-Specific Chat Capabilities 90 | 91 | Different providers may offer unique chat features: 92 | 93 | ### OpenAI 94 | 95 | ```python 96 | openai_client = ClientAI('openai', api_key="your-openai-api-key") 97 | 98 | response = openai_client.chat( 99 | [{"role": "user", "content": "Translate 'Hello, world!' to Japanese"}], 100 | model="gpt-4" 101 | ) 102 | ``` 103 | 104 | ### Replicate 105 | 106 | ```python 107 | replicate_client = ClientAI('replicate', api_key="your-replicate-api-key") 108 | 109 | response = replicate_client.chat( 110 | [{"role": "user", "content": "Explain quantum computing"}], 111 | model="meta/llama-2-70b-chat:latest" 112 | ) 113 | ``` 114 | 115 | ### Ollama 116 | 117 | ```python 118 | ollama_client = ClientAI('ollama', host="http://localhost:11434") 119 | 120 | response = ollama_client.chat( 121 | [{"role": "user", "content": "What are the three laws of robotics?"}], 122 | model="llama2" 123 | ) 124 | ``` 125 | 126 | ## Best Practices 127 | 128 | 1. **Context Management**: Keep track of the conversation history, but be mindful of token limits. 129 | 130 | ```python 131 | max_context_length = 10 132 | if len(conversation) > max_context_length: 133 | conversation = conversation[-max_context_length:] 134 | ``` 135 | 136 | 2. **Error Handling**: Implement robust error handling for chat interactions: 137 | 138 | ```python 139 | try: 140 | response = client.chat(conversation, model="gpt-3.5-turbo") 141 | except Exception as e: 142 | print(f"An error occurred during chat: {e}") 143 | response = "I'm sorry, I encountered an error. Could you please try again?" 144 | ``` 145 | 146 | 3. **User Input Validation**: Validate and sanitize user inputs to prevent potential issues: 147 | 148 | ```python 149 | def sanitize_input(user_input): 150 | # Implement appropriate sanitization logic 151 | return user_input.strip() 152 | 153 | user_message = sanitize_input(input("Your message: ")) 154 | conversation.append({"role": "user", "content": user_message}) 155 | ``` 156 | 157 | 4. 
**Graceful Fallbacks**: Implement fallback mechanisms for when the AI doesn't understand or can't provide a suitable response: 158 | 159 | ```python 160 | if not response or response.lower() == "i don't know": 161 | response = "I'm not sure about that. Could you please rephrase or ask something else?" 162 | ``` 163 | 164 | 5. **Model Selection**: Choose appropriate models based on the complexity of your chat application: 165 | 166 | ```python 167 | model = "gpt-4" if complex_conversation else "gpt-3.5-turbo" 168 | response = client.chat(conversation, model=model) 169 | ``` 170 | 171 | 6. **Conversation Resetting**: Provide options to reset or start new conversations: 172 | 173 | ```python 174 | def reset_conversation(): 175 | return [{"role": "system", "content": "You are a helpful assistant."}] 176 | 177 | # Usage 178 | conversation = reset_conversation() 179 | ``` 180 | 181 | By following these guidelines and exploring the various features available, you can create sophisticated chat applications using ClientAI across different AI providers. -------------------------------------------------------------------------------- /docs/pt/usage/client/initialization.md: -------------------------------------------------------------------------------- 1 | # Initializing ClientAI 2 | 3 | This guide covers the process of initializing ClientAI with different AI providers. You'll learn how to set up ClientAI for use with OpenAI, Replicate, Groq, and Ollama. 4 | 5 | ## Table of Contents 6 | 7 | 1. [Prerequisites](#prerequisites) 8 | 2. [OpenAI Initialization](#openai-initialization) 9 | 3. [Replicate Initialization](#replicate-initialization) 10 | 4. [Ollama Initialization](#ollama-initialization) 11 | 5. [Groq Initialization](#groq-initialization) 6. [Multiple Provider Initialization](#multiple-provider-initialization) 12 | 7. [Best Practices](#best-practices) 13 | 14 | ## Prerequisites 15 | 16 | Before initializing ClientAI, ensure you have: 17 | 18 | 1. Installed ClientAI: `pip install clientai[all]` 19 | 2. Obtained necessary API keys for the providers you plan to use 20 | 3. Basic understanding of Python and asynchronous programming 21 | 22 | ## OpenAI Initialization 23 | 24 | To initialize ClientAI with OpenAI: 25 | 26 | ```python 27 | from clientai import ClientAI 28 | 29 | openai_client = ClientAI('openai', api_key="your-openai-api-key") 30 | ``` 31 | 32 | Replace `"your-openai-api-key"` with your actual OpenAI API key. 33 | 34 | ## Replicate Initialization 35 | 36 | To initialize ClientAI with Replicate: 37 | 38 | ```python 39 | from clientai import ClientAI 40 | 41 | replicate_client = ClientAI('replicate', api_key="your-replicate-api-key") 42 | ``` 43 | 44 | Replace `"your-replicate-api-key"` with your actual Replicate API key. 45 | 46 | ## Ollama Initialization 47 | 48 | To initialize ClientAI with Ollama: 49 | 50 | ```python 51 | from clientai import ClientAI 52 | 53 | ollama_client = ClientAI('ollama', host="http://localhost:11434") 54 | ``` 55 | 56 | Ensure that you have Ollama running locally on the specified host.
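If you prefer not to start the server by hand, the `OllamaManager` described in the [Ollama Manager guide](../ollama_manager.md) can manage its lifecycle for you. A minimal sketch of its context-manager interface, assuming the default server configuration:

```python
from clientai import ClientAI
from clientai.ollama import OllamaManager

# The manager starts the local server on entry and shuts it down on exit
with OllamaManager():
    client = ClientAI('ollama', host="http://localhost:11434")
    print(client.generate_text("Hello!", model="llama2"))
```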
72 | 73 | ## Groq Initialization 74 | 75 | To initialize ClientAI with Groq: 76 | 77 | ```python 78 | from clientai import ClientAI 79 | 80 | groq_client = ClientAI('groq', api_key="your-groq-api-key") 81 | ``` 82 | 83 | Replace `"your-groq-api-key"` with your actual Groq API key. 84 | 85 | ## Multiple Provider Initialization 86 | 87 | You can initialize multiple providers in the same script: 88 | 89 | ```python 90 | from clientai import ClientAI 91 | 92 | openai_client = ClientAI('openai', api_key="your-openai-api-key") 93 | replicate_client = ClientAI('replicate', api_key="your-replicate-api-key") 94 | groq_client = ClientAI('groq', api_key="your-groq-api-key") 95 | ollama_client = ClientAI('ollama', host="http://localhost:11434") 96 | ``` 97 | 98 | ## Best Practices 99 | 100 | 1. **Environment Variables**: Store API keys in environment variables instead of hardcoding them in your script: 101 | 102 | ```python 103 | import os 104 | from clientai import ClientAI 105 | 106 | openai_client = ClientAI('openai', api_key=os.getenv('OPENAI_API_KEY')) 107 | ``` 108 | 109 | 2. **Error Handling**: Wrap initialization in a try-except block to handle potential errors: 110 | 111 | ```python 112 | try: 113 | client = ClientAI('openai', api_key="your-openai-api-key") 114 | except ValueError as e: 115 | print(f"Error initializing ClientAI: {e}") 116 | ``` 117 | 118 | 3. **Configuration Files**: For projects with multiple providers, consider using a configuration file: 119 | 120 | ```python 121 | import json 122 | from clientai import ClientAI 123 | 124 | with open('config.json') as f: 125 | config = json.load(f) 126 | 127 | openai_client = ClientAI('openai', **config['openai']) 128 | replicate_client = ClientAI('replicate', **config['replicate']) 129 | ``` 130 | 131 | 4. **Lazy Initialization**: If you're not sure which provider you'll use, initialize clients only when needed: 132 | 133 | ```python 134 | def get_client(provider): 135 | if provider == 'openai': 136 | return ClientAI('openai', api_key="your-openai-api-key") 137 | elif provider == 'replicate': 138 | return ClientAI('replicate', api_key="your-replicate-api-key") 139 | # ... other providers ... 140 | 141 | # Use the client when needed 142 | client = get_client('openai') 143 | ``` 144 | 145 | By following these initialization guidelines, you'll be well-prepared to start using ClientAI with various AI providers in your projects. -------------------------------------------------------------------------------- /docs/pt/usage/client/text_generation.md: -------------------------------------------------------------------------------- 1 | # Text Generation with ClientAI 2 | 3 | This guide explores how to use ClientAI for text generation tasks across different AI providers. You'll learn about the various options and parameters available for generating text. 4 | 5 | ## Table of Contents 6 | 7 | 1. [Basic Text Generation](#basic-text-generation) 8 | 2. [Advanced Parameters](#advanced-parameters) 9 | 3. [Streaming Responses](#streaming-responses) 10 | 4. [Provider-Specific Features](#provider-specific-features) 11 | 5. [Best Practices](#best-practices) 12 | 13 | ## Basic Text Generation 14 | 15 | To generate text using ClientAI, use the `generate_text` method: 16 | 17 | ```python 18 | from clientai import ClientAI 19 | 20 | client = ClientAI('openai', api_key="your-openai-api-key") 21 | 22 | response = client.generate_text( 23 | "Write a short story about a robot learning to paint.", 24 | model="gpt-3.5-turbo" 25 | ) 26 | 27 | print(response) 28 | ``` 29 | 30 | This will generate a short story based on the given prompt.
31 | 32 | ## Advanced Parameters 33 | 34 | ClientAI supports various parameters to fine-tune text generation: 35 | 36 | ```python 37 | response = client.generate_text( 38 | "Explain the theory of relativity", 39 | model="gpt-4", 40 | max_tokens=150, 41 | temperature=0.7, 42 | top_p=0.9, 43 | presence_penalty=0.1, 44 | frequency_penalty=0.1 45 | ) 46 | ``` 47 | 48 | - `max_tokens`: Maximum number of tokens to generate 49 | - `temperature`: Controls randomness (0.0 to 1.0) 50 | - `top_p`: Nucleus sampling parameter 51 | - `presence_penalty`: Penalizes new tokens based on their presence in the text so far 52 | - `frequency_penalty`: Penalizes new tokens based on their frequency in the text so far 53 | 54 | Note: Available parameters may vary depending on the provider. 55 | 56 | ## Streaming Responses 57 | 58 | For long-form content, you can use streaming to get partial responses as they're generated: 59 | 60 | ```python 61 | for chunk in client.generate_text( 62 | "Write a comprehensive essay on climate change", 63 | model="gpt-3.5-turbo", 64 | stream=True 65 | ): 66 | print(chunk, end="", flush=True) 67 | ``` 68 | 69 | This allows for real-time display of generated text, which can be useful for user interfaces or long-running generations. 70 | 71 | ## Provider-Specific Features 72 | 73 | Different providers may offer unique features. Here are some examples: 74 | 75 | ### OpenAI 76 | 77 | ```python 78 | response = openai_client.generate_text( 79 | "Translate the following to French: 'Hello, how are you?'", 80 | model="gpt-3.5-turbo" 81 | ) 82 | ``` 83 | 84 | ### Replicate 85 | 86 | ```python 87 | response = replicate_client.generate_text( 88 | "Generate a haiku about mountains", 89 | model="meta/llama-2-70b-chat:latest" 90 | ) 91 | ``` 92 | 93 | ### Ollama 94 | 95 | ```python 96 | response = ollama_client.generate_text( 97 | "Explain the concept of neural networks", 98 | model="llama2" 99 | ) 100 | ``` 101 | 102 | ## Best Practices 103 | 104 | 1. **Prompt Engineering**: Craft clear and specific prompts for better results. 105 | 106 | ```python 107 | good_prompt = "Write a detailed description of a futuristic city, focusing on transportation and architecture." 108 | ``` 109 | 110 | 2. **Model Selection**: Choose appropriate models based on your task complexity and requirements. 111 | 112 | 3. **Error Handling**: Always handle potential errors in text generation: 113 | 114 | ```python 115 | try: 116 | response = client.generate_text("Your prompt here", model="gpt-3.5-turbo") 117 | except Exception as e: 118 | print(f"An error occurred: {e}") 119 | ``` 120 | 121 | 4. **Rate Limiting**: Be mindful of rate limits imposed by providers. Implement appropriate delays or queuing mechanisms for high-volume applications (see the sketch at the end of this guide). 122 | 123 | 5. **Content Filtering**: Implement content filtering or moderation for user-facing applications to ensure appropriate outputs. 124 | 125 | 6. **Consistency**: For applications requiring consistent outputs, consider using lower temperature values or implementing your own post-processing. 126 | 127 | By following these guidelines and exploring the various parameters and features available, you can effectively leverage ClientAI for a wide range of text generation tasks across different AI providers.
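128 | 129 | As a companion to best practice 4, here is a minimal client-side throttle. It is a sketch only: `generate_with_throttle` is an illustrative helper (not part of ClientAI), and the one-second default interval is a placeholder; real limits vary by provider and plan. 130 | 131 | ```python 132 | import time 133 | 134 | _last_call = 0.0 135 | 136 | def generate_with_throttle(client, prompt, model, min_interval=1.0): 137 |     """Space out successive calls by at least min_interval seconds.""" 138 |     global _last_call 139 |     wait = min_interval - (time.monotonic() - _last_call) 140 |     if wait > 0: 141 |         time.sleep(wait) 142 |     _last_call = time.monotonic() 143 |     return client.generate_text(prompt, model=model) 144 | ``` 145 | 146 | For sustained high volume, a shared queue or a token-bucket limiter is a better fit than this simple per-process delay.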
-------------------------------------------------------------------------------- /docs/pt/usage/overview.md: -------------------------------------------------------------------------------- 1 | # Visão Geral 2 | 3 | This Usage section provides comprehensive guides on how to effectively use ClientAI's two main components: the Client for direct AI provider interactions and the Agent for building autonomous AI workflows. Each topic focuses on specific aspects, ensuring you have all the information needed to leverage the full potential of ClientAI in your projects. 4 | 5 | ## Client Features 6 | 7 | Learn how to initialize and use ClientAI with different AI providers. These guides cover the fundamentals of direct AI interaction: 8 | 9 | - [Initialization Guide](client/initialization.md) 10 | - [Text Generation Guide](client/text_generation.md) 11 | - [Chat Functionality Guide](client/chat_functionality.md) 12 | - [Multiple Providers Guide](client/multiple_providers.md) 13 | - [Ollama Manager Guide](ollama_manager.md) 14 | 15 | ## Agent Features 16 | 17 | Discover how to create and customize AI agents for autonomous workflows: 18 | 19 | - [Creating Agents Guide](agent/creating_agents.md) 20 | - [Workflow Steps Guide](agent/workflow_steps.md) 21 | - [Tools and Tool Selection](agent/tools.md) 22 | - [Context Management](agent/context.md) 23 | - [Validation Guide](agent/validation.md) 24 | 25 | ## Getting Started 26 | 27 | ### Quick Start with Client 28 | 29 | Here's a simple example using the basic client for direct AI interaction: 30 | 31 | ```python 32 | from clientai import ClientAI 33 | 34 | # Initialize the client 35 | client = ClientAI('openai', api_key="your-openai-api-key") 36 | 37 | # Generate text 38 | response = client.generate_text( 39 | "Explain the concept of machine learning in simple terms.", 40 | model="gpt-3.5-turbo" 41 | ) 42 | 43 | print(response) 44 | ``` 45 | 46 | ### Quick Start with Agent 47 | 48 | Here's how to create a simple agent with tools: 49 | 50 | ```python 51 | from clientai import ClientAI, create_agent, tool 52 | 53 | client = ClientAI('openai', api_key="your-openai-api-key") 54 | 55 | # Create a calculator tool 56 | @tool(name="Calculator", description="Adds two numbers") 57 | def calculate(x: int, y: int) -> int: 58 | return x + y 59 | 60 | # Create an agent with the calculator tool 61 | agent = create_agent( 62 | client=client, 63 | role="math_helper", 64 | system_prompt="You are a helpful math assistant.", 65 | model="gpt-4", 66 | tools=[calculate] 67 | ) 68 | 69 | # Run the agent 70 | result = agent.run("What is 5 plus 3?") 71 | print(result) 72 | ``` 73 | 74 | ### Quick Start with Validation 75 | 76 | Here's how to create an agent with validated outputs: 77 | 78 | ```python 79 | from pydantic import BaseModel, Field 80 | from typing import List 81 | from clientai.agent import Agent, think  # see the Creating Agents guide for the full API 82 | 83 | class Analysis(BaseModel): 84 | summary: str = Field(min_length=10) 85 | sentiment: str = Field(pattern="^(positive|negative|neutral)$") 86 | key_points: List[str] = Field(min_length=1) 87 | 88 | class AnalysisAgent(Agent): 89 | @think( 90 | name="analyze", 91 | json_output=True, 92 | return_type=Analysis 93 | ) 94 | def analyze_text(self, text: str) -> Analysis: 95 | return """ 96 | Analyze this text and return: 97 | - summary (10+ chars) 98 | - sentiment (positive/negative/neutral) 99 | - key_points (non-empty list) 100 | 101 | Text: {text} 102 | """ 103 | 104 | agent = AnalysisAgent(client=client, default_model="gpt-4") 105 | result = agent.run("Great product, highly recommend!") 106 | 
print(f"Sentiment: {result.sentiment}") 107 | print(f"Key points: {result.key_points}") 108 | ``` 109 | 110 | ## Advanced Usage 111 | 112 | ### Streaming with Client 113 | 114 | The client supports streaming responses: 115 | 116 | ```python 117 | for chunk in client.generate_text( 118 | "Tell me a story about space exploration", 119 | model="gpt-3.5-turbo", 120 | stream=True 121 | ): 122 | print(chunk, end="", flush=True) 123 | ``` 124 | 125 | ### Multi-Step Agent Workflows 126 | 127 | Create agents with multiple processing steps: 128 | 129 | ```python 130 | from clientai.agent import Agent, act, think  # step decorators; see the Workflow Steps guide 131 | 132 | class AnalysisAgent(Agent): 133 | @think("analyze") 134 | def analyze_data(self, input_data: str) -> str: 135 | return f"Analyze this data: {input_data}" 136 | 137 | @act("process") 138 | def process_results(self, analysis: str) -> str: 139 | return f"Based on the analysis: {analysis}" 140 | 141 | agent = AnalysisAgent( 142 | client=client, 143 | default_model="gpt-4", 144 | tool_confidence=0.8 145 | ) 146 | ``` 147 | 148 | ## Best Practices 149 | 150 | ### Client Best Practices 151 | 152 | 1. **API Key Management**: Store API keys securely as environment variables 153 | 2. **Error Handling**: Implement proper error handling for API failures 154 | 3. **Model Selection**: Choose models based on task requirements and budget 155 | 4. **Context Management**: Manage conversation context efficiently 156 | 157 | ### Agent Best Practices 158 | 159 | 1. **Validation**: Use appropriate validation levels: 160 | - Plain text for simple responses 161 | - JSON output for basic structure 162 | - Pydantic models for strict validation 163 | 2. **Error Handling**: Always wrap validated calls in try/except blocks 164 | 3. **Tools**: Choose tool confidence thresholds based on task criticality 165 | 4. **Context**: Use context to share state between steps effectively 166 | 167 | ## Contribution 168 | 169 | If you have suggestions or contributions to these guides, please refer to our [Contributing Guidelines](../community/CONTRIBUTING.md). We appreciate your input in improving our documentation and making ClientAI more accessible to all users. -------------------------------------------------------------------------------- /docs/showcase.md: -------------------------------------------------------------------------------- 1 | # Showcase 2 | 3 | !!! example "**Categories**" 4 | Browse by type: [Applications](#applications) · [Tutorials](#tutorials) 5 | 6 | ## Applications 7 | 8 | !!! tip "Be the First!" 9 | No applications yet. Have you built something with ClientAI? We'd love to feature it here! 10 | 11 | **[Submit Your Project](community/showcase_submission.md)** 12 | 13 | ## Tutorials 14 | 15 | !!! note "Simple Q&A Bot" 16 | **By ClientAI Team** · [View Tutorial](examples/client/simple_qa.md) 17 | 18 | Learn the basics of ClientAI by building a Q&A bot with chat functionality, context management, and real-time streaming responses. 19 | 20 | `OpenAI` `Chat` `Beginner` 21 | 22 | !!! note "Multi-Provider Translator" 23 | **By ClientAI Team** · [View Tutorial](examples/client/translator.md) 24 | 25 | Build a translator that compares outputs from different AI providers, with performance metrics and parallel processing. 26 | 27 | `OpenAI` `Groq` `Replicate` `Intermediate` 28 | 29 | !!! note "AI Dungeon Master" 30 | **By ClientAI Team** · [View Tutorial](examples/client/ai_dungeon_master.md) 31 | 32 | Create an AI-powered text adventure game using multiple providers, with dynamic storytelling and game state management.
33 | 34 | `OpenAI` `Replicate` `Ollama` `Advanced` 35 | 36 | !!! note "Simple Q&A Bot (Agent)" 37 | **By ClientAI Team** · [View Tutorial](examples/agent/simple_qa.md) 38 | 39 | Build a Q&A bot using ClientAI's agent framework, demonstrating core agent features and context management. 40 | 41 | `OpenAI` `Agent` `Beginner` 42 | 43 | !!! note "Task Planner" 44 | **By ClientAI Team** · [View Tutorial](examples/agent/task_planner.md) 45 | 46 | Develop a local task planning system that breaks down goals into actionable steps with realistic timelines. 47 | 48 | `Ollama` `Planning` `Intermediate` 49 | 50 | !!! note "Writing Assistant" 51 | **By ClientAI Team** · [View Tutorial](examples/agent/writing_assistant.md) 52 | 53 | Create a sophisticated writing assistant that analyzes text, suggests improvements, and rewrites content while maintaining context. 54 | 55 | `Groq` `Writing` `Intermediate` 56 | 57 | !!! note "Code Analyzer" 58 | **By ClientAI Team** · [View Tutorial](examples/agent/code_analyzer.md) 59 | 60 | Build a code analysis assistant that examines code structure, identifies potential issues, and suggests improvements. 61 | 62 | `Ollama` `Development` `Advanced` 63 | 64 | --- 65 | 66 | !!! question "Add Your Project" 67 | Built something with ClientAI? We'd love to showcase it! 68 | 69 | **[Submit Your Project](community/showcase_submission.md)** -------------------------------------------------------------------------------- /docs/stylesheets/extra.css: -------------------------------------------------------------------------------- 1 | :root { 2 | --md-primary-fg-color: #ff7f50; 3 | } -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: ClientAI 2 | site_description: A unified client for AI providers with built-in agent support. 
3 | site_author: Igor Benav 4 | site_url: https://github.com/igorbenav/clientai 5 | 6 | theme: 7 | name: material 8 | font: 9 | text: Ubuntu 10 | logo: assets/logo.png 11 | favicon: assets/logo.png 12 | features: 13 | - navigation.instant 14 | - navigation.instant.prefetch 15 | - navigation.tabs 16 | - navigation.indexes 17 | - search.suggest 18 | - content.code.copy 19 | palette: 20 | - media: "(prefers-color-scheme: light)" 21 | scheme: default 22 | primary: custom 23 | accent: custom 24 | toggle: 25 | icon: material/brightness-7 26 | name: Switch to dark mode 27 | - media: "(prefers-color-scheme: dark)" 28 | scheme: slate 29 | primary: custom 30 | accent: custom 31 | toggle: 32 | icon: material/brightness-4 33 | name: Switch to light mode 34 | 35 | plugins: 36 | - search 37 | - meta-descriptions 38 | - mkdocstrings: 39 | handlers: 40 | python: 41 | rendering: 42 | show_source: true 43 | - i18n: 44 | languages: 45 | - locale: en 46 | name: en - English 47 | build: true 48 | default: true 49 | - locale: pt 50 | name: pt - Português (Brasil) 51 | build: true 52 | default: false 53 | nav: 54 | - ClientAI: pt/index.md 55 | - Uso: 56 | - Visão Geral: pt/usage/overview.md 57 | - Exemplos: 58 | - Visão Geral: pt/usage/overview.md 59 | - Aprenda: 60 | - Visão Geral: pt/usage/overview.md 61 | - Avançado: 62 | - Visão Geral: pt/usage/overview.md 63 | - Referência da API: 64 | - Visão Geral: pt/usage/overview.md 65 | - ShowCases: 66 | - Visão Geral: pt/usage/overview.md 67 | - Comunidade: 68 | - Visão Geral: pt/usage/overview.md 69 | 70 | extra: 71 | alternate: 72 | - name: en - English 73 | link: / 74 | lang: en 75 | - name: pt - Português (Brasil) 76 | link: /pt/pt 77 | lang: pt 78 | 79 | nav: 80 | - ClientAI: index.md 81 | - Usage: 82 | - Overview: usage/overview.md 83 | - Client: 84 | - Initialization: usage/client/initialization.md 85 | - Text Generation: usage/client/text_generation.md 86 | - Chat Functionality: usage/client/chat_functionality.md 87 | - Multiple Providers: usage/client/multiple_providers.md 88 | - Error Handling: usage/client/error_handling.md 89 | - Agent: 90 | - Creating Agents: usage/agent/creating_agents.md 91 | - Workflow Steps: usage/agent/workflow_steps.md 92 | - Tools and Tool Selection: usage/agent/tools.md 93 | - Context Management: usage/agent/context.md 94 | - Validation: usage/agent/validation.md 95 | - Ollama Manager: usage/ollama_manager.md 96 | - Examples: 97 | - Overview: examples/overview.md 98 | - Client Examples: 99 | - Simple Q&A Bot: examples/client/simple_qa.md 100 | - Multi-Provider Translator: examples/client/translator.md 101 | - AI Dungeon Master: examples/client/ai_dungeon_master.md 102 | - Agent Examples: 103 | - Simple Q&A Bot: examples/agent/simple_qa.md 104 | - Task Planner: examples/agent/task_planner.md 105 | - Writing Assistant: examples/agent/writing_assistant.md 106 | - Code Analyzer: examples/agent/code_analyzer.md 107 | - Learn: 108 | - Overview: learn/overview.md 109 | - Advanced: 110 | - Overview: advanced/overview.md 111 | - Client: 112 | - Ollama: advanced/client/ollama_specific.md 113 | - OpenAI: advanced/client/openai_specific.md 114 | - Replicate: advanced/client/replicate_specific.md 115 | - Groq: advanced/client/groq_specific.md 116 | - Agent: 117 | - Creating Custom Run: advanced/agent/creating_run.md 118 | - Error Handling: advanced/error_handling.md 119 | - API Reference: 120 | - Overview: api/overview.md 121 | - Agent: 122 | - Core: 123 | - Agent: api/agent/core/agent.md 124 | - AgentContext: api/agent/core/context.md 125 | 
- WorkflowManager: api/agent/core/workflow.md 126 | - StepExecutionEngine: api/agent/core/execution.md 127 | - Steps: 128 | - Step: api/agent/steps/step.md 129 | - StepTypes: api/agent/steps/types.md 130 | - Decorators: api/agent/steps/decorators.md 131 | - Tools: 132 | - Tool: api/agent/tools/tool.md 133 | - ToolRegistry: api/agent/tools/registry.md 134 | - ToolSelector: api/agent/tools/selector.md 135 | - Client: 136 | - ClientAI: api/client/clientai.md 137 | - AIProvider: api/client/ai_provider.md 138 | - Specific Providers: 139 | - Ollama: api/client/specific_providers/ollama_provider.md 140 | - OpenAI: api/client/specific_providers/openai_provider.md 141 | - Replicate: api/client/specific_providers/replicate_provider.md 142 | - Groq: api/client/specific_providers/groq_provider.md 143 | - Ollama Manager: 144 | - OllamaManager: api/client/ollama_manager/ollama_manager.md 145 | - OllamaServerConfig: api/client/ollama_manager/ollama_server_config.md 146 | - Showcase: showcase.md 147 | - Community: 148 | - Overview: community/overview.md 149 | - Contributing: community/CONTRIBUTING.md 150 | - Showcase Submission: community/showcase_submission.md 151 | - Code of Conduct: community/CODE_OF_CONDUCT.md 152 | - License: community/LICENSE.md 153 | 154 | markdown_extensions: 155 | - admonition 156 | - codehilite 157 | - toc: 158 | permalink: true 159 | - pymdownx.details: 160 | - pymdownx.highlight: 161 | anchor_linenums: true 162 | line_spans: __span 163 | pygments_lang_class: true 164 | - pymdownx.inlinehilite 165 | - pymdownx.snippets 166 | - pymdownx.superfences 167 | 168 | repo_name: igorbenav/clientai 169 | repo_url: https://github.com/igorbenav/clientai 170 | edit_uri: edit/main/docs/ 171 | 172 | extra_css: 173 | - stylesheets/extra.css 174 | 175 | extra: 176 | analytics: 177 | provider: google 178 | property: !ENV [GOOGLE_ANALYTICS_KEY, ''] 179 | -------------------------------------------------------------------------------- /mypy.ini: -------------------------------------------------------------------------------- 1 | [mypy] 2 | python_version = 3.11 3 | warn_return_any = True 4 | warn_unused_configs = True 5 | ignore_missing_imports = True 6 | 7 | [mypy-src.app.*] 8 | disallow_untyped_defs = True -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "clientai" 3 | version = "0.5.0" 4 | description = "A unified client for AI providers with built-in agent support." 
5 | authors = ["Igor Benav "] 6 | readme = "README.md" 7 | 8 | classifiers = [ 9 | "Development Status :: 4 - Beta", 10 | "Intended Audience :: Developers", 11 | "Intended Audience :: Information Technology", 12 | "Intended Audience :: Science/Research", 13 | "Intended Audience :: System Administrators", 14 | "Topic :: Software Development :: Libraries", 15 | "Topic :: Scientific/Engineering :: Artificial Intelligence", 16 | "Topic :: Software Development :: Libraries :: Python Modules", 17 | "License :: OSI Approved :: MIT License", 18 | "Programming Language :: Python :: 3", 19 | "Programming Language :: Python :: 3.9", 20 | "Programming Language :: Python :: 3.10", 21 | "Programming Language :: Python :: 3.11", 22 | "Programming Language :: Python :: 3.12", 23 | "Programming Language :: Python :: 3.13", 24 | "Operating System :: OS Independent", 25 | "Typing :: Typed", 26 | ] 27 | 28 | keywords = ["ai", "agents", "llm", "nlp", "language-model", "ai-agents"] 29 | 30 | [tool.poetry.dependencies] 31 | python = "^3.9" 32 | httpx = ">=0.27.0,<0.28.0" 33 | openai = {version = "^1.50.2", optional = true} 34 | replicate = {version = "^0.34.1", optional = true} 35 | ollama = {version = "^0.3.3", optional = true} 36 | groq = {version = "^0.11.0", optional = true} 37 | pydantic = "^2.10.3" 38 | 39 | [tool.poetry.group.dev.dependencies] 40 | ruff = "^0.6.8" 41 | pytest = "^8.3.3" 42 | mypy = "1.9.0" 43 | openai = "^1.50.2" 44 | replicate = "^0.34.1" 45 | ollama = "^0.3.3" 46 | groq = "^0.11.0" 47 | httpx = ">=0.27.0,<0.28.0" 48 | coverage = "^7.4.4" 49 | 50 | [tool.poetry.group.docs.dependencies] 51 | mkdocs = "^1.6.1" 52 | python-dotenv = "^1.0.1" 53 | mkdocs-meta-descriptions-plugin = "^3.0.0" 54 | mkdocs-material = "^9.5.48" 55 | mkdocstrings = {extras = ["python"], version = "^0.27.0"} 56 | mkdocs-static-i18n = "^1.3.0" 57 | 58 | [tool.poetry.extras] 59 | minimal = [] 60 | openai = ["openai", "httpx"] 61 | replicate = ["replicate"] 62 | ollama = ["ollama"] 63 | groq = ["groq", "httpx"] 64 | all = ["openai", "replicate", "ollama", "groq", "httpx"] 65 | 66 | [build-system] 67 | requires = ["poetry-core"] 68 | build-backend = "poetry.core.masonry.api" 69 | 70 | [tool.ruff] 71 | target-version = "py312" 72 | line-length = 79 73 | fix = true 74 | 75 | [tool.ruff.lint] 76 | select = [ 77 | # https://docs.astral.sh/ruff/rules/#pyflakes-f 78 | "F", # Pyflakes 79 | # https://docs.astral.sh/ruff/rules/#pycodestyle-e-w 80 | "E", # pycodestyle 81 | "W", # Warning 82 | # https://docs.astral.sh/ruff/rules/#flake8-comprehensions-c4 83 | # https://docs.astral.sh/ruff/rules/#mccabe-c90 84 | "C", # Complexity (mccabe+) & comprehensions 85 | # https://docs.astral.sh/ruff/rules/#pyupgrade-up 86 | "UP", # pyupgrade 87 | # https://docs.astral.sh/ruff/rules/#isort-i 88 | "I", # isort 89 | ] 90 | ignore = [ 91 | # https://docs.astral.sh/ruff/rules/#pycodestyle-e-w 92 | "E402", # module level import not at top of file 93 | # https://docs.astral.sh/ruff/rules/#pyupgrade-up 94 | "UP006", # use-pep585-annotation 95 | "UP007", # use-pep604-annotation 96 | "E741", # Ambiguous variable name 97 | "UP035", # deprecated-assertion 98 | ] 99 | 100 | [tool.ruff.lint.per-file-ignores] 101 | "__init__.py" = [ 102 | "F401", # unused import 103 | "F403", # star imports 104 | ] 105 | 106 | [tool.ruff.lint.mccabe] 107 | max-complexity = 24 108 | 109 | [tool.ruff.lint.pydocstyle] 110 | convention = "google" -------------------------------------------------------------------------------- /tests/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/benavlabs/clientai/b5010727492fba38fecc160398f96173bad608da/tests/__init__.py -------------------------------------------------------------------------------- /tests/agent/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/benavlabs/clientai/b5010727492fba38fecc160398f96173bad608da/tests/agent/__init__.py -------------------------------------------------------------------------------- /tests/groq/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/benavlabs/clientai/b5010727492fba38fecc160398f96173bad608da/tests/groq/__init__.py -------------------------------------------------------------------------------- /tests/groq/test_exceptions.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import MagicMock, patch 2 | 3 | import httpx 4 | import pytest 5 | 6 | from clientai.exceptions import ( 7 | APIError, 8 | AuthenticationError, 9 | ClientAIError, 10 | InvalidRequestError, 11 | ModelError, 12 | RateLimitError, 13 | TimeoutError, 14 | ) 15 | from clientai.groq.provider import ( 16 | GroqAuthenticationError, 17 | GroqRateLimitError, 18 | Provider, 19 | ) 20 | 21 | 22 | @pytest.fixture 23 | def provider(): 24 | return Provider(api_key="test_key") 25 | 26 | 27 | @pytest.fixture 28 | def mock_groq_client(): 29 | with patch("clientai.groq.provider.Client") as mock_client: 30 | mock_instance = MagicMock() 31 | mock_client.return_value = mock_instance 32 | mock_instance.chat.completions.create.return_value = MagicMock() 33 | yield mock_instance 34 | 35 | 36 | class MockResponse: 37 | def __init__(self, status_code: int): 38 | self.status_code = status_code 39 | self.request = httpx.Request( 40 | "GET", "https://api.groq.com/v1/chat/completions" 41 | ) 42 | 43 | 44 | def test_generate_text_authentication_error(mock_groq_client, provider): 45 | error = GroqAuthenticationError( 46 | message="Invalid API key: test_key", 47 | response=MockResponse(401), 48 | body=None, 49 | ) 50 | mock_groq_client.chat.completions.create.side_effect = error 51 | 52 | with pytest.raises(AuthenticationError) as exc_info: 53 | provider.generate_text("Test prompt", "llama3-8b-8192") 54 | 55 | assert "Invalid API key" in str(exc_info.value) 56 | assert exc_info.value.status_code == 401 57 | assert isinstance(exc_info.value.original_error, GroqAuthenticationError) 58 | 59 | 60 | def test_generate_text_rate_limit_error(mock_groq_client, provider): 61 | error = GroqRateLimitError( 62 | message="Rate limit exceeded", response=MockResponse(429), body=None 63 | ) 64 | mock_groq_client.chat.completions.create.side_effect = error 65 | 66 | with pytest.raises(RateLimitError) as exc_info: 67 | provider.generate_text("Test prompt", "llama3-8b-8192") 68 | 69 | assert "Rate limit exceeded" in str(exc_info.value) 70 | assert exc_info.value.status_code == 429 71 | assert isinstance(exc_info.value.original_error, GroqRateLimitError) 72 | 73 | 74 | def test_generate_text_model_error(mock_groq_client, provider): 75 | from groq import NotFoundError 76 | 77 | error = NotFoundError( 78 | message="Model not found", response=MockResponse(404), body=None 79 | ) 80 | mock_groq_client.chat.completions.create.side_effect = error 81 | 82 | with pytest.raises(ModelError) as exc_info: 83 | provider.generate_text("Test prompt", 
"invalid-model") 84 | 85 | assert "Model not found" in str(exc_info.value) 86 | assert exc_info.value.status_code == 404 87 | 88 | 89 | def test_generate_text_invalid_request_error(mock_groq_client, provider): 90 | from groq import BadRequestError 91 | 92 | error = BadRequestError( 93 | message="Invalid request parameters", 94 | response=MockResponse(400), 95 | body=None, 96 | ) 97 | mock_groq_client.chat.completions.create.side_effect = error 98 | 99 | with pytest.raises(InvalidRequestError) as exc_info: 100 | provider.generate_text("Test prompt", "llama3-8b-8192") 101 | 102 | assert "Invalid request parameters" in str(exc_info.value) 103 | assert exc_info.value.status_code == 400 104 | 105 | 106 | def test_generate_text_timeout_error(mock_groq_client, provider): 107 | from groq import APITimeoutError 108 | 109 | error = APITimeoutError( 110 | request=httpx.Request("GET", "https://api.groq.com") 111 | ) 112 | mock_groq_client.chat.completions.create.side_effect = error 113 | 114 | with pytest.raises(TimeoutError) as exc_info: 115 | provider.generate_text("Test prompt", "llama3-8b-8192") 116 | 117 | assert "Request timed out" in str(exc_info.value) 118 | assert exc_info.value.status_code == 408 119 | 120 | 121 | def test_generate_text_api_error(mock_groq_client, provider): 122 | from groq import InternalServerError 123 | 124 | error = InternalServerError( 125 | message="Internal server error", response=MockResponse(500), body=None 126 | ) 127 | mock_groq_client.chat.completions.create.side_effect = error 128 | 129 | with pytest.raises(APIError) as exc_info: 130 | provider.generate_text("Test prompt", "llama3-8b-8192") 131 | 132 | assert "Internal server error" in str(exc_info.value) 133 | assert exc_info.value.status_code == 500 134 | 135 | 136 | def test_chat_error(mock_groq_client, provider): 137 | from groq import BadRequestError 138 | 139 | error = BadRequestError( 140 | message="Invalid request parameters", 141 | response=MockResponse(400), 142 | body=None, 143 | ) 144 | mock_groq_client.chat.completions.create.side_effect = error 145 | 146 | with pytest.raises(InvalidRequestError) as exc_info: 147 | provider.chat( 148 | [{"role": "user", "content": "Test message"}], "llama3-8b-8192" 149 | ) 150 | 151 | assert "Invalid request parameters" in str(exc_info.value) 152 | assert exc_info.value.status_code == 400 153 | 154 | 155 | def test_generic_error(mock_groq_client, provider): 156 | mock_groq_client.chat.completions.create.side_effect = Exception( 157 | "Unexpected error" 158 | ) 159 | 160 | with pytest.raises(ClientAIError) as exc_info: 161 | provider.generate_text("Test prompt", "llama3-8b-8192") 162 | 163 | assert "Unexpected error" in str(exc_info.value) 164 | assert isinstance(exc_info.value.original_error, Exception) 165 | -------------------------------------------------------------------------------- /tests/ollama/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/benavlabs/clientai/b5010727492fba38fecc160398f96173bad608da/tests/ollama/__init__.py -------------------------------------------------------------------------------- /tests/ollama/test_exceptions.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import patch 2 | 3 | import pytest 4 | 5 | from clientai.exceptions import ( 6 | APIError, 7 | AuthenticationError, 8 | InvalidRequestError, 9 | ModelError, 10 | RateLimitError, 11 | TimeoutError, 12 | ) 13 | from clientai.ollama.provider 
import Provider 14 | 15 | 16 | @pytest.fixture 17 | def provider(): 18 | return Provider() 19 | 20 | 21 | @pytest.fixture(autouse=True) 22 | def mock_ollama(): 23 | with patch("clientai.ollama.provider.ollama") as mock: 24 | mock.RequestError = type("RequestError", (Exception,), {}) 25 | mock.ResponseError = type("ResponseError", (Exception,), {}) 26 | yield mock 27 | 28 | 29 | @pytest.fixture 30 | def valid_chat_request(): 31 | return { 32 | "model": "test-model", 33 | "messages": [{"role": "user", "content": "Test message"}], 34 | "stream": False, 35 | "format": "", 36 | "options": None, 37 | "keep_alive": None, 38 | } 39 | 40 | 41 | def test_generate_text_authentication_error(mock_ollama, provider): 42 | error = mock_ollama.RequestError("Authentication failed") 43 | mock_ollama.generate.side_effect = error 44 | 45 | with pytest.raises(AuthenticationError) as exc_info: 46 | provider.generate_text(prompt="Test prompt", model="test-model") 47 | 48 | assert str(exc_info.value) == "[401] Authentication failed" 49 | assert exc_info.value.status_code == 401 50 | assert exc_info.value.original_exception is error 51 | 52 | 53 | def test_generate_text_rate_limit_error(mock_ollama, provider): 54 | error = mock_ollama.RequestError("Rate limit exceeded") 55 | mock_ollama.generate.side_effect = error 56 | 57 | with pytest.raises(RateLimitError) as exc_info: 58 | provider.generate_text(prompt="Test prompt", model="test-model") 59 | 60 | assert str(exc_info.value) == "[429] Rate limit exceeded" 61 | assert exc_info.value.status_code == 429 62 | assert exc_info.value.original_exception is error 63 | 64 | 65 | def test_generate_text_model_error(mock_ollama, provider): 66 | error = mock_ollama.RequestError("Model not found") 67 | mock_ollama.generate.side_effect = error 68 | 69 | with pytest.raises(ModelError) as exc_info: 70 | provider.generate_text(prompt="Test prompt", model="test-model") 71 | 72 | assert str(exc_info.value) == "[404] Model not found" 73 | assert exc_info.value.status_code == 404 74 | assert exc_info.value.original_exception is error 75 | 76 | 77 | def test_generate_text_invalid_request_error(mock_ollama, provider): 78 | error = mock_ollama.RequestError("Invalid request") 79 | mock_ollama.generate.side_effect = error 80 | 81 | with pytest.raises(InvalidRequestError) as exc_info: 82 | provider.generate_text(prompt="Test prompt", model="test-model") 83 | 84 | assert str(exc_info.value) == "[400] Invalid request" 85 | assert exc_info.value.status_code == 400 86 | assert exc_info.value.original_exception is error 87 | 88 | 89 | def test_generate_text_timeout_error(mock_ollama, provider): 90 | error = mock_ollama.ResponseError("Request timed out") 91 | mock_ollama.generate.side_effect = error 92 | 93 | with pytest.raises(TimeoutError) as exc_info: 94 | provider.generate_text(prompt="Test prompt", model="test-model") 95 | 96 | assert str(exc_info.value) == "[408] Request timed out" 97 | assert exc_info.value.status_code == 408 98 | assert exc_info.value.original_exception is error 99 | 100 | 101 | def test_generate_text_api_error(mock_ollama, provider): 102 | error = mock_ollama.ResponseError("API response error") 103 | mock_ollama.generate.side_effect = error 104 | 105 | with pytest.raises(APIError) as exc_info: 106 | provider.generate_text(prompt="Test prompt", model="test-model") 107 | 108 | assert str(exc_info.value) == "[500] API response error" 109 | assert exc_info.value.status_code == 500 110 | assert exc_info.value.original_exception is error 111 | 112 | 113 | def 
test_chat_request_error(mock_ollama, provider, valid_chat_request): 114 | error = mock_ollama.RequestError("Invalid chat request") 115 | mock_ollama.chat.side_effect = error 116 | 117 | with pytest.raises(InvalidRequestError) as exc_info: 118 | provider.chat(**valid_chat_request) 119 | 120 | assert str(exc_info.value) == "[400] Invalid chat request" 121 | assert exc_info.value.status_code == 400 122 | assert exc_info.value.original_exception is error 123 | 124 | 125 | def test_chat_response_error(mock_ollama, provider, valid_chat_request): 126 | error = mock_ollama.ResponseError("Chat API response error") 127 | mock_ollama.chat.side_effect = error 128 | 129 | with pytest.raises(APIError) as exc_info: 130 | provider.chat(**valid_chat_request) 131 | 132 | assert str(exc_info.value) == "[500] Chat API response error" 133 | assert exc_info.value.status_code == 500 134 | assert exc_info.value.original_exception is error 135 | -------------------------------------------------------------------------------- /tests/openai/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/benavlabs/clientai/b5010727492fba38fecc160398f96173bad608da/tests/openai/__init__.py -------------------------------------------------------------------------------- /tests/openai/test_exceptions.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import MagicMock, patch 2 | 3 | import pytest 4 | 5 | from clientai.exceptions import ( 6 | APIError, 7 | AuthenticationError, 8 | ClientAIError, 9 | InvalidRequestError, 10 | ModelError, 11 | RateLimitError, 12 | TimeoutError, 13 | ) 14 | from clientai.openai.provider import Provider 15 | 16 | 17 | class MockOpenAIError(Exception): 18 | def __init__(self, message, error_type, status_code): 19 | self.message = message 20 | self.type = error_type 21 | self.status_code = status_code 22 | 23 | def __str__(self): 24 | return f"Error code: {self.status_code} - {self.message}" 25 | 26 | 27 | class MockOpenAIAuthenticationError(MockOpenAIError): 28 | def __init__(self, message, status_code=401): 29 | super().__init__(message, "invalid_request_error", status_code) 30 | 31 | 32 | @pytest.fixture 33 | def provider(): 34 | return Provider(api_key="test_key") 35 | 36 | 37 | @pytest.fixture 38 | def mock_openai_client(): 39 | with patch("clientai.openai.provider.Client") as mock_client: 40 | mock_instance = MagicMock() 41 | mock_client.return_value = mock_instance 42 | 43 | mock_instance.with_api_key.return_value = mock_instance 44 | mock_instance.chat.completions.create.return_value = MagicMock() 45 | 46 | yield mock_instance 47 | 48 | 49 | def test_generate_text_authentication_error(mock_openai_client, provider): 50 | error = MockOpenAIAuthenticationError( 51 | "Incorrect API key provided: test_key. You can find your API key at https://platform.openai.com/account/api-keys." 
52 | ) 53 | mock_openai_client.chat.completions.create.side_effect = error 54 | 55 | with pytest.raises(AuthenticationError) as exc_info: 56 | provider.generate_text("Test prompt", "test-model") 57 | 58 | assert "Incorrect API key provided" in str(exc_info.value) 59 | assert exc_info.value.status_code == 401 60 | assert isinstance( 61 | exc_info.value.original_error, MockOpenAIAuthenticationError 62 | ) 63 | 64 | 65 | def test_generate_text_rate_limit_error(mock_openai_client, provider): 66 | error = MockOpenAIError("Rate limit exceeded", "rate_limit_error", 429) 67 | mock_openai_client.chat.completions.create.side_effect = error 68 | 69 | with pytest.raises(RateLimitError) as exc_info: 70 | provider.generate_text("Test prompt", "test-model") 71 | 72 | assert "Rate limit exceeded" in str(exc_info.value) 73 | assert exc_info.value.status_code == 429 74 | assert isinstance(exc_info.value.original_error, MockOpenAIError) 75 | 76 | 77 | def test_generate_text_model_error(mock_openai_client, provider): 78 | error = MockOpenAIError("Model not found", "model_not_found", 404) 79 | mock_openai_client.chat.completions.create.side_effect = error 80 | 81 | with pytest.raises(ModelError) as exc_info: 82 | provider.generate_text("Test prompt", "test-model") 83 | 84 | assert "Model not found" in str(exc_info.value) 85 | assert exc_info.value.status_code == 404 86 | assert isinstance(exc_info.value.original_error, MockOpenAIError) 87 | 88 | 89 | def test_generate_text_invalid_request_error(mock_openai_client, provider): 90 | error = MockOpenAIError("Invalid request", "invalid_request_error", 400) 91 | mock_openai_client.chat.completions.create.side_effect = error 92 | 93 | with pytest.raises(InvalidRequestError) as exc_info: 94 | provider.generate_text("Test prompt", "test-model") 95 | 96 | assert "Invalid request" in str(exc_info.value) 97 | assert exc_info.value.status_code == 400 98 | assert isinstance(exc_info.value.original_error, MockOpenAIError) 99 | 100 | 101 | def test_generate_text_timeout_error(mock_openai_client, provider): 102 | error = MockOpenAIError("Request timed out", "timeout_error", 408) 103 | mock_openai_client.chat.completions.create.side_effect = error 104 | 105 | with pytest.raises(TimeoutError) as exc_info: 106 | provider.generate_text("Test prompt", "test-model") 107 | 108 | assert "Request timed out" in str(exc_info.value) 109 | assert exc_info.value.status_code == 408 110 | assert isinstance(exc_info.value.original_error, MockOpenAIError) 111 | 112 | 113 | def test_generate_text_api_error(mock_openai_client, provider): 114 | error = MockOpenAIError("API error", "api_error", 500) 115 | mock_openai_client.chat.completions.create.side_effect = error 116 | 117 | with pytest.raises(APIError) as exc_info: 118 | provider.generate_text("Test prompt", "test-model") 119 | 120 | assert "API error" in str(exc_info.value) 121 | assert exc_info.value.status_code == 500 122 | assert isinstance(exc_info.value.original_error, MockOpenAIError) 123 | 124 | 125 | def test_chat_error(mock_openai_client, provider): 126 | error = MockOpenAIError("Chat error", "invalid_request_error", 400) 127 | mock_openai_client.chat.completions.create.side_effect = error 128 | 129 | with pytest.raises(InvalidRequestError) as exc_info: 130 | provider.chat( 131 | [{"role": "user", "content": "Test message"}], "test-model" 132 | ) 133 | 134 | assert "Chat error" in str(exc_info.value) 135 | assert exc_info.value.status_code == 400 136 | assert isinstance(exc_info.value.original_error, MockOpenAIError) 137 | 138 | 139 
| def test_generic_error(mock_openai_client, provider): 140 | mock_openai_client.chat.completions.create.side_effect = Exception( 141 | "Unexpected error" 142 | ) 143 | 144 | with pytest.raises(ClientAIError) as exc_info: 145 | provider.generate_text("Test prompt", "test-model") 146 | 147 | assert "Unexpected error" in str(exc_info.value) 148 | assert isinstance(exc_info.value.original_error, Exception) 149 | -------------------------------------------------------------------------------- /tests/replicate/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/benavlabs/clientai/b5010727492fba38fecc160398f96173bad608da/tests/replicate/__init__.py -------------------------------------------------------------------------------- /tests/replicate/test_exceptions.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import MagicMock, patch 2 | 3 | import pytest 4 | 5 | from clientai.exceptions import ( 6 | APIError, 7 | AuthenticationError, 8 | InvalidRequestError, 9 | ModelError, 10 | RateLimitError, 11 | TimeoutError, 12 | ) 13 | from clientai.replicate.provider import Provider 14 | 15 | 16 | class MockReplicateError(Exception): 17 | def __init__(self, message, status_code=None): 18 | self.message = message 19 | self.status_code = status_code 20 | 21 | def __str__(self): 22 | return self.message 23 | 24 | 25 | @pytest.fixture 26 | def provider(): 27 | return Provider(api_key="test_key") 28 | 29 | 30 | @pytest.fixture 31 | def mock_replicate_client(): 32 | with patch("clientai.replicate.provider.Client") as mock_client: 33 | mock_instance = MagicMock() 34 | mock_client.return_value = mock_instance 35 | yield mock_instance 36 | 37 | 38 | def test_generate_text_authentication_error(mock_replicate_client, provider): 39 | error = MockReplicateError("Authentication failed", status_code=401) 40 | mock_replicate_client.predictions.create.side_effect = error 41 | 42 | with pytest.raises(AuthenticationError) as exc_info: 43 | provider.generate_text("Test prompt", "test-model") 44 | 45 | assert "Authentication failed" in str(exc_info.value) 46 | assert exc_info.value.status_code == 401 47 | assert isinstance(exc_info.value.original_error, MockReplicateError) 48 | 49 | 50 | def test_generate_text_rate_limit_error(mock_replicate_client, provider): 51 | error = MockReplicateError("Rate limit exceeded", status_code=429) 52 | mock_replicate_client.predictions.create.side_effect = error 53 | 54 | with pytest.raises(RateLimitError) as exc_info: 55 | provider.generate_text("Test prompt", "test-model") 56 | 57 | assert "Rate limit exceeded" in str(exc_info.value) 58 | assert exc_info.value.status_code == 429 59 | assert isinstance(exc_info.value.original_error, MockReplicateError) 60 | 61 | 62 | def test_generate_text_model_error(mock_replicate_client, provider): 63 | error = MockReplicateError("Model not found", status_code=404) 64 | mock_replicate_client.predictions.create.side_effect = error 65 | 66 | with pytest.raises(ModelError) as exc_info: 67 | provider.generate_text("Test prompt", "test-model") 68 | 69 | assert "Model not found" in str(exc_info.value) 70 | assert exc_info.value.status_code == 404 71 | assert isinstance(exc_info.value.original_error, MockReplicateError) 72 | 73 | 74 | def test_generate_text_invalid_request_error(mock_replicate_client, provider): 75 | error = MockReplicateError("Invalid request", status_code=400) 76 | 
mock_replicate_client.predictions.create.side_effect = error 77 | 78 | with pytest.raises(InvalidRequestError) as exc_info: 79 | provider.generate_text("Test prompt", "test-model") 80 | 81 | assert "Invalid request" in str(exc_info.value) 82 | assert exc_info.value.status_code == 400 83 | assert isinstance(exc_info.value.original_error, MockReplicateError) 84 | 85 | 86 | def test_generate_text_api_error(mock_replicate_client, provider): 87 | error = MockReplicateError("API error", status_code=500) 88 | mock_replicate_client.predictions.create.side_effect = error 89 | 90 | with pytest.raises(APIError) as exc_info: 91 | provider.generate_text("Test prompt", "test-model") 92 | 93 | assert "API error" in str(exc_info.value) 94 | assert exc_info.value.status_code == 500 95 | assert isinstance(exc_info.value.original_error, MockReplicateError) 96 | 97 | 98 | def test_generate_text_timeout_error(mock_replicate_client, provider): 99 | error = MockReplicateError("Request timed out", status_code=408) 100 | mock_replicate_client.predictions.create.side_effect = error 101 | 102 | with pytest.raises(TimeoutError) as exc_info: 103 | provider.generate_text("Test prompt", "test-model") 104 | 105 | assert "Request timed out" in str(exc_info.value) 106 | assert exc_info.value.status_code == 408 107 | assert isinstance(exc_info.value.original_error, MockReplicateError) 108 | 109 | 110 | def test_chat_error(mock_replicate_client, provider): 111 | error = MockReplicateError("Chat error", status_code=400) 112 | mock_replicate_client.predictions.create.side_effect = error 113 | 114 | with pytest.raises(InvalidRequestError) as exc_info: 115 | provider.chat( 116 | [{"role": "user", "content": "Test message"}], "test-model" 117 | ) 118 | 119 | assert "Chat error" in str(exc_info.value) 120 | assert exc_info.value.status_code == 400 121 | assert isinstance(exc_info.value.original_error, MockReplicateError) 122 | 123 | 124 | def test_generic_error(mock_replicate_client, provider): 125 | error = Exception("Unexpected error") 126 | mock_replicate_client.predictions.create.side_effect = error 127 | 128 | with pytest.raises(APIError) as exc_info: 129 | provider.generate_text("Test prompt", "test-model") 130 | 131 | assert "Unexpected error" in str(exc_info.value) 132 | assert isinstance(exc_info.value.original_error, Exception) 133 | --------------------------------------------------------------------------------