├── docs
├── services.py
├── .gitignore
├── .vitepress
│ ├── cache
│ │ └── deps
│ │ │ ├── package.json
│ │ │ ├── vue.js.map
│ │ │ ├── vitepress___@vueuse_core.js.map
│ │ │ ├── _metadata.json
│ │ │ └── vue.js
│ ├── theme
│ │ ├── index.ts
│ │ ├── components
│ │ │ ├── CodeComparison.vue
│ │ │ ├── FeatureCard.vue
│ │ │ ├── ApiTable.vue
│ │ │ └── InstallationSteps.vue
│ │ └── style.css
│ └── config.mts
├── package.json
├── services.md
├── installation.md
├── errorhandling.md
├── texttospeech.md
├── pipelines.md
├── overview.md
├── appcreation.md
├── index.md
└── quickstart.md
├── easilyai
├── py.typed
├── services
│ ├── __init__.py
│ ├── ollama_service.py
│ ├── grok_service.py
│ ├── anthropic_service.py
│ ├── gemini_service.py
│ ├── openai_service.py
│ └── huggingface_service.py
├── utils.py
├── custom_ai.py
├── pipeline.py
├── exceptions.py
├── app.py
├── __init__.py
├── config.py
└── utils
│ └── retry.py
├── requirements.txt
├── .github
├── dependabot.yml
└── workflows
│ ├── dependency-review.yml
│ ├── docs.yml
│ ├── docsdeploy.yml
│ ├── python-publish.yml
│ ├── claude.yml
│ ├── claude-code-review.yml
│ └── ci.yml
├── .deepsource.toml
├── .gitignore
├── requirements-test.txt
├── requirements-docs.txt
├── .flake8
├── .coveragerc
├── .env.example
├── mypy.ini
├── tests
├── test_utils.py
├── test_ollama_service.py
├── test_enhanced_app.py
├── test_batch.py
├── test_gemini_service.py
├── test_openai_service.py
├── test_app_creation.py
├── test_huggingface_service.py
├── test_anthropic_service.py
├── test_grok_service.py
├── test_custom_ai.py
├── test_pipeline.py
├── test_exceptions.py
├── test_enhanced_pipeline.py
└── conftest.py
├── pytest.ini
├── requirements-dev.txt
├── setup.py
├── examples
└── basic_example.py
├── .pre-commit-config.yaml
├── Makefile
├── mkdocs.yml
├── README.md
├── CODE_OF_CONDUCT.md
├── pyproject.toml
└── CONTRIBUTING.md
/docs/services.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/easilyai/py.typed:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules/
--------------------------------------------------------------------------------
/easilyai/services/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/.vitepress/cache/deps/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "module"
3 | }
4 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | openai>=1.0.0
2 | requests>=2.0.0
3 | google-generativeai>=0.8.3
4 | anthropic>=0.42.0
5 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: "pip"
4 | directory: "/"
5 | schedule:
6 | interval: "daily"
7 |
--------------------------------------------------------------------------------
/docs/.vitepress/cache/deps/vue.js.map:
--------------------------------------------------------------------------------
1 | {
2 | "version": 3,
3 | "sources": [],
4 | "sourcesContent": [],
5 | "mappings": "",
6 | "names": []
7 | }
8 |
--------------------------------------------------------------------------------
/docs/.vitepress/cache/deps/vitepress___@vueuse_core.js.map:
--------------------------------------------------------------------------------
1 | {
2 | "version": 3,
3 | "sources": [],
4 | "sourcesContent": [],
5 | "mappings": "",
6 | "names": []
7 | }
8 |
--------------------------------------------------------------------------------
/.deepsource.toml:
--------------------------------------------------------------------------------
1 | version = 1
2 |
3 | [[analyzers]]
4 | name = "python"
5 |
6 | [analyzers.meta]
7 | runtime_version = "3.x.x"
8 |
9 | [[transformers]]
10 | name = "yapf"
11 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/
2 | *.pyc
3 | *.pyo
4 | *.pyd
5 | .env
6 | build/
7 | dist/
8 | *.egg-info/
9 | .vscode/
10 | .DS_Store
11 | easyai_env/
12 | docs/node_modules/
13 | easyai.egg-info/
14 | .venv/
15 | CLAUDE.md
--------------------------------------------------------------------------------
/docs/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "devDependencies": {
3 | "vitepress": "^1.5.0"
4 | },
5 | "scripts": {
6 | "docs:dev": "vitepress dev",
7 | "docs:build": "vitepress build",
8 | "docs:preview": "vitepress preview"
9 | }
10 | }
--------------------------------------------------------------------------------
/docs/services.md:
--------------------------------------------------------------------------------
1 | # Service List
2 | Below is a list of all services currently supported by EasilyAI. Don't see one you like? [Suggest](https://github.com/GustyCube/EasilyAI/issues) it!
3 |
4 | - [OpenAI](/openai) (GPT, DALLE, etc)
5 | - [Ollama](/ollama)
6 | - [Grok](/grok)
7 | - [Anthropic](/anthropic) (Claude, etc)
8 | - [Hugging Face](/huggingface)
9 |
--------------------------------------------------------------------------------
/docs/installation.md:
--------------------------------------------------------------------------------
1 | # Installation Guide
2 |
3 | ## Installation
4 |
5 | To install the EasilyAI library, use pip:
6 |
7 | ```bash
8 | pip install easilyai
9 | ```
10 |
11 | ## Requirements
12 | - **Python 3.7 or higher**
13 | - **API Key for OpenAI** *(required only if using OpenAI services)*
14 | - **Ollama Installation** for running local models.
15 |
16 | After installation, proceed to [Creating an AI App](./appcreation.md).
--------------------------------------------------------------------------------
/requirements-test.txt:
--------------------------------------------------------------------------------
1 | # Testing dependencies for EasilyAI
2 | # Install with: pip install -r requirements-test.txt
3 |
4 | # Core testing framework
5 | pytest>=7.4.0
6 | pytest-cov>=4.1.0
7 | pytest-asyncio>=0.21.0
8 | pytest-mock>=3.11.0
9 | pytest-xdist>=3.3.0
10 | coverage>=7.3.0
11 |
12 | # Mocking and test utilities
13 | responses>=0.23.0
14 | freezegun>=1.2.2
15 | factory-boy>=3.3.0
16 | faker>=19.3.0
17 |
18 | # Environment management for tests
19 | python-dotenv>=1.0.0
20 |
21 | # Performance testing
22 | pytest-benchmark>=4.0.0
--------------------------------------------------------------------------------
/requirements-docs.txt:
--------------------------------------------------------------------------------
1 | # Documentation dependencies for EasilyAI
2 | # Install with: pip install -r requirements-docs.txt
3 |
4 | # Core documentation tools
5 | mkdocs>=1.5.0
6 | mkdocs-material>=9.2.0
7 | mkdocstrings[python]>=0.22.0
8 | mkdocs-autorefs>=0.5.0
9 |
10 | # Optional documentation enhancements
11 | mkdocs-mermaid2-plugin>=1.1.0
12 | mkdocs-git-revision-date-localized-plugin>=1.2.0
13 | mkdocs-minify-plugin>=0.7.0
14 | mkdocs-redirects>=1.2.0
15 |
16 | # Dependencies for API reference generation
17 | griffe>=0.35.0
18 | markdown>=3.4.0
19 | pymdown-extensions>=10.0.0
--------------------------------------------------------------------------------
/.github/workflows/dependency-review.yml:
--------------------------------------------------------------------------------
1 | name: Dependency Review
2 |
3 | on:
4 | pull_request:
5 | branches: [ main, develop ]
6 |
7 | permissions:
8 | contents: read
9 | pull-requests: write
10 |
11 | jobs:
12 | dependency-review:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - name: Checkout Repository
16 | uses: actions/checkout@v4
17 |
18 | - name: Dependency Review
19 | uses: actions/dependency-review-action@v3
20 | with:
21 | fail-on-severity: moderate
22 | deny-licenses: GPL-3.0, AGPL-3.0
23 | comment-summary-in-pr: always
--------------------------------------------------------------------------------
/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | max-line-length = 100
3 | max-complexity = 10
4 | extend-ignore =
5 | E203, # whitespace before ':' (conflicts with black)
6 | E501, # line too long (handled by black)
7 | W503, # line break before binary operator (conflicts with black)
8 | W504, # line break after binary operator (conflicts with black)
9 | exclude =
10 | .git,
11 | __pycache__,
12 | .venv,
13 | venv,
14 | build,
15 | dist,
16 | *.egg-info,
17 | .pytest_cache,
18 | .mypy_cache,
19 | .ruff_cache,
20 | htmlcov,
21 | easilyai/_version.py
22 | per-file-ignores =
23 | __init__.py:F401,F403
24 | tests/*:F401,F403,F811
25 | statistics = True
26 | count = True
27 | show-source = True
--------------------------------------------------------------------------------
/docs/errorhandling.md:
--------------------------------------------------------------------------------
1 | # Error Handling
2 |
3 | ## Overview
4 | EasilyAI includes robust error handling with clear, emoji-coded messages for quick debugging.
5 |
6 | ### Common Errors
7 | - 🔐 **Missing API Key**: "No API key provided! Add your API key to initialize the service."
8 | - 🚫 **Invalid Request**: "The request is invalid. Please check your inputs."
9 | - 🌐 **Connection Error**: "Unable to connect to the API. Ensure the server is running."
10 | - ⏳ **Rate Limit Exceeded**: "Too many requests! Wait and try again."
11 |
12 | ## Example
13 |
14 | ```python
15 | try:
16 | app = easilyai.create_app(name="example", service="openai")
17 | app.request("Test request")
18 | except Exception as e:
19 | print(e)
20 | ```
21 |
--------------------------------------------------------------------------------
/.coveragerc:
--------------------------------------------------------------------------------
1 | [run]
2 | source = easilyai
3 | omit =
4 | */tests/*
5 | */_version.py
6 | */test_*.py
7 | */conftest.py
8 | */__pycache__/*
9 | */venv/*
10 | */.venv/*
11 |
12 | [report]
13 | exclude_lines =
14 | pragma: no cover
15 | def __repr__
16 | def __str__
17 | raise AssertionError
18 | raise NotImplementedError
19 | if __name__ == .__main__.:
20 | if TYPE_CHECKING:
21 | if typing.TYPE_CHECKING:
22 | @abstractmethod
23 | @abc.abstractmethod
24 | except ImportError:
25 | pass
26 | ...
27 |
28 | precision = 2
29 | show_missing = True
30 | skip_covered = False
31 | skip_empty = True
32 |
33 | [html]
34 | directory = htmlcov
35 | title = EasilyAI Coverage Report
36 |
37 | [xml]
38 | output = coverage.xml
--------------------------------------------------------------------------------
/docs/texttospeech.md:
--------------------------------------------------------------------------------
1 | # Text-to-Speech Guide
2 |
3 | ## Overview
4 | EasilyAI supports OpenAI's Text-to-Speech API for converting text into audio files.
5 |
6 | ## Generate Speech with OpenAI
7 |
8 | ```python
9 | # Initialize a TTS App
10 | tts_app = easilyai.create_tts_app(
11 | name="tts_app",
12 | service="openai",
13 | apikey="YOUR_API_KEY",
14 | model="tts-1"
15 | )
16 |
17 | # Convert text to speech
18 | output_file = tts_app.request_tts(
19 | text="Hello, I am your AI assistant!",
20 | tts_model="tts-1",
21 | voice="onyx",
22 | output_file="hello_ai.mp3"
23 | )
24 |
25 | print(f"TTS output saved to: {output_file}")
26 | ```
27 |
28 | ## Supported Voices
29 | - `onyx`
30 | - `alloy`
31 | - `echo`
32 |
33 | Next, explore [Pipelines](./pipelines.md) for chaining tasks.
--------------------------------------------------------------------------------
/docs/pipelines.md:
--------------------------------------------------------------------------------
1 | # Pipeline Guide
2 |
3 | ## Overview
4 | Pipelines in EasilyAI allow you to chain multiple tasks (e.g., text generation, image generation, and TTS) into a workflow.
5 |
6 | ## Example Pipeline
7 |
8 | ```python
9 | # Create a pipeline
10 | pipeline = easilyai.EasilyAIPipeline(app)
11 |
12 | # Add tasks
13 | pipeline.add_task("generate_text", "Write a poem about AI and nature.")
14 | pipeline.add_task("generate_image", "A futuristic city skyline.")
15 | pipeline.add_task("text_to_speech", "Here is a futuristic AI-powered city!")
16 |
17 | # Run the pipeline
18 | results = pipeline.run()
19 |
20 | # Print results
21 | for task_result in results:
22 | print(f"Task: {task_result['task']}\nResult: {task_result['result']}\n")
23 | ```
24 |
25 | Discover how to extend EasilyAI with [Custom AI Models](./customai.md).
26 |
--------------------------------------------------------------------------------
/docs/overview.md:
--------------------------------------------------------------------------------
1 | ---
2 | outline: deep
3 | ---
4 |
5 | # Overview
6 |
7 | **EasilyAI** is a Python library that simplifies AI app development by integrating popular AI services like **OpenAI** and **Ollama**. It provides a clean, unified interface for text generation, image generation, and text-to-speech (TTS) tasks.
8 |
9 | ## Features
10 | - **App Creation**: Simplify initializing AI services like OpenAI and Ollama.
11 | - **Text-to-Speech**: Convert text to speech with services like OpenAI's TTS API (with voice selection).
12 | - **Custom AI Support**: Integrate and register custom AI models.
13 | - **Unified Request Handling**: Automatically determine task types like text, image, or TTS requests.
14 | - **Pipeline Support**: Chain multiple tasks into a seamless workflow.
15 |
16 | Get started by following the [Installation Guide](./installation.md).
--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
1 | # Example environment variables for EasilyAI
2 | # Copy this file to .env and fill in your actual API keys
3 |
4 | # OpenAI API Configuration
5 | OPENAI_API_KEY=your_openai_api_key_here
6 | OPENAI_ORG_ID=your_openai_org_id_here # Optional
7 |
8 | # Anthropic API Configuration
9 | ANTHROPIC_API_KEY=your_anthropic_api_key_here
10 |
11 | # Google Gemini API Configuration
12 | GEMINI_API_KEY=your_gemini_api_key_here
13 |
14 | # Groq API Configuration
15 | GROQ_API_KEY=your_groq_api_key_here
16 |
17 | # Logging Configuration
18 | LOG_LEVEL=INFO
19 | LOG_FILE=logs/easilyai.log
20 |
21 | # Rate Limiting Configuration
22 | DEFAULT_RATE_LIMIT=10 # requests per second
23 | DEFAULT_BURST_LIMIT=5 # burst capacity
24 |
25 | # Testing Configuration
26 | TEST_TIMEOUT=30 # seconds
27 | TEST_RETRIES=3
28 |
29 | # Development Configuration
30 | DEV_MODE=false
31 | DEBUG=false
--------------------------------------------------------------------------------
/easilyai/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 |
def save_to_file(data, filename="output.txt"):
    """
    Save text data to a file.

    :param data: Text content to write.
    :param filename: Destination path; defaults to "output.txt".
    :return: Confirmation message containing the absolute path written.
    """
    # Explicit UTF-8 so the written file does not depend on the platform's
    # default locale encoding (e.g. cp1252 on Windows, which would fail on
    # non-ASCII model output).
    with open(filename, "w", encoding="utf-8") as file:
        file.write(data)
    return f"Data saved to {os.path.abspath(filename)}"
10 |
def validate_service(service_name):
    """
    Validate the service name input.

    :param service_name: Identifier of the AI backend (e.g. "openai").
    :raises ValueError: If ``service_name`` is not a supported service.
    """
    # Keep this list in sync with the implementations under easilyai/services/
    # ("anthropic" and "huggingface" were missing although both service
    # modules exist; "claude" is kept for backward compatibility).
    valid_services = [
        "openai", "ollama", "grok", "gemini", "claude",
        "anthropic", "huggingface",
    ]
    if service_name not in valid_services:
        raise ValueError(f"Invalid service: {service_name}. Supported: {valid_services}")
18 |
def print_banner(app_name):
    """
    Display a simple banner when the app is created.

    :param app_name: Name shown inside the banner.
    """
    # The triple-quoted literal keeps the exact banner layout (leading blank
    # line, four-space indent, trailing indent line) in one place.
    print(
        f"""
    *************************************
    {app_name} Initialized
    *************************************
    """
    )
29 |
--------------------------------------------------------------------------------
/mypy.ini:
--------------------------------------------------------------------------------
1 | [mypy]
2 | python_version = 3.8
3 | warn_return_any = True
4 | warn_unused_configs = True
5 | disallow_untyped_defs = False
6 | disallow_any_generics = False
7 | check_untyped_defs = True
8 | no_implicit_optional = True
9 | warn_redundant_casts = True
10 | warn_unused_ignores = True
11 | warn_no_return = True
12 | show_error_codes = True
13 | strict_equality = True
14 | pretty = True
15 | show_column_numbers = True
16 | show_error_context = True
17 | show_traceback = True
18 |
19 | [mypy-setuptools.*]
20 | ignore_missing_imports = True
21 |
22 | [mypy-google.*]
23 | ignore_missing_imports = True
24 |
25 | [mypy-anthropic.*]
26 | ignore_missing_imports = True
27 |
28 | [mypy-openai.*]
29 | ignore_missing_imports = True
30 |
31 | [mypy-groq.*]
32 | ignore_missing_imports = True
33 |
34 | [mypy-pytest.*]
35 | ignore_missing_imports = True
36 |
37 | [mypy-responses.*]
38 | ignore_missing_imports = True
--------------------------------------------------------------------------------
/docs/appcreation.md:
--------------------------------------------------------------------------------
1 | # App Creation
2 |
3 | ## Overview
4 | EasilyAI allows you to initialize an AI app quickly and seamlessly using OpenAI or Ollama.
5 |
6 | EasilyAI supports many different services. You can view a full list [here](./services.md).
7 |
8 | ## Creating an OpenAI App
9 |
10 | ```python
11 | import easilyai
12 |
13 | app = easilyai.create_app(
14 | name="my_ai_app",
15 | service="openai",
16 | apikey="YOUR_API_KEY",
17 | model="gpt-4o"
18 | )
19 |
20 | response = app.request("Tell me a joke about AI.")
21 | print(response)
22 | ```
23 |
24 | ## Creating an Ollama App
25 |
26 | For local models using Ollama:
27 |
28 | ```python
29 | app = easilyai.create_app(
30 | name="my_ai_app",
31 | service="ollama",
32 | model="llama2"
33 | )
34 |
35 | response = app.request("What is the future of AI?")
36 | print(response)
37 | ```
38 |
39 | Learn more about [Text-to-Speech](./texttospeech.md) in EasilyAI.
--------------------------------------------------------------------------------
/docs/.vitepress/theme/index.ts:
--------------------------------------------------------------------------------
1 | import { h } from 'vue'
2 | import type { Theme } from 'vitepress'
3 | import DefaultTheme from 'vitepress/theme'
4 | import './style.css'
5 |
6 | // Import custom components
7 | import FeatureCard from './components/FeatureCard.vue'
8 | import CodeComparison from './components/CodeComparison.vue'
9 | import ApiTable from './components/ApiTable.vue'
10 | import InstallationSteps from './components/InstallationSteps.vue'
11 |
12 | export default {
13 | extends: DefaultTheme,
14 | Layout: () => {
15 | return h(DefaultTheme.Layout, null, {
16 | // https://vitepress.dev/guide/extending-default-theme#layout-slots
17 | })
18 | },
19 | enhanceApp({ app, router, siteData }) {
20 | // Register global components
21 | app.component('FeatureCard', FeatureCard)
22 | app.component('CodeComparison', CodeComparison)
23 | app.component('ApiTable', ApiTable)
24 | app.component('InstallationSteps', InstallationSteps)
25 | }
26 | } satisfies Theme
--------------------------------------------------------------------------------
/easilyai/custom_ai.py:
--------------------------------------------------------------------------------
class CustomAIService:
    """
    Base class for user-provided AI services.

    Subclasses are registered with ``register_custom_ai`` and must override
    the capability methods they intend to support; each base method raises
    NotImplementedError otherwise.
    """

    def __init__(self, model, apikey=None):
        # Store connection details; apikey may be None for local services.
        self.model = model
        self.apikey = apikey

    def generate_text(self, prompt):
        """Produce a text completion for *prompt*. Must be overridden."""
        raise NotImplementedError("Custom AI services must implement 'generate_text'.")

    def generate_image(self, prompt):
        """Produce an image for *prompt*. Must be overridden."""
        raise NotImplementedError("Custom AI services must implement 'generate_image'.")

    def text_to_speech(self, text):
        """Convert *text* to audio. Must be overridden."""
        raise NotImplementedError("Custom AI services must implement 'text_to_speech'.")
14 |
# Registry mapping service names to their CustomAIService subclasses.
_registered_custom_ais = {}

def register_custom_ai(name, custom_service_class):
    """
    Register a custom AI service.

    :param name: Name of the custom service.
    :param custom_service_class: Class inheriting from CustomAIService.
    :raises TypeError: If the argument is not a CustomAIService subclass.
    """
    # Check isinstance(..., type) first: issubclass() itself raises a
    # confusing builtin TypeError when handed a non-class (e.g. an instance),
    # hiding the intended error message below.
    if not (isinstance(custom_service_class, type)
            and issubclass(custom_service_class, CustomAIService)):
        raise TypeError("Custom service must inherit from CustomAIService.")
    _registered_custom_ais[name] = custom_service_class
27 |
--------------------------------------------------------------------------------
/tests/test_utils.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from unittest.mock import MagicMock, patch
3 |
4 |
class TestUtils(unittest.TestCase):
    """Smoke tests: easilyai.utils is importable and exposes public names."""

    def test_utils_module_imports(self):
        # The module is optional here: absence is a skip, not a failure.
        try:
            from easilyai import utils
        except ImportError:
            self.skipTest("Utils module not available")
        else:
            self.assertIsNotNone(utils)

    def test_utils_has_functions(self):
        # The module should expose at least one public attribute.
        try:
            import easilyai.utils
        except ImportError:
            self.skipTest("Utils module not available")
        else:
            public_names = [name for name in dir(easilyai.utils)
                            if not name.startswith('_')]
            self.assertGreater(len(public_names), 0)


if __name__ == "__main__":
    unittest.main()
--------------------------------------------------------------------------------
/tests/test_ollama_service.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from unittest.mock import patch
3 | from requests.exceptions import ConnectionError
4 | from easilyai.services.ollama_service import OllamaService
5 | from easilyai.exceptions import APIConnectionError
6 |
class TestOllamaService(unittest.TestCase):
    """Unit tests for OllamaService with HTTP mocked out (no live server)."""

    def setUp(self):
        # No API key: Ollama is a local service.
        self.service = OllamaService(model="llama2")

    @patch("requests.post")
    def test_generate_text_success(self, mock_post):
        # Simulate a successful Ollama generate response payload.
        mock_post.return_value.status_code = 200
        mock_post.return_value.json.return_value = {"response": "Mocked Ollama response"}
        response = self.service.generate_text("Test prompt")
        self.assertEqual(response, "Mocked Ollama response")

    @patch("requests.post")
    def test_generate_text_connection_error(self, mock_post):
        # A dropped connection must surface as the library's APIConnectionError.
        mock_post.side_effect = ConnectionError
        with self.assertRaises(APIConnectionError):
            self.service.generate_text("Test prompt")

if __name__ == "__main__":
    unittest.main()
26 |
--------------------------------------------------------------------------------
/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | minversion = 7.0
3 | testpaths = tests
4 | pythonpath = .
5 | addopts =
6 | --cov=easilyai
7 | --cov-report=html
8 | --cov-report=term-missing
9 | --cov-report=xml
10 | --cov-fail-under=80
11 | --strict-markers
12 | --strict-config
13 | --verbose
14 | -ra
15 | --tb=short
16 | markers =
17 | unit: Unit tests that don't require external services
18 | integration: Integration tests requiring API keys
19 | slow: Slow tests that should be run less frequently
20 | requires_api_key: Tests that require specific API keys
21 | openai: Tests for OpenAI service
22 | anthropic: Tests for Anthropic service
23 | gemini: Tests for Google Gemini service
24 | groq: Tests for Groq service
25 | filterwarnings =
26 | ignore::DeprecationWarning
27 | ignore::PendingDeprecationWarning
28 | ignore::ResourceWarning
29 | norecursedirs =
30 | .git
31 | .tox
32 | dist
33 | build
34 | *.egg
35 | venv
36 | .venv
37 | python_files = test_*.py
38 | python_classes = Test*
39 | python_functions = test_*
--------------------------------------------------------------------------------
/requirements-dev.txt:
--------------------------------------------------------------------------------
1 | # Development dependencies for EasilyAI
2 | # Install with: pip install -r requirements-dev.txt
3 |
4 | # Testing
5 | pytest>=7.4.0
6 | pytest-cov>=4.1.0
7 | pytest-asyncio>=0.21.0
8 | pytest-mock>=3.11.0
9 | pytest-xdist>=3.3.0
10 | pytest-benchmark>=4.0.0
11 | coverage>=7.3.0
12 |
13 | # Code quality
14 | black>=23.7.0
15 | isort>=5.12.0
16 | flake8>=6.1.0
17 | flake8-docstrings>=1.7.0
18 | flake8-bugbear>=23.7.0
19 | mypy>=1.5.0
20 | bandit>=1.7.5
21 | safety>=2.3.0
22 | pre-commit>=3.3.0
23 |
24 | # Type stubs
25 | types-requests>=2.31.0
26 | types-setuptools>=68.0.0
27 |
28 | # Documentation
29 | mkdocs>=1.5.0
30 | mkdocs-material>=9.2.0
31 | mkdocstrings[python]>=0.22.0
32 | mkdocs-autorefs>=0.5.0
33 |
34 | # Development tools
35 | ipython>=8.14.0
36 | ipdb>=0.13.13
37 | tox>=4.6.0
38 | bump2version>=1.0.1
39 | twine>=4.0.2
40 | build>=0.10.0
41 |
42 | # Optional performance tools
43 | py-spy>=0.3.14
44 | memory-profiler>=0.61.0
45 |
46 | # Mock and testing utilities
47 | responses>=0.23.0
48 | freezegun>=1.2.2
49 | factory-boy>=3.3.0
50 | faker>=19.3.0
51 |
52 | # Environment management
53 | python-dotenv>=1.0.0
--------------------------------------------------------------------------------
/tests/test_enhanced_app.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from unittest.mock import MagicMock, patch
3 |
4 |
class TestEnhancedApp(unittest.TestCase):
    """Tests that easilyai.enhanced_app exists and exposes the factory function."""

    def test_enhanced_app_imports(self):
        # Unlike the optional modules elsewhere in the suite, enhanced_app is
        # required: an ImportError is a failure, not a skip.
        try:
            from easilyai.enhanced_app import create_enhanced_app
            self.assertTrue(callable(create_enhanced_app))
        except ImportError:
            self.fail("Could not import enhanced_app module")

    def test_enhanced_app_function_signature(self):
        # Test the function exists with correct parameters
        from easilyai.enhanced_app import create_enhanced_app
        import inspect

        sig = inspect.signature(create_enhanced_app)
        params = list(sig.parameters.keys())

        # Check required parameters exist
        self.assertIn('name', params)
        self.assertIn('service', params)
        self.assertIn('api_key', params)
        self.assertIn('model', params)


if __name__ == "__main__":
    unittest.main()
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
from setuptools import setup, find_packages
from pathlib import Path

# Directory containing this setup.py; README.md sits next to it.
this_directory = Path(__file__).parent

# Explicit UTF-8: read_text() otherwise uses the platform default encoding,
# which breaks the build on Windows (cp1252) for non-ASCII README content.
long_description = (this_directory / "README.md").read_text(encoding="utf-8")

setup(
    name="EasilyAI",
    version="0.3.0",
    description="A library that simplifies the usage of AI!",
    author="GustyCube",
    author_email="gc@gustycube.xyz",
    url="https://github.com/GustyCube/EasilyAI",
    packages=find_packages(),
    install_requires=[
        "openai>=1.0.0",
        "requests>=2.0.0",
        "google-generativeai>=0.8.3",
        "anthropic>=0.42.0"
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.11",
        "Programming Language :: Python :: 3.12",
        "Programming Language :: Python :: 3.13",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.7',
    long_description=long_description,
    long_description_content_type='text/markdown'
)
33 |
--------------------------------------------------------------------------------
/tests/test_batch.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from unittest.mock import MagicMock, patch
3 |
4 |
class TestBatchProcessing(unittest.TestCase):
    """Smoke tests for the optional easilyai.batch module."""

    def test_batch_module_imports(self):
        # Batch support is optional; its absence is a skip, not a failure.
        try:
            from easilyai import batch
        except ImportError:
            self.skipTest("Batch module not available")
        else:
            self.assertIsNotNone(batch)

    def test_batch_functionality_exists(self):
        # Prefer the BatchProcessor class; fall back to the bare module.
        try:
            from easilyai.batch import BatchProcessor
        except ImportError:
            try:
                import easilyai.batch
            except ImportError:
                self.skipTest("No batch processing functionality found")
            else:
                self.assertIsNotNone(easilyai.batch)
        else:
            self.assertTrue(hasattr(BatchProcessor, '__init__'))


if __name__ == "__main__":
    unittest.main()
--------------------------------------------------------------------------------
/docs/.vitepress/cache/deps/_metadata.json:
--------------------------------------------------------------------------------
1 | {
2 | "hash": "d6583594",
3 | "configHash": "c4b03129",
4 | "lockfileHash": "d1ebd6ff",
5 | "browserHash": "d47ea5d5",
6 | "optimized": {
7 | "vue": {
8 | "src": "../../../node_modules/vue/dist/vue.runtime.esm-bundler.js",
9 | "file": "vue.js",
10 | "fileHash": "7ad5fc8c",
11 | "needsInterop": false
12 | },
13 | "vitepress > @vue/devtools-api": {
14 | "src": "../../../node_modules/@vue/devtools-api/dist/index.js",
15 | "file": "vitepress___@vue_devtools-api.js",
16 | "fileHash": "19c8795d",
17 | "needsInterop": false
18 | },
19 | "vitepress > @vueuse/core": {
20 | "src": "../../../node_modules/@vueuse/core/index.mjs",
21 | "file": "vitepress___@vueuse_core.js",
22 | "fileHash": "12dfe7a6",
23 | "needsInterop": false
24 | },
25 | "@theme/index": {
26 | "src": "../../../node_modules/vitepress/dist/client/theme-default/index.js",
27 | "file": "@theme_index.js",
28 | "fileHash": "be618a6a",
29 | "needsInterop": false
30 | }
31 | },
32 | "chunks": {
33 | "chunk-A7UTJUSY": {
34 | "file": "chunk-A7UTJUSY.js"
35 | },
36 | "chunk-VJWGEPT5": {
37 | "file": "chunk-VJWGEPT5.js"
38 | }
39 | }
40 | }
--------------------------------------------------------------------------------
/tests/test_gemini_service.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from unittest.mock import patch
3 | from easilyai.services.gemini_service import GeminiService
4 | from easilyai.exceptions import MissingAPIKeyError, ServerError
5 |
class TestGeminiService(unittest.TestCase):
    """Unit tests for GeminiService with the Google SDK mocked out."""

    def setUp(self):
        self.service = GeminiService(apikey="fake_api_key", model="gemini-1")

    def test_missing_api_key(self):
        # Constructing without a key must fail fast with the library's error.
        with self.assertRaises(MissingAPIKeyError):
            GeminiService(apikey=None, model="gemini-1")

    @patch("google.generativeai.GenerativeModel.generate_content")
    def test_generate_text_success(self, mock_generate):
        # MockResponse (defined below) mimics the SDK response's .text field.
        mock_generate.return_value = MockResponse("Mocked Gemini response")
        response = self.service.generate_text("Test prompt")
        self.assertEqual(response, "Mocked Gemini response")

    @patch("google.generativeai.GenerativeModel.generate_content")
    def test_generate_text_server_error(self, mock_generate):
        # An SDK-level exception is expected to be re-raised as ServerError.
        mock_generate.side_effect = Exception("Server error")
        with self.assertRaises(ServerError):
            self.service.generate_text("Error prompt")

class MockResponse:
    # Minimal stand-in for the Gemini SDK response object; only .text is read.
    def __init__(self, text):
        self.text = text

if __name__ == "__main__":
    unittest.main()
32 |
--------------------------------------------------------------------------------
/tests/test_openai_service.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from unittest.mock import patch
3 | from easilyai.services.openai_service import OpenAIService
4 | from easilyai.exceptions import MissingAPIKeyError, AuthenticationError
5 |
6 |
class TestOpenAIService(unittest.TestCase):
    """Unit tests for OpenAIService; no real network calls are made."""

    def setUp(self):
        # A fake key is fine: setUp only constructs the service object.
        self.apikey = "fake_api_key"
        self.model = "gpt-4"
        self.service = OpenAIService(apikey=self.apikey, model=self.model)

    @patch.object(OpenAIService, '__init__', lambda x, y, z: None)
    @patch('openai.OpenAI')
    def test_generate_text_success(self, mock_openai_class):
        # __init__ is replaced with a no-op so a mock client can be injected
        # manually below instead of a real openai.OpenAI instance.
        mock_client = mock_openai_class.return_value
        # Build a minimal object graph matching the chat-completions response
        # shape the service reads: response.choices[0].message.content.
        mock_response = type('Response', (), {})()
        mock_choice = type('Choice', (), {})()
        mock_message = type('Message', (), {'content': 'Mocked OpenAI response'})()
        mock_choice.message = mock_message
        mock_response.choices = [mock_choice]

        mock_client.chat.completions.create.return_value = mock_response

        service = OpenAIService("fake_key", "gpt-4")
        service.client = mock_client
        service.model = "gpt-4"

        response = service.generate_text("Test prompt")
        self.assertEqual(response, "Mocked OpenAI response")


if __name__ == "__main__":
    unittest.main()
35 |
--------------------------------------------------------------------------------
/.github/workflows/docs.yml:
--------------------------------------------------------------------------------
# NOTE(review): docsdeploy.yml in this same directory also builds and deploys
# these docs on pushes to main, using Node 20 (vs Node 18 here) and the same
# `pages` concurrency group. Confirm which workflow is the intended deployment
# path — running both produces duplicate Pages deployments on main.
name: Deploy VitePress docs to GitHub Pages

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

permissions:
  contents: read
  pages: write
  id-token: write

concurrency:
  group: pages
  cancel-in-progress: false

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 18
          cache: npm
          cache-dependency-path: docs/package-lock.json

      - name: Setup Pages
        uses: actions/configure-pages@v4

      - name: Install dependencies
        run: npm ci
        working-directory: docs

      - name: Build with VitePress
        run: npm run docs:build
        working-directory: docs

      - name: Upload artifact
        uses: actions/upload-pages-artifact@v3
        with:
          path: docs/.vitepress/dist

  deploy:
    needs: build
    runs-on: ubuntu-latest
    # PR events still run the build job above, but only pushes to main deploy.
    if: github.ref == 'refs/heads/main'
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
    steps:
      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v4
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | # EasilyAI
2 |
A unified Python library for seamless integration with multiple AI services including OpenAI, Anthropic, Google Gemini, and Grok (xAI).
4 |
5 | ## Features
6 |
7 | - **Unified Interface**: Single API for multiple AI providers
8 | - **Easy Integration**: Simple setup with minimal configuration
9 | - **Flexible Usage**: Support for chat completions, streaming, and batch processing
10 | - **Type Safety**: Full type hints for better IDE support
11 | - **Error Handling**: Comprehensive error handling with custom exceptions
12 | - **Extensible**: Easy to add custom AI services
13 |
14 | ## Quick Example
15 |
16 | ```python
import easilyai

# Create an app with your API key (create_app is the factory used throughout this library)
app = easilyai.create_app(name="quickstart", service="openai", apikey="your-api-key", model="gpt-4")
21 |
22 | # Generate text
23 | response = app.generate_text("Tell me a joke about programming")
24 | print(response)
25 | ```
26 |
27 | ## Supported Services
28 |
29 | | Service | Models | Features |
30 | |---------|--------|----------|
31 | | OpenAI | GPT-4, GPT-3.5-Turbo | Chat, Streaming, Function Calling |
32 | | Anthropic | Claude 3 (Opus, Sonnet, Haiku) | Chat, Streaming, System Messages |
33 | | Google Gemini | Gemini Pro, Gemini Pro Vision | Chat, Multimodal |
| Grok (xAI) | grok-beta | Chat, Image Input |
35 |
36 | ## Installation
37 |
38 | ```bash
39 | pip install easilyai
40 | ```
41 |
42 | For development installation with all dependencies:
43 |
44 | ```bash
45 | pip install easilyai[dev,test,docs]
46 | ```
47 |
48 | ## License
49 |
50 | This project is licensed under the Apache License 2.0 - see the [LICENSE](https://github.com/GustyCube/EasilyAI/blob/main/LICENSE) file for details.
--------------------------------------------------------------------------------
/examples/basic_example.py:
--------------------------------------------------------------------------------
1 | import easilyai
2 | from easilyai.custom_ai import CustomAIService
3 |
# Demonstration script: custom-service registration, pipelines, and TTS apps.
class MyCustomAI(CustomAIService):
    """Toy service that echoes its inputs, used to demonstrate registration."""

    def generate_text(self, prompt):
        return f"Custom AI Text: {prompt}"

    def text_to_speech(self, text, **kwargs):
        return f"Custom TTS Output: {text}"

# Make the toy service available under the "my_custom_ai" name.
easilyai.register_custom_ai("my_custom_ai", MyCustomAI)

# Build an OpenAI-backed app.
openai_app = easilyai.create_app(
    name="openai_app",
    service="openai",
    apikey="YOUR_OPENAI_API_KEY",
    model="gpt-4",
)

# Queue three tasks and execute them in order.
story_pipeline = easilyai.EasilyAIPipeline(openai_app)
story_pipeline.add_task("generate_text", "Tell me a story about a talking car.")
story_pipeline.add_task("generate_image", "A red futuristic talking car with glowing headlights.")
story_pipeline.add_task("text_to_speech", "Here is a talking car in a futuristic world!")

for entry in story_pipeline.run():
    print(f"Task: {entry['task']}\nResult: {entry['result']}\n")

# Dedicated text-to-speech app backed by OpenAI.
speech_app = easilyai.create_tts_app(
    name="tts_app",
    service="openai",
    apikey="YOUR_OPENAI_API_KEY",
    model="tts-1",
)

# Render speech with a specific voice and save it to disk.
saved_path = speech_app.request_tts(
    text="Hello, I am your AI assistant!",
    tts_model="tts-1",
    voice="onyx",
    output_file="hello_ai.mp3",
)
print(f"TTS output saved to: {saved_path}")

# The registered custom service can back a TTS app as well.
custom_speech_app = easilyai.create_tts_app(
    name="custom_tts",
    service="my_custom_ai",
    model="v1",
)
print(custom_speech_app.request_tts("This is a custom AI TTS example."))
56 |
--------------------------------------------------------------------------------
/tests/test_app_creation.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from easilyai.app import create_app
3 |
4 |
class TestAppCreation(unittest.TestCase):
    """Smoke tests: create_app wires name, service, and model for each backend."""

    def _check_basic(self, app, name, service, model):
        # Shared assertions for services whose client exposes `.model` directly.
        self.assertEqual(app.name, name)
        self.assertEqual(app.service, service)
        self.assertEqual(app.client.model, model)

    def test_openai_app_creation(self):
        app = create_app(name="TestOpenAIApp", service="openai", apikey="fake_api_key", model="gpt-4")
        self._check_basic(app, "TestOpenAIApp", "openai", "gpt-4")

    def test_ollama_app_creation(self):
        app = create_app(name="TestOllamaApp", service="ollama", model="ollama-test-model")
        self._check_basic(app, "TestOllamaApp", "ollama", "ollama-test-model")

    def test_anthropic_app_creation(self):
        app = create_app(name="TestAnthropicApp", service="anthropic", apikey="fake_api_key", model="claude-3")
        self._check_basic(app, "TestAnthropicApp", "anthropic", "claude-3")

    def test_gemini_app_creation(self):
        app = create_app(name="TestGeminiApp", service="gemini", apikey="fake_api_key", model="gemini-1")
        self.assertEqual(app.name, "TestGeminiApp")
        self.assertEqual(app.service, "gemini")
        # The Gemini SDK prefixes model names with "models/".
        self.assertEqual(app.client.model.model_name, "models/gemini-1")

    def test_grok_app_creation(self):
        app = create_app(name="TestGrokApp", service="grok", apikey="fake_api_key", model="grok-v1")
        self._check_basic(app, "TestGrokApp", "grok", "grok-v1")
36 |
37 | if __name__ == "__main__":
38 | unittest.main()
39 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/pre-commit/pre-commit-hooks
3 | rev: v4.5.0
4 | hooks:
5 | - id: trailing-whitespace
6 | - id: end-of-file-fixer
7 | - id: check-yaml
8 | - id: check-added-large-files
9 | args: ['--maxkb=1000']
10 | - id: check-json
11 | - id: check-toml
12 | - id: check-merge-conflict
13 | - id: check-case-conflict
14 | - id: detect-private-key
15 | - id: debug-statements
16 | - id: mixed-line-ending
17 | args: ['--fix=lf']
18 |
19 | - repo: https://github.com/psf/black
20 | rev: 23.12.1
21 | hooks:
22 | - id: black
23 | language_version: python3
24 | args: ['--line-length=100']
25 |
26 | - repo: https://github.com/pycqa/isort
27 | rev: 5.13.2
28 | hooks:
29 | - id: isort
30 | args: ['--profile', 'black', '--line-length', '100']
31 |
32 | - repo: https://github.com/pycqa/flake8
33 | rev: 7.0.0
34 | hooks:
35 | - id: flake8
36 | additional_dependencies: [flake8-docstrings, flake8-bugbear]
37 | args: ['--config=.flake8']
38 |
39 | - repo: https://github.com/pre-commit/mirrors-mypy
40 | rev: v1.8.0
41 | hooks:
42 | - id: mypy
43 | additional_dependencies: [types-requests]
44 | args: ['--config-file=mypy.ini']
45 | exclude: '^tests/'
46 |
47 | - repo: https://github.com/PyCQA/bandit
48 | rev: 1.7.6
49 | hooks:
50 | - id: bandit
51 | args: ['-r', 'easilyai', '-ll']
52 | exclude: '^tests/'
53 |
54 | - repo: https://github.com/asottile/pyupgrade
55 | rev: v3.15.0
56 | hooks:
57 | - id: pyupgrade
58 | args: ['--py38-plus']
59 |
60 | default_language_version:
61 | python: python3
62 |
63 | ci:
64 | autofix_commit_msg: |
65 | [pre-commit.ci] auto fixes from pre-commit hooks
66 | autofix_prs: true
67 | autoupdate_branch: ''
68 | autoupdate_commit_msg: '[pre-commit.ci] pre-commit autoupdate'
69 | autoupdate_schedule: weekly
70 | skip: [mypy] # mypy requires project dependencies
--------------------------------------------------------------------------------
/.github/workflows/docsdeploy.yml:
--------------------------------------------------------------------------------
# Sample workflow for building and deploying a VitePress site to GitHub Pages
#
# NOTE(review): docs.yml in this directory is a near-duplicate of this workflow
# (Node 18, plus PR builds) and shares the `pages` concurrency group. Confirm
# only one of the two is meant to deploy on pushes to main.
name: Deploy Docs

on:
  # Runs on pushes targeting the `main` branch. Change this to `master` if you're
  # using the `master` branch as the default branch.
  push:
    branches: [main]

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:
  contents: read
  pages: write
  id-token: write

# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
concurrency:
  group: pages
  cancel-in-progress: false

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Setup Node
        uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: 'npm'
          cache-dependency-path: docs/package-lock.json

      - name: Setup Pages
        uses: actions/configure-pages@v4

      - name: Install dependencies
        run: npm ci
        working-directory: ./docs  # Ensure this points to /docs

      - name: Build with VitePress
        run: npm run docs:build
        working-directory: ./docs  # Ensure this points to /docs

      - name: Upload artifact
        uses: actions/upload-pages-artifact@v3
        with:
          path: docs/.vitepress/dist

  deploy:
    needs: build
    runs-on: ubuntu-latest
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
    steps:
      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v4
68 |
--------------------------------------------------------------------------------
/.github/workflows/python-publish.yml:
--------------------------------------------------------------------------------
1 | name: CI/CD Pipeline
2 |
3 | on:
4 | release:
5 | types: [published]
6 |
7 | permissions:
8 | contents: read
9 |
10 | jobs:
11 | test:
12 | name: Run Unit Tests
13 | runs-on: ubuntu-latest
14 |
15 | steps:
16 | - name: Checkout code
17 | uses: actions/checkout@v4
18 |
19 | - name: Set up Python
20 | uses: actions/setup-python@v5
21 | with:
22 | python-version: "3.x"
23 |
24 | - name: Install dependencies
25 | run: |
26 | python -m pip install --upgrade pip
27 | pip install -r requirements.txt
28 | pip install pytest coverage
29 |
30 | - name: Run tests with coverage
31 | run: |
32 | coverage run -m unittest discover -s tests
33 | coverage report -m
34 |
35 | release-build:
36 | name: Build Release Distributions
37 | runs-on: ubuntu-latest
38 | needs: test # Ensures tests pass before building
39 |
40 | steps:
41 | - name: Checkout code
42 | uses: actions/checkout@v4
43 |
44 | - name: Set up Python
45 | uses: actions/setup-python@v5
46 | with:
47 | python-version: "3.x"
48 |
49 | - name: Build release distributions
50 | run: |
51 | python -m pip install build
52 | python -m build
53 |
54 | - name: Upload distributions
55 | uses: actions/upload-artifact@v4
56 | with:
57 | name: release-dists
58 | path: dist/
59 |
60 | pypi-publish:
61 | name: Publish to PyPI
62 | runs-on: ubuntu-latest
63 | needs: release-build
64 | permissions:
65 | id-token: write
66 |
67 | environment:
68 | name: pypi
69 | url: https://pypi.org/project/EasilyAI/
70 |
71 | steps:
72 | - name: Retrieve release distributions
73 | uses: actions/download-artifact@v4
74 | with:
75 | name: release-dists
76 | path: dist/
77 |
78 | - name: Publish distributions to PyPI
79 | uses: pypa/gh-action-pypi-publish@release/v1
80 | with:
81 | packages-dir: dist/
82 |
--------------------------------------------------------------------------------
/easilyai/pipeline.py:
--------------------------------------------------------------------------------
class EasilyAIPipeline:
    """
    Basic pipeline for sequential task execution.

    WARNING: This is the legacy pipeline implementation.
    For new projects, consider using EnhancedPipeline which provides:
    - Better error handling
    - Parallel execution support
    - Variable substitution
    - Conditional task execution

    This class is maintained for backward compatibility.
    """

    def __init__(self, app):
        # `app` must expose request(task_type, data) and, for TTS tasks,
        # a `client` attribute with a text_to_speech(data) method.
        self.app = app
        self.tasks = []

    def add_task(self, task_type, data, **kwargs):
        """
        Add a task to the pipeline.

        :param task_type: 'generate_text', 'generate_image', or 'text_to_speech'
        :param data: The input for the task (e.g., prompt or text).
        :param kwargs: Optional extra task parameters; when present, the stored
            payload becomes a dict of the form ``{"data": data, **kwargs}``.
        """
        payload = data if not kwargs else {"data": data, **kwargs}
        self.tasks.append({"type": task_type, "data": payload})

    def run(self):
        """
        Execute the queued tasks in order.

        :return: list of ``{"task": task_type, "result": result}`` dicts.
        :raises ValueError: if a queued task has an unknown type.
        """
        results = []
        for i, task in enumerate(self.tasks):
            task_type = task["type"]
            data = task["data"]

            print(f"Running Task {i + 1}: {task_type}")

            if task_type in ("generate_text", "generate_image"):
                # Bug fix: generate_image previously called
                # self.app.request(f"Generate an image: {data}") with a single
                # positional argument, passing the prompt where the task type
                # belongs. Both branches now use the same
                # request(task_type, data) contract as generate_text did.
                result = self.app.request(task_type, data)
            elif task_type == "text_to_speech":
                result = self.app.client.text_to_speech(data)
            else:
                raise ValueError(f"Unknown task type: {task_type}")

            results.append({"task": task_type, "result": result})

        print("\nPipeline Completed!")
        return results
57 |
--------------------------------------------------------------------------------
/easilyai/exceptions.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
class EasilyAIError(Exception):
    """Root of the EasilyAI exception hierarchy; catch this to handle any library error."""
6 |
# ANSI Color Codes
class Color:
    """ANSI escape sequences used to colorize the exception messages in this module."""
    RESET = "\033[0m"   # restore default terminal styling
    RED = "\033[91m"    # hard errors (auth, invalid request, server, missing key)
    GREEN = "\033[92m"  # unused in this module
    YELLOW = "\033[93m" # rate-limit and not-found messages
    BLUE = "\033[94m"   # unsupported-service message
    CYAN = "\033[96m"   # connectivity and not-implemented messages
15 |
class AuthenticationError(EasilyAIError):
    """Raised when the provider rejects the supplied credentials."""

    def __init__(self, message="Authentication failed!"):
        decorated = f"{Color.RED}🔑 {message} {Color.RESET}"
        super().__init__(decorated)
19 |
class RateLimitError(EasilyAIError):
    """Raised when the provider reports that the API rate limit was exceeded."""

    def __init__(self, message="API rate limit exceeded! Please slow down."):
        decorated = f"{Color.YELLOW}⏳ {message} {Color.RESET}"
        super().__init__(decorated)
23 |
class InvalidRequestError(EasilyAIError):
    """Raised when the request parameters are rejected (e.g. HTTP 400)."""

    def __init__(self, message="Invalid request!"):
        decorated = f"{Color.RED}🚫 {message} {Color.RESET}"
        super().__init__(decorated)
27 |
class APIConnectionError(EasilyAIError):
    """Raised when the provider's API cannot be reached at all."""

    def __init__(self, message="Unable to connect to the API."):
        decorated = f"{Color.CYAN}🌐 {message} {Color.RESET}"
        super().__init__(decorated)
31 |
class NotFoundError(EasilyAIError):
    """Raised when a requested resource or endpoint does not exist (e.g. HTTP 404)."""

    def __init__(self, message="The requested resource was not found!"):
        decorated = f"{Color.YELLOW}🔍 {message} {Color.RESET}"
        super().__init__(decorated)
35 |
class ServerError(EasilyAIError):
    """Raised for provider-side failures (HTTP 5xx) and unclassified errors."""

    def __init__(self, message="Server encountered an error!"):
        decorated = f"{Color.RED}💥 {message} {Color.RESET}"
        super().__init__(decorated)
39 |
class MissingAPIKeyError(EasilyAIError):
    """Raised when a service is constructed without the API key it requires."""

    def __init__(self, message="No API key provided!"):
        decorated = f"{Color.RED}🔐 {message} {Color.RESET}"
        super().__init__(decorated)
43 |
class UnsupportedServiceError(EasilyAIError):
    """Raised when an app is created for a service name with no registered backend."""

    def __init__(self, service_name):
        decorated = (
            f"{Color.BLUE}❌ Unsupported service '{service_name}'! Use 'openai', 'ollama', or a custom registered service. "
            f"Refer to the EasyAI documentation for more information.{Color.RESET}"
        )
        super().__init__(decorated)
50 |
class NotImplementedError(EasilyAIError):
    """Raised when a service does not support a requested feature.

    NOTE: intentionally shadows the builtin ``NotImplementedError`` inside this
    module; the name is kept for backward compatibility with existing callers
    (e.g. OllamaService.text_to_speech raises it).

    Bug fix: the previous implementation printed the message and called
    ``sys.exit(1)`` from ``__init__``, which terminated the whole process the
    moment the exception object was constructed and made
    ``raise NotImplementedError(...)`` impossible to catch. It now behaves like
    a normal exception.
    """

    def __init__(self, feature="This feature"):
        super().__init__(
            f"{Color.CYAN}🛠️ {feature} is not yet implemented! Stay tuned for future updates.{Color.RESET}"
        )
56 |
--------------------------------------------------------------------------------
/.github/workflows/claude.yml:
--------------------------------------------------------------------------------
1 | name: Claude Code
2 |
3 | on:
4 | issue_comment:
5 | types: [created]
6 | pull_request_review_comment:
7 | types: [created]
8 | issues:
9 | types: [opened, assigned]
10 | pull_request_review:
11 | types: [submitted]
12 |
13 | jobs:
14 | claude:
15 | if: |
16 | (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
17 | (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
18 | (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
19 | (github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
20 | runs-on: ubuntu-latest
21 | permissions:
22 | contents: read
23 | pull-requests: read
24 | issues: read
25 | id-token: write
26 | steps:
27 | - name: Checkout repository
28 | uses: actions/checkout@v4
29 | with:
30 | fetch-depth: 1
31 |
32 | - name: Run Claude Code
33 | id: claude
34 | uses: anthropics/claude-code-action@beta
35 | with:
36 | anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
37 |
38 | # Optional: Specify model (defaults to Claude Sonnet 4, uncomment for Claude Opus 4)
39 | # model: "claude-opus-4-20250514"
40 |
41 | # Optional: Customize the trigger phrase (default: @claude)
42 | # trigger_phrase: "/claude"
43 |
44 | # Optional: Trigger when specific user is assigned to an issue
45 | # assignee_trigger: "claude-bot"
46 |
47 | # Optional: Allow Claude to run specific commands
48 | # allowed_tools: "Bash(npm install),Bash(npm run build),Bash(npm run test:*),Bash(npm run lint:*)"
49 |
50 | # Optional: Add custom instructions for Claude to customize its behavior for your project
51 | # custom_instructions: |
52 | # Follow our coding standards
53 | # Ensure all new code has tests
54 | # Use TypeScript for new files
55 |
56 | # Optional: Custom environment variables for Claude
57 | # claude_env: |
58 | # NODE_ENV: test
59 |
60 |
--------------------------------------------------------------------------------
/easilyai/services/ollama_service.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from easilyai.exceptions import (
3 | APIConnectionError, InvalidRequestError, NotFoundError,
4 | ServerError, MissingAPIKeyError, NotImplementedError
5 | )
6 |
class OllamaService:
    """Client for a locally running Ollama server.

    Only text generation is implemented; text_to_speech always raises
    NotImplementedError.
    """

    def __init__(self, model, base_url="http://localhost:11434/api", timeout=60):
        """
        :param model: Name of the local Ollama model to query.
        :param base_url: Base URL of the Ollama HTTP API. New parameter; the
            default preserves the previously hard-coded localhost endpoint.
        :param timeout: Per-request timeout in seconds. The previous
            implementation posted without any timeout, which could hang the
            caller indefinitely if the server stopped responding.
        """
        self.model = model
        self.base_url = base_url
        self.timeout = timeout

    def generate_text(self, prompt):
        """Send ``prompt`` to the /generate endpoint and return the stripped reply.

        :raises APIConnectionError: if the server is unreachable.
        :raises InvalidRequestError: on HTTP 400.
        :raises NotFoundError: on HTTP 404.
        :raises ServerError: on HTTP 5xx, other HTTP errors, or any other
            request failure (including timeouts).
        """
        payload = {"model": self.model, "prompt": prompt, "stream": False}
        try:
            response = requests.post(
                f"{self.base_url}/generate", json=payload, timeout=self.timeout
            )
            response.raise_for_status()
            return response.json().get("response", "").strip()
        except requests.exceptions.ConnectionError:
            raise APIConnectionError(
                "Connection error! Unable to connect to Ollama's API. "
                "Please ensure Ollama is running locally and accessible. "
                "Refer to the EasyAI documentation for more information."
            )
        except requests.exceptions.HTTPError as e:
            # `response` is always bound here: HTTPError only comes from
            # raise_for_status() after a successful post().
            if response.status_code == 400:
                raise InvalidRequestError(
                    f"Invalid request! {str(e)}. Please check your request parameters. "
                    "Refer to the EasyAI documentation for more information."
                )
            elif response.status_code == 404:
                raise NotFoundError(
                    f"Resource not found! {str(e)}. The requested endpoint does not exist. "
                    "Refer to the EasyAI documentation for more information."
                )
            elif response.status_code >= 500:
                raise ServerError(
                    f"Server error! {str(e)}. Ollama's server encountered an error. "
                    "Please try again later. Refer to the EasyAI documentation for more information."
                )
            else:
                raise ServerError(
                    f"An unexpected error occurred: {str(e)}. Please try again later. "
                    "Refer to the EasyAI documentation for more information."
                )
        except requests.exceptions.RequestException as e:
            raise ServerError(
                f"An error occurred: {str(e)}. Please try again later. "
                "Refer to the EasyAI documentation for more information."
            )

    def text_to_speech(self, text, **kwargs):
        """Not supported by Ollama; always raises NotImplementedError."""
        raise NotImplementedError(
            "Text-to-Speech (TTS) is not supported for Ollama models at this time. "
            "Refer to the EasyAI documentation for more information."
        )
56 |
--------------------------------------------------------------------------------
/tests/test_huggingface_service.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from unittest.mock import patch, MagicMock
3 | from easilyai.services.huggingface_service import HuggingFaceService
4 | from easilyai.exceptions import MissingAPIKeyError, AuthenticationError, ServerError
5 |
6 |
class TestHuggingFaceService(unittest.TestCase):
    """Unit tests for HuggingFaceService with requests.post patched out."""

    @staticmethod
    def _http_stub(status_code, payload=None):
        # Build a fake `requests` response with the given status and JSON body.
        stub = MagicMock()
        stub.status_code = status_code
        if payload is not None:
            stub.json.return_value = payload
        return stub

    def setUp(self):
        self.apikey = "fake_api_key"
        self.model = "gpt2"
        self.service = HuggingFaceService(apikey=self.apikey, model=self.model)

    def test_missing_api_key(self):
        with self.assertRaises(MissingAPIKeyError):
            HuggingFaceService(apikey=None, model=self.model)

    @patch('requests.post')
    def test_generate_text_success(self, mock_post):
        mock_post.return_value = self._http_stub(
            200, [{"generated_text": "Mocked HuggingFace response"}]
        )
        self.assertEqual(self.service.generate_text("Test prompt"), "Mocked HuggingFace response")

    @patch('requests.post')
    def test_generate_text_with_parameters(self, mock_post):
        mock_post.return_value = self._http_stub(200, [{"generated_text": "Custom response"}])
        result = self.service.generate_text("Test prompt", max_length=200, temperature=0.5)
        self.assertEqual(result, "Custom response")

    @patch('requests.post')
    def test_generate_text_authentication_error(self, mock_post):
        mock_post.return_value = self._http_stub(401)
        with self.assertRaises(ServerError):  # HuggingFace wraps AuthError in ServerError
            self.service.generate_text("Test prompt")

    @patch('requests.post')
    def test_generate_text_server_error(self, mock_post):
        mock_post.return_value = self._http_stub(500)
        with self.assertRaises(ServerError):
            self.service.generate_text("Test prompt")

    @patch('requests.post')
    def test_generate_text_empty_response(self, mock_post):
        mock_post.return_value = self._http_stub(200, [])
        # HuggingFace returns str(result) for empty lists
        self.assertEqual(self.service.generate_text("Test prompt"), "[]")
--------------------------------------------------------------------------------
/docs/.vitepress/theme/components/CodeComparison.vue:
--------------------------------------------------------------------------------
1 |
2 |
3 |
7 |
8 |
9 |
10 | ❌
11 | {{ beforeLabel }}
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 | ✅
20 | {{ afterLabel }}
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
43 |
44 |
--------------------------------------------------------------------------------
/tests/test_anthropic_service.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from unittest.mock import patch, MagicMock
3 | from easilyai.services.anthropic_service import AnthropicService
4 | from easilyai.exceptions import MissingAPIKeyError, AuthenticationError, ServerError
5 |
6 |
class TestAnthropicService(unittest.TestCase):
    """Unit tests for AnthropicService with a stubbed Anthropic client."""

    def setUp(self):
        self.apikey = "fake_api_key"
        self.model = "claude-3-sonnet-20240229"

    @staticmethod
    def _wire(service, client):
        # __init__ is patched to a no-op in these tests, so attach the
        # attributes generate_text reads by hand.
        service.client = client
        service.model = "claude-3-sonnet-20240229"
        service.max_tokens = 1024
        return service

    @staticmethod
    def _reply_with(text):
        reply = MagicMock()
        reply.content = [MagicMock(text=text)]
        return reply

    def test_missing_api_key(self):
        with self.assertRaises(MissingAPIKeyError):
            AnthropicService(apikey=None, model=self.model)

    @patch.object(AnthropicService, '__init__', lambda x, y, z, **kwargs: None)
    @patch('anthropic.Anthropic')
    def test_generate_text_success(self, mock_anthropic_class):
        stub_client = mock_anthropic_class.return_value
        stub_client.messages.create.return_value = self._reply_with("Mocked Anthropic response")

        service = self._wire(AnthropicService("fake_key", "claude-3-sonnet-20240229"), stub_client)
        self.assertEqual(service.generate_text("Test prompt"), "Mocked Anthropic response")

    @patch.object(AnthropicService, '__init__', lambda x, y, z, **kwargs: None)
    @patch('anthropic.Anthropic')
    def test_generate_text_with_image(self, mock_anthropic_class):
        stub_client = mock_anthropic_class.return_value
        stub_client.messages.create.return_value = self._reply_with("Mocked response with image")

        service = self._wire(AnthropicService("fake_key", "claude-3-sonnet-20240229"), stub_client)

        # prepare_image returning None simulates a remote URL (no local file).
        with patch.object(service, 'prepare_image', return_value=None):
            result = service.generate_text("Describe this image", "http://example.com/image.jpg")
        self.assertEqual(result, "Mocked response with image")

    @patch.object(AnthropicService, '__init__', lambda x, y, z, **kwargs: None)
    @patch('anthropic.Anthropic')
    def test_generate_text_authentication_error(self, mock_anthropic_class):
        stub_client = mock_anthropic_class.return_value
        stub_client.messages.create.side_effect = Exception("Authentication failed")

        service = self._wire(AnthropicService("fake_key", "claude-3-sonnet-20240229"), stub_client)
        with self.assertRaises(ServerError):
            service.generate_text("Test prompt")
--------------------------------------------------------------------------------
/.github/workflows/claude-code-review.yml:
--------------------------------------------------------------------------------
1 | name: Claude Code Review
2 |
3 | on:
4 | pull_request:
5 | types: [opened, synchronize]
6 | # Optional: Only run on specific file changes
7 | # paths:
8 | # - "src/**/*.ts"
9 | # - "src/**/*.tsx"
10 | # - "src/**/*.js"
11 | # - "src/**/*.jsx"
12 |
13 | jobs:
14 | claude-review:
15 | # Optional: Filter by PR author
16 | # if: |
17 | # github.event.pull_request.user.login == 'external-contributor' ||
18 | # github.event.pull_request.user.login == 'new-developer' ||
19 | # github.event.pull_request.author_association == 'FIRST_TIME_CONTRIBUTOR'
20 |
21 | runs-on: ubuntu-latest
22 | permissions:
23 | contents: read
24 | pull-requests: read
25 | issues: read
26 | id-token: write
27 |
28 | steps:
29 | - name: Checkout repository
30 | uses: actions/checkout@v4
31 | with:
32 | fetch-depth: 1
33 |
34 | - name: Run Claude Code Review
35 | id: claude-review
36 | uses: anthropics/claude-code-action@beta
37 | with:
38 | anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
39 |
40 | # Optional: Specify model (defaults to Claude Sonnet 4, uncomment for Claude Opus 4)
41 | # model: "claude-opus-4-20250514"
42 |
43 | # Direct prompt for automated review (no @claude mention needed)
44 | direct_prompt: |
45 | Please review this pull request and provide feedback on:
46 | - Code quality and best practices
47 | - Potential bugs or issues
48 | - Performance considerations
49 | - Security concerns
50 | - Test coverage
51 |
52 | Be constructive and helpful in your feedback.
53 |
54 | # Optional: Customize review based on file types
55 | # direct_prompt: |
56 | # Review this PR focusing on:
57 | # - For TypeScript files: Type safety and proper interface usage
58 | # - For API endpoints: Security, input validation, and error handling
59 | # - For React components: Performance, accessibility, and best practices
60 | # - For tests: Coverage, edge cases, and test quality
61 |
62 | # Optional: Different prompts for different authors
63 | # direct_prompt: |
64 | # ${{ github.event.pull_request.author_association == 'FIRST_TIME_CONTRIBUTOR' &&
65 | # 'Welcome! Please review this PR from a first-time contributor. Be encouraging and provide detailed explanations for any suggestions.' ||
66 | # 'Please provide a thorough code review focusing on our coding standards and best practices.' }}
67 |
68 | # Optional: Add specific tools for running tests or linting
69 | # allowed_tools: "Bash(npm run test),Bash(npm run lint),Bash(npm run typecheck)"
70 |
71 | # Optional: Skip review for certain conditions
72 | # if: |
73 | # !contains(github.event.pull_request.title, '[skip-review]') &&
74 | # !contains(github.event.pull_request.title, '[WIP]')
75 |
76 |
--------------------------------------------------------------------------------
/tests/test_grok_service.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from unittest.mock import patch, MagicMock
3 | from easilyai.services.grok_service import GrokService
4 | from easilyai.exceptions import MissingAPIKeyError, AuthenticationError, ServerError
5 |
6 |
class TestGrokService(unittest.TestCase):
    """Unit tests for GrokService construction, text generation, and error mapping."""

    def setUp(self):
        # Shared fixture values; no real credentials or network calls are used.
        self.apikey = "fake_api_key"
        self.model = "grok-beta"

    @staticmethod
    def _chat_response(text):
        # Mimic the OpenAI SDK response shape: response.choices[0].message.content
        return MagicMock(choices=[MagicMock(message=MagicMock(content=text))])

    @staticmethod
    def _stub_service(client):
        # __init__ is patched to a no-op by the decorator on each test, so the
        # constructor arguments are ignored; inject the mocked client by hand.
        svc = GrokService("fake_key", "grok-beta")
        svc.client = client
        svc.model = "grok-beta"
        return svc

    def test_missing_api_key(self):
        with self.assertRaises(MissingAPIKeyError):
            GrokService(apikey=None, model=self.model)

    @patch.object(GrokService, '__init__', lambda self, key, mdl: None)
    @patch('openai.OpenAI')
    def test_generate_text_success(self, mock_openai_class):
        fake_client = mock_openai_class.return_value
        fake_client.chat.completions.create.return_value = self._chat_response(
            "Mocked Grok response"
        )
        service = self._stub_service(fake_client)

        self.assertEqual(
            service.generate_text("Explain quantum physics"), "Mocked Grok response"
        )

    @patch.object(GrokService, '__init__', lambda self, key, mdl: None)
    @patch('openai.OpenAI')
    def test_generate_text_with_image(self, mock_openai_class):
        fake_client = mock_openai_class.return_value
        fake_client.chat.completions.create.return_value = self._chat_response(
            "Mocked response with image"
        )
        service = self._stub_service(fake_client)

        # Stub encode_image to pass the URL through, as it would for a remote image.
        with patch.object(service, 'encode_image', return_value="http://example.com/image.jpg"):
            result = service.generate_text(
                "What's in this image?", "http://example.com/image.jpg"
            )
        self.assertEqual(result, "Mocked response with image")

    @patch.object(GrokService, '__init__', lambda self, key, mdl: None)
    @patch('openai.OpenAI')
    def test_generate_text_server_error(self, mock_openai_class):
        fake_client = mock_openai_class.return_value
        fake_client.chat.completions.create.side_effect = Exception("Server error")
        service = self._stub_service(fake_client)

        with self.assertRaises(ServerError):
            service.generate_text("Test prompt")


if __name__ == "__main__":
    unittest.main()
--------------------------------------------------------------------------------
/docs/.vitepress/theme/components/FeatureCard.vue:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
{{ title }}
8 |
{{ description }}
9 |
10 |
11 |
12 |
13 |
14 | {{ badge }}
15 |
16 |
17 |
18 |
19 |
34 |
35 |
--------------------------------------------------------------------------------
/tests/test_custom_ai.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from easilyai.custom_ai import CustomAIService, register_custom_ai, _registered_custom_ais
3 |
4 |
class TestCustomAI(unittest.TestCase):
    """Behavioral tests for the CustomAIService base class and register_custom_ai."""

    def setUp(self):
        # Every test starts from an empty registry so state cannot leak between tests.
        _registered_custom_ais.clear()

    def test_custom_ai_service_init(self):
        svc = CustomAIService(model="test-model", apikey="test-key")
        self.assertEqual(svc.model, "test-model")
        self.assertEqual(svc.apikey, "test-key")

    def test_custom_ai_service_init_without_apikey(self):
        svc = CustomAIService(model="test-model")
        self.assertEqual(svc.model, "test-model")
        self.assertIsNone(svc.apikey)

    def test_custom_ai_service_not_implemented_methods(self):
        svc = CustomAIService(model="test-model")
        # Every capability on the base class must refuse to run until overridden.
        for call in (
            lambda: svc.generate_text("test prompt"),
            lambda: svc.generate_image("test prompt"),
            lambda: svc.text_to_speech("test text"),
        ):
            with self.assertRaises(NotImplementedError):
                call()

    def test_register_valid_custom_ai(self):
        class ValidCustomAI(CustomAIService):
            def generate_text(self, prompt):
                return f"Generated: {prompt}"

        register_custom_ai("valid_ai", ValidCustomAI)

        self.assertIn("valid_ai", _registered_custom_ais)
        self.assertIs(_registered_custom_ais["valid_ai"], ValidCustomAI)

    def test_register_invalid_custom_ai(self):
        class InvalidCustomAI:
            pass

        with self.assertRaises(TypeError) as ctx:
            register_custom_ai("invalid_ai", InvalidCustomAI)

        self.assertIn(
            "Custom service must inherit from CustomAIService", str(ctx.exception)
        )
        # A rejected class must not be left behind in the registry.
        self.assertNotIn("invalid_ai", _registered_custom_ais)

    def test_register_multiple_custom_ais(self):
        class CustomAI1(CustomAIService):
            def generate_text(self, prompt):
                return "AI1 response"

        class CustomAI2(CustomAIService):
            def generate_text(self, prompt):
                return "AI2 response"

        register_custom_ai("ai1", CustomAI1)
        register_custom_ai("ai2", CustomAI2)

        self.assertCountEqual(_registered_custom_ais.keys(), ["ai1", "ai2"])

    def test_custom_ai_implementation_example(self):
        class MockCustomAI(CustomAIService):
            def generate_text(self, prompt):
                return f"Mock response to: {prompt}"

            def generate_image(self, prompt):
                return f"Mock image for: {prompt}"

            def text_to_speech(self, text):
                return f"Mock audio for: {text}"

        svc = MockCustomAI(model="mock-model", apikey="mock-key")

        self.assertEqual(svc.generate_text("Hello"), "Mock response to: Hello")
        self.assertEqual(svc.generate_image("Cat"), "Mock image for: Cat")
        self.assertEqual(svc.text_to_speech("Speech"), "Mock audio for: Speech")


if __name__ == "__main__":
    unittest.main()
--------------------------------------------------------------------------------
/easilyai/services/grok_service.py:
--------------------------------------------------------------------------------
1 | import os
2 | import base64
3 | from openai import OpenAI
4 | from easilyai.exceptions import (
5 | AuthenticationError,
6 | RateLimitError,
7 | InvalidRequestError,
8 | APIConnectionError,
9 | NotFoundError,
10 | ServerError,
11 | MissingAPIKeyError,
12 | )
13 |
class GrokService:
    """Client for xAI's Grok chat API, accessed through the OpenAI-compatible SDK."""

    def __init__(self, apikey, model):
        """Create a Grok client.

        Args:
            apikey: xAI API key; required.
            model: model identifier (e.g. "grok-beta").

        Raises:
            MissingAPIKeyError: if ``apikey`` is falsy.
        """
        if not apikey:
            raise MissingAPIKeyError(
                "Grok API key is missing! Please provide your API key when initializing the service. "
                "Refer to the EasyAI documentation for more information."
            )
        self.model = model
        # Grok exposes an OpenAI-compatible endpoint, so the OpenAI SDK is reused.
        self.client = OpenAI(
            api_key=apikey,
            base_url="https://api.x.ai/v1",
        )

    def encode_image(self, img_url):
        """Encode a local image file as a base64 data URI; pass URLs through unchanged."""
        if os.path.exists(img_url):  # Check if it's a local file
            with open(img_url, "rb") as f:
                encoded_string = base64.b64encode(f.read()).decode("utf-8")
            # NOTE(review): the media type is hard-coded to JPEG even for PNG/GIF
            # inputs — confirm whether the API tolerates a mismatched data-URI type.
            return f"data:image/jpeg;base64,{encoded_string}"
        return img_url  # Assume it's already a URL if the file doesn't exist locally

    def generate_text(self, prompt, img_url=None):
        """Generate text via Grok's chat completion API.

        Args:
            prompt: user prompt text.
            img_url: optional local path or URL of an image to attach (vision mode).

        Returns:
            The model's reply text.

        Raises:
            One of the EasilyAI exceptions produced by ``handle_exception``.
        """
        try:
            messages = [{"role": "user", "content": prompt}]

            if img_url:
                # Vision request: the image part precedes the text part.
                encoded_img = self.encode_image(img_url)
                messages = [
                    {
                        "role": "user",
                        "content": [
                            {"type": "image_url", "image_url": {"url": encoded_img, "detail": "high"}},
                            {"type": "text", "text": prompt},
                        ],
                    }
                ]

            response = self.client.chat.completions.create(
                model=self.model,
                messages=messages,
                temperature=0.7,  # Adjust based on your needs
            )

            return response.choices[0].message.content

        except Exception as e:
            # handle_exception always raises, so this call never returns.
            self.handle_exception(e)

    @staticmethod
    def handle_exception(exception):
        """Map OpenAI SDK exceptions to EasilyAI errors and raise; never returns.

        Bug fix: the original tested ``isinstance`` against EasilyAI's own
        exception classes, which the OpenAI SDK never raises, so every real
        API failure collapsed into ServerError. We now match the SDK's types.
        """
        # Local import keeps the module's top-level imports unchanged; the
        # openai package is already a hard dependency (see OpenAI above).
        import openai

        if isinstance(exception, openai.AuthenticationError):
            raise AuthenticationError(
                "Authentication failed! Please check your Grok API key."
            )
        if isinstance(exception, openai.RateLimitError):
            raise RateLimitError(
                "Rate limit exceeded! You've made too many requests. Please wait and try again later."
            )
        if isinstance(exception, openai.BadRequestError):
            raise InvalidRequestError(
                f"Invalid request! {str(exception)}. Please check your request parameters."
            )
        if isinstance(exception, openai.NotFoundError):
            raise NotFoundError(
                "Model not found! Please check your model name and ensure it's correct."
            )
        if isinstance(exception, openai.APIConnectionError):
            raise APIConnectionError(
                "Connection error! Unable to connect to Grok's API. Please check your internet connection."
            )
        raise ServerError(
            f"An unknown error occurred while using Grok's API: {str(exception)}"
        )
89 |
--------------------------------------------------------------------------------
/easilyai/services/anthropic_service.py:
--------------------------------------------------------------------------------
1 | import os
2 | import base64
3 | import anthropic
4 | from easilyai.exceptions import (
5 | AuthenticationError, RateLimitError, InvalidRequestError,
6 | APIConnectionError, NotFoundError, ServerError, MissingAPIKeyError
7 | )
8 |
class AnthropicService:
    """Claude (Anthropic) text/vision client used by EasilyAI."""

    def __init__(self, apikey, model, max_tokens=1024):
        if not apikey:
            raise MissingAPIKeyError(
                "Anthropic API key is missing! Please provide your API key when initializing the service. "
                "Refer to the EasyAI documentation for more information."
            )
        self.apikey = apikey
        self.model = model
        self.max_tokens = max_tokens
        self.client = anthropic.Anthropic(api_key=apikey)

    def prepare_image(self, img_url):
        """Build a Claude "image" content block for a local file.

        Returns None for anything that is not an existing local path —
        Claude does not accept plain URLs, and remote images are not fetched.
        """
        if not os.path.exists(img_url):
            return None

        with open(img_url, "rb") as handle:
            payload = base64.b64encode(handle.read()).decode("utf-8")

        # Map the file extension to a media type, defaulting to JPEG.
        lowered = img_url.lower()
        media_type = 'image/jpeg'
        for suffix, candidate in (
            ('.png', 'image/png'),
            ('.jpg', 'image/jpeg'),
            ('.jpeg', 'image/jpeg'),
            ('.gif', 'image/gif'),
            ('.webp', 'image/webp'),
        ):
            if lowered.endswith(suffix):
                media_type = candidate
                break

        return {
            "type": "image",
            "source": {
                "type": "base64",
                "media_type": media_type,
                "data": payload
            }
        }

    def generate_text(self, prompt, img_url=None):
        """Run a single-turn message; attaches the image content block when available."""
        try:
            content = prompt  # Text-only by default
            if img_url:
                image_block = self.prepare_image(img_url)
                if image_block:
                    # Vision request: text part first, then the encoded image.
                    content = [{"type": "text", "text": prompt}, image_block]
                # If image preparation failed (e.g. a URL), fall back to text-only.

            response = self.client.messages.create(
                model=self.model,
                max_tokens=self.max_tokens,
                messages=[{"role": "user", "content": content}],
            )
            # Extract the text content of the first block.
            return response.content[0].text
        except anthropic.AuthenticationError:
            raise AuthenticationError("Invalid API key. Please check your Anthropic API key.")
        except anthropic.RateLimitError:
            raise RateLimitError("Rate limit exceeded. Please wait and try again later.")
        except anthropic.BadRequestError as e:
            raise InvalidRequestError(f"Invalid request: {str(e)}. Check your parameters.")
        except anthropic.APIConnectionError:
            raise APIConnectionError("Unable to connect to Anthropic API. Check your network.")
        except Exception as e:
            raise ServerError(
                f"An unexpected error occurred: {str(e)}. Please try again later."
            )
88 |
--------------------------------------------------------------------------------
/tests/test_pipeline.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from unittest.mock import MagicMock, patch
3 | from easilyai.pipeline import EasilyAIPipeline
4 |
5 |
class TestEasilyAIPipeline(unittest.TestCase):
    """Tests for task queueing and execution in EasilyAIPipeline."""

    def setUp(self):
        self.app = MagicMock()
        self.pipeline = EasilyAIPipeline(self.app)

    def test_add_task_simple(self):
        self.pipeline.add_task("generate_text", "Hello, world!")
        (task,) = self.pipeline.tasks  # exactly one queued task
        self.assertEqual(task["type"], "generate_text")
        self.assertEqual(task["data"], "Hello, world!")

    def test_add_task_with_kwargs(self):
        self.pipeline.add_task("generate_text", "Hello", max_tokens=100, temperature=0.7)
        (task,) = self.pipeline.tasks
        self.assertEqual(task["type"], "generate_text")
        # Keyword arguments get folded into the task's data dict.
        self.assertEqual(task["data"]["data"], "Hello")
        self.assertEqual(task["data"]["max_tokens"], 100)
        self.assertEqual(task["data"]["temperature"], 0.7)

    def test_add_multiple_tasks(self):
        for kind, payload in (
            ("generate_text", "Task 1"),
            ("generate_image", "Task 2"),
            ("text_to_speech", "Task 3"),
        ):
            self.pipeline.add_task(kind, payload)
        self.assertEqual(len(self.pipeline.tasks), 3)

    @patch('builtins.print')
    def test_run_generate_text_task(self, _silenced_print):
        self.app.request.return_value = "Generated text response"
        self.pipeline.add_task("generate_text", "Test prompt")

        (entry,) = self.pipeline.run()

        self.assertEqual(entry["task"], "generate_text")
        self.assertEqual(entry["result"], "Generated text response")
        self.app.request.assert_called_once_with("generate_text", "Test prompt")

    @patch('builtins.print')
    def test_run_generate_image_task(self, _silenced_print):
        self.app.request.return_value = "Generated image path"
        self.pipeline.add_task("generate_image", "A beautiful sunset")

        (entry,) = self.pipeline.run()

        self.assertEqual(entry["task"], "generate_image")
        self.assertEqual(entry["result"], "Generated image path")
        self.app.request.assert_called_once_with("Generate an image: A beautiful sunset")

    @patch('builtins.print')
    def test_run_text_to_speech_task(self, _silenced_print):
        self.app.client.text_to_speech.return_value = "audio_file.mp3"
        self.pipeline.add_task("text_to_speech", "Hello world")

        (entry,) = self.pipeline.run()

        self.assertEqual(entry["task"], "text_to_speech")
        self.assertEqual(entry["result"], "audio_file.mp3")
        self.app.client.text_to_speech.assert_called_once_with("Hello world")

    @patch('builtins.print')
    def test_run_multiple_tasks(self, _silenced_print):
        self.app.request.return_value = "Response"
        self.app.client.text_to_speech.return_value = "audio.mp3"

        self.pipeline.add_task("generate_text", "Text task")
        self.pipeline.add_task("text_to_speech", "Speech task")

        first, second = self.pipeline.run()

        self.assertEqual(first["task"], "generate_text")
        self.assertEqual(second["task"], "text_to_speech")

    @patch('builtins.print')
    def test_run_unknown_task_type(self, _silenced_print):
        self.pipeline.add_task("unknown_task", "Test data")

        with self.assertRaises(ValueError) as ctx:
            self.pipeline.run()

        self.assertIn("Unknown task type: unknown_task", str(ctx.exception))


if __name__ == "__main__":
    unittest.main()
--------------------------------------------------------------------------------
/easilyai/app.py:
--------------------------------------------------------------------------------
1 | from easilyai.services.openai_service import OpenAIService
2 | from easilyai.services.ollama_service import OllamaService
3 | from easilyai.services.gemini_service import GeminiService
4 | from easilyai.services.grok_service import GrokService
5 | from easilyai.services.anthropic_service import AnthropicService
6 | from easilyai.services.huggingface_service import HuggingFaceService
7 | from easilyai.custom_ai import CustomAIService
8 | from easilyai.exceptions import UnsupportedServiceError, NotImplementedError
9 |
# Registry mapping custom service names -> service classes, consulted by
# EasyAIApp/EasyAITTSApp when resolving a service string.
# NOTE(review): easilyai.custom_ai defines its own _registered_custom_ais dict
# (the tests import it from there); it is not visible from this file whether
# registrations made via custom_ai.register_custom_ai are mirrored into this
# dict — confirm, otherwise custom services may never be found by the app.
_registered_custom_ais = {}
11 |
class EasyAIApp:
    """Facade that routes generation requests to a provider-specific client.

    Supported services: "openai", "ollama", "gemini", "grok", "anthropic",
    "huggingface", plus any name present in the module-level
    ``_registered_custom_ais`` registry.
    """

    def __init__(self, name, service, apikey=None, model=None, max_tokens=None):
        """Build the provider client for ``service``.

        Raises:
            UnsupportedServiceError: if ``service`` is not recognized.
        """
        self.name = name
        self.service = service
        self.model = model
        self.client = None

        if service == "openai":
            self.client = OpenAIService(apikey, model)
        elif service == "ollama":
            self.client = OllamaService(model)
        elif service == "gemini":
            self.client = GeminiService(apikey, model)
        elif service == "grok":
            self.client = GrokService(apikey, model)
        elif service == "anthropic":
            # Only forward max_tokens when the caller supplied one so the
            # AnthropicService default stays in effect otherwise.
            if max_tokens:
                self.client = AnthropicService(apikey, model, max_tokens)
            else:
                self.client = AnthropicService(apikey, model)
        elif service == "huggingface":
            self.client = HuggingFaceService(apikey, model)
        elif service in _registered_custom_ais:
            self.client = _registered_custom_ais[service](model, apikey)
        else:
            # Bug fix: the previous message omitted several supported services
            # (gemini, grok, anthropic), misleading users who hit this error.
            raise UnsupportedServiceError(
                f"Unsupported service '{service}'! Use 'openai', 'ollama', 'gemini', 'grok', "
                "'anthropic', 'huggingface', or a registered custom service. "
                "Refer to the EasyAI documentation for more information."
            )

    def request(self, task_type, task):
        """Dispatch a task to the underlying client.

        Args:
            task_type: "generate_text", "generate_image", or "text_to_speech".
            task: the prompt string; for "generate_text" it may also be a dict
                with "data" (the prompt) and an optional "image_url".

        Returns:
            Whatever the underlying client returns for the task.

        Raises:
            ValueError: for an unrecognized ``task_type``.
        """
        if task_type == "generate_text":
            if isinstance(task, dict):
                # Dict payloads carry the prompt plus an optional image URL
                # (used by vision-capable services such as Grok).
                return self.client.generate_text(task["data"], task.get("image_url"))
            # Plain-string payloads work for all services.
            return self.client.generate_text(task)
        elif task_type == "generate_image":
            return self.client.generate_image(task)
        elif task_type == "text_to_speech":
            return self.client.text_to_speech(task)
        raise ValueError(f"Unsupported task type: {task_type}")
61 |
class EasyAITTSApp:
    """Thin app wrapper dedicated to text-to-speech requests."""

    def __init__(self, name, service, apikey=None, model=None):
        self.name = name
        self.service = service
        self.model = model
        self.client = None

        custom_cls = _registered_custom_ais.get(service)
        if service == "openai":
            self.client = OpenAIService(apikey, model)
        elif custom_cls is not None:
            self.client = custom_cls(model, apikey)
        else:
            raise ValueError("Unsupported service for TTS. Use 'openai' or a registered custom service.")

    def request_tts(self, text, tts_model="tts-1", voice="onyx", output_file="output.mp3"):
        """
        Convert text to speech using the selected service.
        """
        # Guard clause: only services exposing text_to_speech can handle TTS.
        if not hasattr(self.client, "text_to_speech"):
            raise NotImplementedError("TTS is not supported for this service.")
        return self.client.text_to_speech(
            text, tts_model=tts_model, voice=voice, output_file=output_file
        )
84 |
85 |
def create_app(name, service, apikey=None, model=None, max_tokens=None):
    """Factory wrapper around EasyAIApp.

    Args:
        name: human-readable application name.
        service: provider id ("openai", "ollama", "gemini", "grok",
            "anthropic", "huggingface", or a registered custom service).
        apikey: provider API key, if the service needs one.
        model: model identifier for the chosen provider.
        max_tokens: optional completion cap forwarded to EasyAIApp.
            Generalization: the factory previously dropped this setting even
            though EasyAIApp accepts it; the new optional parameter is
            backward compatible.
    """
    return EasyAIApp(name, service, apikey, model, max_tokens)
88 |
def create_tts_app(name, service, apikey=None, model=None):
    """Build and return an EasyAITTSApp configured for *service*."""
    tts_app = EasyAITTSApp(name, service, apikey, model)
    return tts_app
91 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
# Developer task runner for the EasilyAI project.
# Targets are self-documenting: trailing "## text" annotations feed `make help`.
# NOTE(review): .PHONY omits several declared targets (format-check, check, ci,
# update-deps, watch-test, benchmark, profile, version, release-*) — verify and
# extend the list so same-named files can never shadow those targets.
.PHONY: help install install-dev test test-unit test-integration test-cov lint format type-check security-check clean docs serve-docs build publish-test publish dev-setup pre-commit

help: ## Show this help message
	@echo 'Usage: make [target]'
	@echo ''
	@echo 'Available targets:'
	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}'

install: ## Install the package in production mode
	pip install -e .

install-dev: ## Install the package with all development dependencies
	pip install -e ".[dev,test,docs]"
	pre-commit install

test: ## Run all tests
	pytest

test-unit: ## Run unit tests only
	pytest -m unit

test-integration: ## Run integration tests only
	pytest -m integration

test-cov: ## Run tests with coverage report
	pytest --cov=easilyai --cov-report=html --cov-report=term

lint: ## Run all linting checks
	@echo "Running flake8..."
	flake8 easilyai tests
	@echo "Running mypy..."
	mypy easilyai
	@echo "Running bandit security check..."
	bandit -r easilyai -ll

format: ## Format code with black and isort
	@echo "Running isort..."
	isort easilyai tests
	@echo "Running black..."
	black easilyai tests

format-check: ## Check code formatting without making changes
	@echo "Checking isort..."
	isort --check-only easilyai tests
	@echo "Checking black..."
	black --check easilyai tests

type-check: ## Run type checking with mypy
	mypy easilyai

security-check: ## Run security checks
	bandit -r easilyai -ll
	safety check --json

clean: ## Clean up build artifacts and cache files
	rm -rf build/
	rm -rf dist/
	rm -rf *.egg-info
	rm -rf .eggs/
	rm -rf .pytest_cache/
	rm -rf .mypy_cache/
	rm -rf .ruff_cache/
	rm -rf htmlcov/
	rm -rf .coverage
	rm -rf coverage.xml
	find . -type d -name __pycache__ -exec rm -rf {} + 2>/dev/null || true
	find . -type f -name "*.pyc" -delete
	find . -type f -name "*.pyo" -delete
	find . -type f -name "*~" -delete

docs: ## Build documentation
	mkdocs build

serve-docs: ## Serve documentation locally
	mkdocs serve --dev-addr 127.0.0.1:8000

build: clean ## Build distribution packages
	python -m build

publish-test: build ## Publish to TestPyPI
	python -m twine upload --repository testpypi dist/*

publish: build ## Publish to PyPI
	python -m twine upload dist/*

dev-setup: ## Complete development environment setup
	@echo "Setting up development environment..."
	pip install --upgrade pip setuptools wheel
	pip install -e ".[dev,test,docs]"
	pre-commit install
	@echo "Development environment setup complete!"

pre-commit: ## Run pre-commit hooks on all files
	pre-commit run --all-files

update-deps: ## Update all dependencies to latest versions
	pip install --upgrade pip setuptools wheel
	pip install --upgrade -e ".[dev,test,docs]"
	pre-commit autoupdate

check: format-check lint type-check test ## Run all checks (format, lint, type, test)

ci: ## Run CI pipeline locally
	@echo "Running CI pipeline..."
	$(MAKE) format-check
	$(MAKE) lint
	$(MAKE) type-check
	$(MAKE) test-cov
	@echo "CI pipeline complete!"

watch-test: ## Run tests in watch mode (requires pytest-watch)
	pip install pytest-watch
	ptw -- --tb=short

benchmark: ## Run performance benchmarks
	pytest tests/benchmarks/ -v --benchmark-only

profile: ## Profile the code (requires py-spy)
	pip install py-spy
	py-spy record -o profile.svg -- python -m pytest tests/

version: ## Show current version
	@python -c "from easilyai import __version__; print(__version__)"

release-patch: ## Create a patch release
	bump2version patch
	git push && git push --tags

release-minor: ## Create a minor release
	bump2version minor
	git push && git push --tags

release-major: ## Create a major release
	bump2version major
	git push && git push --tags
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: EasilyAI Documentation
2 | site_description: A unified Python library for AI services
3 | site_author: GustyCube
4 | site_url: https://gustycube.github.io/EasilyAI/
5 | repo_name: GustyCube/EasilyAI
6 | repo_url: https://github.com/GustyCube/EasilyAI
7 | edit_uri: edit/main/docs/
8 |
9 | theme:
10 | name: material
11 | palette:
12 | - media: "(prefers-color-scheme: light)"
13 | scheme: default
14 | primary: indigo
15 | accent: indigo
16 | toggle:
17 | icon: material/brightness-7
18 | name: Switch to dark mode
19 | - media: "(prefers-color-scheme: dark)"
20 | scheme: slate
21 | primary: indigo
22 | accent: indigo
23 | toggle:
24 | icon: material/brightness-4
25 | name: Switch to light mode
26 | features:
27 | - navigation.instant
28 | - navigation.tracking
29 | - navigation.tabs
30 | - navigation.tabs.sticky
31 | - navigation.sections
32 | - navigation.expand
33 | - navigation.indexes
34 | - navigation.top
35 | - search.suggest
36 | - search.highlight
37 | - search.share
38 | - content.code.copy
39 | - content.code.annotate
40 | - content.tabs.link
41 | icon:
42 | repo: fontawesome/brands/github
43 |
44 | plugins:
45 | - search
46 | - mkdocstrings:
47 | handlers:
48 | python:
49 | options:
50 | show_source: true
51 | show_root_heading: true
52 | show_root_toc_entry: true
53 | show_signature_annotations: true
54 | show_symbol_type_heading: true
55 | show_symbol_type_toc: true
56 | docstring_style: google
57 | merge_init_into_class: true
58 | separate_signature: true
59 | - autorefs
60 |
61 | markdown_extensions:
62 | - pymdownx.highlight:
63 | anchor_linenums: true
64 | line_spans: __span
65 | pygments_lang_class: true
66 | - pymdownx.inlinehilite
67 | - pymdownx.snippets
68 | - pymdownx.superfences
69 | - pymdownx.tabbed:
70 | alternate_style: true
71 | - pymdownx.details
72 | - admonition
73 | - tables
74 | - attr_list
75 | - md_in_html
76 | - toc:
77 | permalink: true
78 |
79 | nav:
80 | - Home: index.md
81 | - Getting Started:
82 | - Installation: getting-started/installation.md
83 | - Quick Start: getting-started/quickstart.md
84 | - Configuration: getting-started/configuration.md
85 | - User Guide:
86 | - Basic Usage: guide/basic-usage.md
87 | - Services:
88 | - OpenAI: guide/services/openai.md
89 | - Anthropic: guide/services/anthropic.md
90 | - Google Gemini: guide/services/gemini.md
- Grok: guide/services/grok.md
92 | - Custom AI: guide/services/custom.md
93 | - Advanced Features:
94 | - Pipelines: guide/advanced/pipelines.md
95 | - Batch Processing: guide/advanced/batch.md
96 | - Enhanced App: guide/advanced/enhanced-app.md
97 | - Error Handling: guide/advanced/error-handling.md
98 | - API Reference:
99 | - Overview: api/overview.md
100 | - Services:
101 | - OpenAI Service: api/services/openai.md
102 | - Anthropic Service: api/services/anthropic.md
103 | - Gemini Service: api/services/gemini.md
- Grok Service: api/services/grok.md
105 | - Core:
106 | - App Creation: api/core/app.md
107 | - Pipeline: api/core/pipeline.md
108 | - Batch Processing: api/core/batch.md
109 | - Utilities:
110 | - Utils: api/utils.md
111 | - Exceptions: api/exceptions.md
112 | - Development:
113 | - Contributing: development/contributing.md
114 | - Code of Conduct: development/code-of-conduct.md
115 | - Testing: development/testing.md
116 | - Release Process: development/release.md
117 | - Examples:
118 | - Chat Application: examples/chat.md
119 | - Content Generation: examples/content.md
120 | - Multi-Model Comparison: examples/comparison.md
121 | - Custom Integration: examples/custom.md
122 | - Changelog: changelog.md
123 |
124 | extra:
125 | social:
126 | - icon: fontawesome/brands/github
127 | link: https://github.com/GustyCube/EasilyAI
128 | - icon: fontawesome/brands/python
129 | link: https://pypi.org/project/easilyai/
130 | analytics:
131 | provider: google
132 | property: G-XXXXXXXXXX # Replace with actual Google Analytics ID
133 |
134 | copyright: Copyright © 2024 GustyCube - Apache 2.0 License
--------------------------------------------------------------------------------
/easilyai/services/gemini_service.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pathlib
3 | import google.generativeai as googleai
4 | from google.api_core import exceptions as google_exceptions
5 | from easilyai.exceptions import (
6 | AuthenticationError, RateLimitError, InvalidRequestError,
7 | APIConnectionError, NotFoundError, ServerError, MissingAPIKeyError
8 | )
9 |
class GeminiService:
    """Google Gemini client used by EasilyAI for text and vision generation."""

    def __init__(self, apikey, model):
        """Configure the google-generativeai SDK and build the model handle.

        Args:
            apikey: Google AI API key; required.
            model: model name, with or without the "models/" prefix.

        Raises:
            MissingAPIKeyError: if ``apikey`` is falsy.
        """
        if not apikey:
            raise MissingAPIKeyError(
                "Gemini API key is missing! Please provide your API key when initializing the service. "
                "Refer to the EasilyAI documentation for more information."
            )
        googleai.configure(api_key=apikey)
        # Keep both the short name ("gemini-1" even if input is "models/gemini-1")
        # and the full name; the SDK handle is built from the full name.
        # Bug fix: removed a stray debug print of the model name that leaked to
        # stdout on every construction.
        self.model_name = model.split("/")[-1]
        self.full_model_name = model  # Full name (e.g., "models/gemini-1")
        self.model = googleai.GenerativeModel(self.full_model_name)

    def prepare_image(self, img_url):
        """Prepare image for Gemini API - handles both local files and URLs.

        Returns a ``{'mime_type', 'data'}`` blob for a local file, or the input
        unchanged when it is not a local path (Gemini can handle URLs directly).
        """
        if os.path.exists(img_url):  # Local file
            image_data = pathlib.Path(img_url).read_bytes()
            # Determine mime type based on file extension
            if img_url.lower().endswith('.png'):
                mime_type = 'image/png'
            elif img_url.lower().endswith('.jpg') or img_url.lower().endswith('.jpeg'):
                mime_type = 'image/jpeg'
            elif img_url.lower().endswith('.gif'):
                mime_type = 'image/gif'
            elif img_url.lower().endswith('.webp'):
                mime_type = 'image/webp'
            else:
                mime_type = 'image/jpeg'  # Default fallback

            return {
                'mime_type': mime_type,
                'data': image_data
            }
        # Assume it's a URL - Gemini can handle URLs directly
        return img_url

    def generate_text(self, prompt, img_url=None):
        """Generate text (optionally grounded on an image), mapping SDK errors
        to EasilyAI exceptions.
        """
        try:
            if img_url:
                # Vision mode - include image with prompt
                image_data = self.prepare_image(img_url)
                response = self.model.generate_content([prompt, image_data])
            else:
                # Text-only mode
                response = self.model.generate_content(prompt)
            return response.text
        except google_exceptions.Unauthenticated:
            raise AuthenticationError(
                "Authentication failed! Please check your Gemini API key and ensure it's correct. "
                "Refer to the EasilyAI documentation for more information."
            )
        except google_exceptions.ResourceExhausted:
            raise RateLimitError(
                "Rate limit exceeded! You've made too many requests in a short period. "
                "Please wait and try again later. Refer to the EasilyAI documentation for more information."
            )
        except google_exceptions.InvalidArgument as e:
            raise InvalidRequestError(
                f"Invalid request! {str(e)}. Please check your request parameters. "
                "Refer to the EasilyAI documentation for more information."
            )
        except google_exceptions.DeadlineExceeded:
            raise APIConnectionError(
                "Request timeout! The request took too long to complete. "
                "Please try again later. Refer to the EasilyAI documentation for more information."
            )
        except google_exceptions.NotFound:
            raise NotFoundError(
                "Model not found! Please check your model name and ensure it's correct. "
                "Refer to the EasilyAI documentation for more information."
            )
        except google_exceptions.GoogleAPIError as e:
            raise ServerError(
                f"An error occurred on Google's side: {str(e)}. Please try again later. "
                "Refer to the EasilyAI documentation for more information."
            )
        except Exception as e:
            raise ServerError(
                f"An unexpected error occurred: {str(e)}. Please try again later. "
                "Refer to the EasilyAI documentation for more information."
            )
93 |
--------------------------------------------------------------------------------
/docs/.vitepress/config.mts:
--------------------------------------------------------------------------------
1 | import { defineConfig } from 'vitepress'
2 |
3 | // https://vitepress.dev/reference/site-config
export default defineConfig({
  title: "EasilyAI",
  description: "A unified Python library for seamless AI integration across multiple providers",
  // Site is served from GitHub Pages under the /EasilyAI/ subpath.
  base: "/EasilyAI/",
  // Tags injected into every page's <head>.
  head: [
    ['link', { rel: 'icon', href: '/EasilyAI/favicon.ico' }],
    ['meta', { name: 'theme-color', content: '#3eaf7c' }],
    ['meta', { name: 'apple-mobile-web-app-capable', content: 'yes' }],
    ['meta', { name: 'apple-mobile-web-app-status-bar-style', content: 'black' }]
  ],

  // Markdown rendering options, including labels for the custom containers.
  markdown: {
    lineNumbers: true,
    container: {
      tipLabel: '💡 Tip',
      warningLabel: '⚠️ Warning',
      dangerLabel: '❌ Danger',
      infoLabel: 'ℹ️ Info',
      detailsLabel: '📋 Details'
    }
  },
  themeConfig: {
    // https://vitepress.dev/reference/default-theme-config
    logo: '/logo.svg',

    // Built-in local search; no external search service required.
    search: {
      provider: 'local'
    },

    editLink: {
      pattern: 'https://github.com/GustyCube/EasilyAI/edit/main/docs/:path',
      text: 'Edit this page on GitHub'
    },

    footer: {
      message: 'Released under the MIT License.',
      copyright: 'Copyright © 2024 EasilyAI Contributors'
    },

    lastUpdated: {
      text: 'Updated at',
      formatOptions: {
        dateStyle: 'full',
        timeStyle: 'medium'
      }
    },
    // Top navigation bar.
    nav: [
      { text: 'Home', link: '/' },
      { text: 'Get Started', link: '/overview' },
      { text: 'Guide', link: '/guide' },
      { text: 'API Reference', link: '/api' },
      { text: 'Examples', link: '/examples' },
    ],

    // Sidebar groups, ordered from beginner topics to the API reference.
    sidebar: [
      {
        text: 'Getting Started',
        items: [
          { text: 'Overview', link: '/overview' },
          { text: 'Installation', link: '/installation' },
          { text: 'Quick Start', link: '/quickstart' },
          { text: 'Basic Examples', link: '/examples' },
        ]
      },
      {
        text: 'User Guide',
        items: [
          { text: 'Creating Apps', link: '/appcreation' },
          { text: 'Working with Services', link: '/services' },
          { text: 'Text Generation', link: '/textgeneration' },
          { text: 'Image Generation', link: '/imagegeneration' },
          { text: 'Text to Speech', link: '/texttospeech' },
          { text: 'Pipelines', link: '/pipelines' },
          { text: 'Error Handling', link: '/errorhandling' },
        ]
      },
      {
        text: 'Advanced Features',
        items: [
          { text: 'Custom AI Services', link: '/customai' },
          { text: 'Configuration', link: '/configuration' },
          { text: 'Performance Tips', link: '/performance' },
        ]
      },
      {
        text: 'AI Services',
        items: [
          { text: 'OpenAI', link: '/openai' },
          { text: 'Anthropic (Claude)', link: '/anthropic' },
          { text: 'Google Gemini', link: '/gemini' },
          { text: 'X.AI Grok', link: '/grok' },
          { text: 'Ollama', link: '/ollama' },
          { text: 'Hugging Face', link: '/huggingface' },
        ]
      },
      {
        text: 'API Reference',
        items: [
          { text: 'Core Classes', link: '/api' },
          { text: 'Service Classes', link: '/api/services' },
          { text: 'Pipeline System', link: '/api/pipelines' },
          { text: 'Custom AI Framework', link: '/api/customai' },
        ]
      },
    ],

    // Icons shown in the navbar; the PyPI entry uses an inline SVG
    // (currently empty -- presumably stripped; verify against the repo).
    socialLinks: [
      { icon: 'github', link: 'https://github.com/GustyCube/EasilyAI' },
      {
        icon: {
          svg: ''
        },
        link: 'https://pypi.org/project/EasilyAI/'
      }
    ]
  }
})
121 |
--------------------------------------------------------------------------------
/docs/.vitepress/theme/components/ApiTable.vue:
--------------------------------------------------------------------------------
1 |
2 |
3 |
7 |
8 |
9 |
10 |
11 | |
12 | {{ header.label }}
13 | |
14 |
15 |
16 |
17 |
18 | |
19 |
20 | {{ row[header.key] }}
21 | *
22 |
23 |
24 | {{ row[header.key] }}
25 |
26 |
27 | {{ row[header.key] }}
28 |
29 |
30 | {{ row[header.key] }}
31 |
32 | Example: {{ row.example }}
33 |
34 |
35 | {{ row[header.key] }}
36 | |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
66 |
67 |
--------------------------------------------------------------------------------
/docs/quickstart.md:
--------------------------------------------------------------------------------
1 | # Quick Start Guide
2 |
3 | Get up and running with EasilyAI in just a few minutes! This guide will walk you through the basics of using EasilyAI to generate text, images, and speech.
4 |
5 | ## Prerequisites
6 |
7 | - Python 3.7 or higher
8 | - An API key from at least one AI service (OpenAI, Anthropic, Google, etc.)
9 |
10 | ## Your First AI App
11 |
12 | Let's create your first AI application that can generate text:
13 |
14 | ### 1. Create a Text Generation App
15 |
16 | ```python
17 | from easilyai import create_app
18 |
19 | # Create an app with OpenAI (you can also use 'anthropic', 'gemini', 'grok', or 'ollama')
20 | app = create_app(
21 | name="MyFirstApp",
22 | service="openai",
23 | api_key="your-openai-api-key-here",
24 | model="gpt-3.5-turbo"
25 | )
26 |
27 | # Generate some text
28 | response = app.request("Tell me a short joke about programming")
29 | print(response)
30 | ```
31 |
32 | ### 2. Try Different AI Services
33 |
34 | EasilyAI supports multiple AI services. Here's how to use each:
35 |
36 | ```python
37 | from easilyai import create_app
38 |
39 | # OpenAI (GPT models)
40 | openai_app = create_app("OpenAI", "openai", "your-openai-key", "gpt-3.5-turbo")
41 |
42 | # Anthropic (Claude models)
43 | claude_app = create_app("Claude", "anthropic", "your-anthropic-key", "claude-3-haiku-20240307")
44 |
45 | # Google Gemini
46 | gemini_app = create_app("Gemini", "gemini", "your-gemini-key", "gemini-1.5-flash")
47 |
48 | # X.AI Grok
49 | grok_app = create_app("Grok", "grok", "your-grok-key", "grok-beta")
50 |
51 | # Ollama (local models)
52 | ollama_app = create_app("Ollama", "ollama", "", "llama2") # No API key needed for local
53 | ```
54 |
55 | ### 3. Generate Images
56 |
57 | If you're using OpenAI, you can also generate images:
58 |
59 | ```python
60 | from easilyai import create_app
61 |
62 | app = create_app("ImageApp", "openai", "your-openai-key", "dall-e-3")
63 |
64 | # Generate an image
65 | image_response = app.request(
66 | "A cute robot learning to code",
67 | task_type="generate_image",
68 | size="1024x1024"
69 | )
70 |
71 | print(f"Image URL: {image_response}")
72 | ```
73 |
74 | ### 4. Text-to-Speech
75 |
76 | Convert text to speech using OpenAI's TTS:
77 |
78 | ```python
79 | from easilyai import create_tts_app
80 |
81 | tts_app = create_tts_app("TTSApp", "openai", "your-openai-key", "tts-1")
82 |
83 | # Convert text to speech
84 | audio_response = tts_app.request(
85 | "Hello! Welcome to EasilyAI!",
86 | voice="alloy",
87 | output_file="welcome.mp3"
88 | )
89 |
90 | print(f"Audio saved to: {audio_response}")
91 | ```
92 |
93 | ## Next Steps
94 |
95 | Now that you've got the basics down, here are some next steps to explore:
96 |
97 | 1. **[Learn about different AI services →](/services)** - Discover the capabilities of each supported AI service
98 | 2. **[Explore pipelines →](/pipelines)** - Chain multiple AI operations together
99 | 3. **[Handle errors gracefully →](/errorhandling)** - Learn how to handle API errors and rate limits
100 | 4. **[Create custom AI services →](/customai)** - Extend EasilyAI with your own AI providers
101 |
102 | ## Common Patterns
103 |
104 | ### Safe API Key Management
105 |
106 | Never hardcode your API keys! Use environment variables instead:
107 |
108 | ```python
109 | import os
110 | from easilyai import create_app
111 |
112 | app = create_app(
113 | name="SecureApp",
114 | service="openai",
115 | api_key=os.getenv("OPENAI_API_KEY"), # Set this in your environment
116 | model="gpt-3.5-turbo"
117 | )
118 | ```
119 |
120 | ### Basic Error Handling
121 |
122 | ```python
123 | from easilyai import create_app
124 | from easilyai.exceptions import EasilyAIException
125 |
126 | try:
127 | app = create_app("MyApp", "openai", "your-key", "gpt-3.5-turbo")
128 | response = app.request("Hello, world!")
129 | print(response)
130 | except EasilyAIException as e:
131 | print(f"AI Error: {e}")
132 | except Exception as e:
133 | print(f"Unexpected error: {e}")
134 | ```
135 |
136 | ### Multiple Requests
137 |
138 | ```python
139 | from easilyai import create_app
140 |
141 | app = create_app("ChatApp", "openai", "your-key", "gpt-3.5-turbo")
142 |
143 | questions = [
144 | "What is Python?",
145 | "How do I install packages?",
146 | "What are virtual environments?"
147 | ]
148 |
149 | for question in questions:
150 | response = app.request(question)
151 | print(f"Q: {question}")
152 | print(f"A: {response}\n")
153 | ```
154 |
155 | ## Tips for Beginners
156 |
157 | 1. **Start Simple**: Begin with basic text generation before moving to advanced features
158 | 2. **Test with Different Models**: Each AI service has different strengths - experiment to find what works best for your use case
159 | 3. **Read the Error Messages**: EasilyAI provides helpful error messages to guide you
160 | 4. **Use Environment Variables**: Keep your API keys secure by using environment variables
161 | 5. **Check the Examples**: Look at the `/examples` directory for more code samples
162 |
163 | Ready to dive deeper? Check out our [comprehensive guide](/guide) or explore specific [AI services](/services).
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
EasilyAI
3 |
A unified Python library for AI services
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 | ## Overview
18 |
19 | **EasilyAI** is a powerful Python library that simplifies AI application development by providing a unified interface for multiple AI services including **OpenAI**, **Anthropic**, **Google Gemini**, **X.AI Grok**, and **Ollama**. Whether you need text generation, image creation, or text-to-speech functionality, EasilyAI offers a consistent API that makes switching between providers effortless.
20 |
21 | ---
22 |
23 | ## 🚀 Key Features
24 |
25 | - **🔄 Multi-Provider Support**: Seamlessly switch between OpenAI, Anthropic, Google Gemini, X.AI Grok, and Ollama
26 | - **📝 Text Generation**: Advanced language models for chat, completion, and creative writing
27 | - **🎨 Image Generation**: Create stunning visuals with DALL-E and other image models
28 | - **🗣️ Text-to-Speech**: High-quality voice synthesis with multiple voice options
29 | - **🔗 Pipeline System**: Chain multiple AI operations into powerful workflows
30 | - **🛠️ Custom AI Integration**: Easily extend with your own AI services
31 | - **⚡ Unified API**: One consistent interface for all providers and tasks
32 | - **🎯 Auto Task Detection**: Intelligent request routing based on content type
33 |
34 | ---
35 |
36 | ## 📦 Installation
37 |
38 | ```bash
39 | pip install easilyai
40 | ```
41 |
42 | ## 🚀 Quick Start
43 |
44 | Get up and running in minutes with these simple examples:
45 |
46 | ### Basic Text Generation
47 | ```python
48 | import easilyai
49 |
50 | # Create an app with your preferred provider
51 | app = easilyai.create_app(
52 | name="my_ai_app",
53 | service="openai", # or "anthropic", "gemini", "grok", "ollama"
54 | apikey="YOUR_API_KEY",
55 | model="gpt-4"
56 | )
57 |
58 | # Generate text
59 | response = app.request("Explain quantum computing in simple terms")
60 | print(response)
61 | ```
62 |
63 | ### Text-to-Speech
64 | ```python
65 | # Create a TTS app
66 | tts_app = easilyai.create_tts_app(
67 | name="my_tts_app",
68 | service="openai",
69 | apikey="YOUR_API_KEY",
70 | model="tts-1"
71 | )
72 |
73 | # Convert text to speech
74 | tts_app.request_tts(
75 | text="Hello from EasilyAI!",
76 | voice="onyx",
77 | output_file="greeting.mp3"
78 | )
79 | ```
80 |
81 | ### AI Pipeline
82 | ```python
83 | # Chain multiple AI operations
84 | pipeline = easilyai.EasilyAIPipeline(app)
85 | pipeline.add_task("generate_text", "Write a haiku about coding")
86 | pipeline.add_task("generate_image", "A serene coding environment")
87 |
88 | results = pipeline.run()
89 | ```
90 |
91 | ## 🛠️ Supported AI Providers
92 |
93 | | Provider | Text Generation | Image Generation | Text-to-Speech |
94 | |----------|:---------------:|:----------------:|:--------------:|
95 | | **OpenAI** | ✅ | ✅ | ✅ |
96 | | **Anthropic** | ✅ | ❌ | ❌ |
97 | | **Google Gemini** | ✅ | ❌ | ❌ |
98 | | **X.AI Grok** | ✅ | ❌ | ❌ |
99 | | **Ollama** | ✅ | ❌ | ❌ |
100 | | **Custom AI** | ✅ | ✅ | ✅ |
101 |
102 | ## 📚 Documentation
103 |
104 | For comprehensive guides, API reference, and advanced usage examples, visit our documentation:
105 |
106 | **[📖 View Full Documentation →](https://gustycube.github.io/EasilyAI/)**
107 |
108 | ## 🤝 Contributing
109 |
110 | We welcome contributions! Please see our [Contributing Guidelines](CONTRIBUTING.md) for details.
111 |
112 | ## 📄 License
113 |
114 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
115 |
116 | ## 🔗 Links
117 |
118 | - **Documentation**: https://gustycube.github.io/EasilyAI/overview.html
119 | - **PyPI Package**: https://pypi.org/project/easilyai
120 | - **GitHub Repository**: https://github.com/GustyCube/EasilyAI
121 | - **Issues & Support**: https://github.com/GustyCube/EasilyAI/issues
122 |
--------------------------------------------------------------------------------
/tests/test_exceptions.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from easilyai.exceptions import (
3 | EasilyAIError, AuthenticationError, RateLimitError, InvalidRequestError,
4 | APIConnectionError, NotFoundError, ServerError, MissingAPIKeyError,
5 | UnsupportedServiceError, Color
6 | )
7 |
8 |
class TestExceptions(unittest.TestCase):
    """Tests for the EasilyAI exception hierarchy and its colored messages."""

    def _assert_contains(self, exc, *fragments):
        """Assert that str(exc) contains every given fragment."""
        rendered = str(exc)
        for fragment in fragments:
            self.assertIn(fragment, rendered)

    def test_base_exception(self):
        exc = EasilyAIError("Base error message")
        self.assertIsInstance(exc, Exception)
        self.assertEqual(str(exc), "Base error message")

    def test_authentication_error_default_message(self):
        self._assert_contains(
            AuthenticationError(),
            "Authentication failed!", Color.RED, Color.RESET, "🔑",
        )

    def test_authentication_error_custom_message(self):
        self._assert_contains(
            AuthenticationError("Invalid API key"),
            "Invalid API key", Color.RED,
        )

    def test_rate_limit_error_default_message(self):
        self._assert_contains(
            RateLimitError(),
            "API rate limit exceeded!", Color.YELLOW, "⏳",
        )

    def test_rate_limit_error_custom_message(self):
        self._assert_contains(RateLimitError("Too many requests"), "Too many requests")

    def test_invalid_request_error(self):
        self._assert_contains(
            InvalidRequestError("Bad request format"),
            "Bad request format", Color.RED, "🚫",
        )

    def test_api_connection_error(self):
        self._assert_contains(
            APIConnectionError("Network timeout"),
            "Network timeout", Color.CYAN, "🌐",
        )

    def test_not_found_error(self):
        self._assert_contains(
            NotFoundError("Model not found"),
            "Model not found", Color.YELLOW, "🔍",
        )

    def test_server_error(self):
        self._assert_contains(
            ServerError("Internal server error"),
            "Internal server error", Color.RED, "💥",
        )

    def test_missing_api_key_error(self):
        self._assert_contains(
            MissingAPIKeyError("Please provide API key"),
            "Please provide API key", Color.RED, "🔐",
        )

    def test_unsupported_service_error(self):
        self._assert_contains(
            UnsupportedServiceError("unknown_service"),
            "unknown_service", Color.BLUE, "❌", "Unsupported service",
        )

    def test_color_constants(self):
        # Pin the exact ANSI escape sequences the library emits.
        expected_codes = {
            "RESET": "\033[0m",
            "RED": "\033[91m",
            "GREEN": "\033[92m",
            "YELLOW": "\033[93m",
            "BLUE": "\033[94m",
            "CYAN": "\033[96m",
        }
        for name, code in expected_codes.items():
            self.assertEqual(getattr(Color, name), code)

    def test_exception_inheritance(self):
        # Every custom exception must derive from the EasilyAIError root.
        subclasses = (
            AuthenticationError, RateLimitError, InvalidRequestError,
            APIConnectionError, NotFoundError, ServerError,
            MissingAPIKeyError, UnsupportedServiceError,
        )
        for exc_cls in subclasses:
            self.assertTrue(issubclass(exc_cls, EasilyAIError))

    def test_exception_can_be_caught(self):
        # Catchable both as the concrete type and as the base type.
        for catch_type in (EasilyAIError, AuthenticationError):
            with self.assertRaises(catch_type):
                raise AuthenticationError("Test error")

    def test_all_exceptions_have_colored_output(self):
        # Every message must terminate its coloring with the ANSI reset code.
        samples = (
            AuthenticationError(),
            RateLimitError(),
            InvalidRequestError(),
            APIConnectionError(),
            NotFoundError(),
            ServerError(),
            MissingAPIKeyError(),
            UnsupportedServiceError("test"),
        )
        for exc in samples:
            self.assertIn(Color.RESET, str(exc))
120 |
121 |
# Allow running this test module directly with `python`, outside of pytest.
if __name__ == "__main__":
    unittest.main()
--------------------------------------------------------------------------------
/easilyai/services/openai_service.py:
--------------------------------------------------------------------------------
1 | import os
2 | import base64
3 | import openai
4 | from openai import OpenAI
5 | from easilyai.exceptions import (
6 | AuthenticationError, RateLimitError, InvalidRequestError,
7 | APIConnectionError, NotFoundError, ServerError, MissingAPIKeyError
8 | )
9 |
class OpenAIService:
    """Wrapper around the OpenAI API for chat/vision text and image generation.

    Translates OpenAI SDK exceptions into EasilyAI's exception hierarchy so
    callers can handle errors uniformly across providers.
    """

    def __init__(self, apikey, model):
        """Initialize the service.

        Args:
            apikey: OpenAI API key; must be non-empty.
            model: Default chat model name (e.g. "gpt-4").

        Raises:
            MissingAPIKeyError: If no API key is provided.
        """
        if not apikey:
            raise MissingAPIKeyError(
                "OpenAI API key is missing! Please provide your API key when initializing the service. "
                "Refer to the EasilyAI documentation for more information."
            )
        # Keep the legacy module-level key assignment in case other code
        # still relies on it; all requests here go through the client.
        openai.api_key = apikey
        self.client = OpenAI(api_key=apikey)
        self.model = model

    def encode_image(self, img_url):
        """Return a ``data:`` URI for a local image file, or the input unchanged.

        Local files are Base64-encoded. The MIME type is guessed from the
        file extension (previously everything was mislabeled image/jpeg),
        falling back to image/jpeg when it cannot be determined. Non-local
        inputs are assumed to already be URLs.
        """
        if not os.path.exists(img_url):
            return img_url  # Assume it's already a remote URL.
        import mimetypes  # Local import: only needed on the local-file path.
        mime_type, _ = mimetypes.guess_type(img_url)
        if not mime_type or not mime_type.startswith("image/"):
            mime_type = "image/jpeg"  # Conservative default for unknown types.
        with open(img_url, "rb") as f:
            encoded_string = base64.b64encode(f.read()).decode("utf-8")
        return f"data:{mime_type};base64,{encoded_string}"

    def generate_text(self, prompt, img_url=None):
        """Generate a chat completion, optionally grounded on an image.

        Args:
            prompt: Text prompt sent to the model.
            img_url: Optional local path or URL of an image (vision mode).

        Returns:
            str: The assistant message content.

        Raises:
            AuthenticationError, RateLimitError, InvalidRequestError,
            APIConnectionError, NotFoundError, ServerError: Mapped from the
            corresponding OpenAI SDK exceptions.
        """
        try:
            # Prepare messages for vision or text-only
            if img_url:
                encoded_img = self.encode_image(img_url)
                messages = [
                    {
                        "role": "user",
                        "content": [
                            {"type": "image_url", "image_url": {"url": encoded_img, "detail": "high"}},
                            {"type": "text", "text": prompt},
                        ],
                    }
                ]
            else:
                messages = [{"role": "user", "content": prompt}]

            response = self.client.chat.completions.create(
                model=self.model,
                messages=messages
            )
            return response.choices[0].message.content
        except openai.AuthenticationError:
            raise AuthenticationError(
                "Authentication failed! Please check your OpenAI API key and ensure it's correct. "
                "Refer to the EasilyAI documentation for more information."
            )
        except openai.RateLimitError:
            raise RateLimitError(
                "Rate limit exceeded! You've made too many requests in a short period. "
                "Please wait and try again later. Refer to the EasilyAI documentation for more information."
            )
        except openai.BadRequestError as e:
            raise InvalidRequestError(
                f"Invalid request! {str(e)}. Please check your request parameters. "
                "Refer to the EasilyAI documentation for more information."
            )
        except openai.NotFoundError:
            # Mirrors the Gemini service: surface unknown models distinctly.
            raise NotFoundError(
                "Model not found! Please check your model name and ensure it's correct. "
                "Refer to the EasilyAI documentation for more information."
            )
        except openai.APIConnectionError:
            raise APIConnectionError(
                "Connection error! Unable to connect to OpenAI's API. "
                "Please check your internet connection and try again. "
                "Refer to the EasilyAI documentation for more information."
            )
        except openai.OpenAIError as e:
            # OpenAIError is the SDK base class; keep it last as the catch-all.
            raise ServerError(
                f"An error occurred on OpenAI's side: {str(e)}. Please try again later. "
                "Refer to the EasilyAI documentation for more information."
            )

    def generate_image(self, prompt, model="dall-e-3", size="1024x1024", quality="standard", n=1):
        """Generate an image and return its URL.

        Args:
            prompt: Image description.
            model: Image model name.
            size: Output resolution (e.g. "1024x1024").
            quality: "standard" or "hd".
            n: Number of images to request (only the first URL is returned).

        Returns:
            str: URL of the first generated image.

        Raises:
            AuthenticationError, RateLimitError, InvalidRequestError,
            APIConnectionError, NotFoundError, ServerError: Mapped from the
            corresponding OpenAI SDK exceptions.
        """
        try:
            response = self.client.images.generate(
                model=model,
                prompt=prompt,
                size=size,
                quality=quality,
                n=n
            )
            return response.data[0].url
        except openai.AuthenticationError:
            raise AuthenticationError(
                "Authentication failed! Please check your OpenAI API key and ensure it's correct. "
                "Refer to the EasilyAI documentation for more information."
            )
        except openai.RateLimitError:
            raise RateLimitError(
                "Rate limit exceeded! You've made too many requests in a short period. "
                "Please wait and try again later. Refer to the EasilyAI documentation for more information."
            )
        except openai.BadRequestError as e:
            raise InvalidRequestError(
                f"Invalid request! {str(e)}. Please check your request parameters. "
                "Refer to the EasilyAI documentation for more information."
            )
        except openai.NotFoundError:
            raise NotFoundError(
                "Model not found! Please check your model name and ensure it's correct. "
                "Refer to the EasilyAI documentation for more information."
            )
        except openai.APIConnectionError:
            raise APIConnectionError(
                "Connection error! Unable to connect to OpenAI's API. "
                "Please check your internet connection and try again. "
                "Refer to the EasilyAI documentation for more information."
            )
        except openai.OpenAIError as e:
            raise ServerError(
                f"An error occurred on OpenAI's side: {str(e)}. Please try again later. "
                "Refer to the EasilyAI documentation for more information."
            )
114 |
--------------------------------------------------------------------------------
/easilyai/services/huggingface_service.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import os
3 | from easilyai.exceptions import (
4 | AuthenticationError, RateLimitError, InvalidRequestError,
5 | APIConnectionError, NotFoundError, ServerError, MissingAPIKeyError
6 | )
7 |
class HuggingFaceService:
    """Wrapper around the Hugging Face Inference API (text, image, TTS models).

    HTTP errors are translated into EasilyAI exceptions by ``_handle_error``.
    Bug fix vs. the previous version: those translated exceptions are now
    re-raised as-is in every public method instead of being swallowed by the
    generic ``except Exception`` and re-wrapped as ``ServerError``.
    """

    def __init__(self, apikey, model):
        """Initialize the service.

        Args:
            apikey: Hugging Face API token; must be non-empty.
            model: Model repository id used for all requests.

        Raises:
            MissingAPIKeyError: If no API key is provided.
        """
        if not apikey:
            raise MissingAPIKeyError(
                "Hugging Face API key is missing! Please provide your API key when initializing the service. "
                "Refer to the EasilyAI documentation for more information."
            )
        self.apikey = apikey
        self.model = model
        self.base_url = "https://api-inference.huggingface.co/models"
        self.headers = {"Authorization": f"Bearer {apikey}"}

    def generate_text(self, prompt, max_length=100, temperature=0.7, top_p=0.9):
        """Generate text using Hugging Face Inference API.

        Args:
            prompt: Input text for the model.
            max_length: Maximum length of the generated sequence.
            temperature: Sampling temperature.
            top_p: Nucleus sampling threshold.

        Returns:
            str: The generated text (or the raw response stringified when the
            API returns an unexpected shape).
        """
        try:
            url = f"{self.base_url}/{self.model}"
            payload = {
                "inputs": prompt,
                "parameters": {
                    "max_length": max_length,
                    "temperature": temperature,
                    "top_p": top_p,
                    "return_full_text": False
                }
            }

            response = requests.post(url, headers=self.headers, json=payload)

            if response.status_code == 200:
                result = response.json()
                if isinstance(result, list) and len(result) > 0:
                    return result[0].get("generated_text", "")
                else:
                    return str(result)
            else:
                self._handle_error(response)

        except requests.exceptions.RequestException as e:
            raise APIConnectionError(
                f"Connection error! Unable to connect to Hugging Face API. {str(e)}"
            )
        except (AuthenticationError, RateLimitError, InvalidRequestError,
                NotFoundError, ServerError):
            # Already translated by _handle_error; propagate unchanged.
            raise
        except Exception as e:
            raise ServerError(
                f"An unexpected error occurred: {str(e)}. Please try again later."
            )

    def generate_image(self, prompt):
        """Generate image using Hugging Face text-to-image models.

        NOTE(review): the 200 response body is binary image data that is not
        yet persisted anywhere -- only a status string is returned for now.
        """
        try:
            url = f"{self.base_url}/{self.model}"
            payload = {"inputs": prompt}

            response = requests.post(url, headers=self.headers, json=payload)

            if response.status_code == 200:
                # For image generation, the response is typically binary data
                # We'd need to handle this differently - for now, return status
                return f"Image generated successfully for prompt: {prompt}"
            else:
                self._handle_error(response)

        except requests.exceptions.RequestException as e:
            raise APIConnectionError(
                f"Connection error! Unable to connect to Hugging Face API. {str(e)}"
            )
        except (AuthenticationError, RateLimitError, InvalidRequestError,
                NotFoundError, ServerError):
            # Already translated by _handle_error; propagate unchanged.
            raise
        except Exception as e:
            raise ServerError(
                f"An unexpected error occurred: {str(e)}. Please try again later."
            )

    def text_to_speech(self, text):
        """Convert text to speech using Hugging Face TTS models.

        NOTE(review): the 200 response body is binary audio data that is not
        yet persisted anywhere -- only a status string is returned for now.
        """
        try:
            url = f"{self.base_url}/{self.model}"
            payload = {"inputs": text}

            response = requests.post(url, headers=self.headers, json=payload)

            if response.status_code == 200:
                # For TTS, the response is typically audio data
                # We'd need to handle this differently - for now, return status
                return f"Speech generated successfully for text: {text}"
            else:
                self._handle_error(response)

        except requests.exceptions.RequestException as e:
            raise APIConnectionError(
                f"Connection error! Unable to connect to Hugging Face API. {str(e)}"
            )
        except (AuthenticationError, RateLimitError, InvalidRequestError,
                NotFoundError, ServerError):
            # Already translated by _handle_error; propagate unchanged.
            raise
        except Exception as e:
            raise ServerError(
                f"An unexpected error occurred: {str(e)}. Please try again later."
            )

    def _handle_error(self, response):
        """Translate an HTTP error response into an EasilyAI exception.

        Always raises; never returns normally.
        """
        if response.status_code == 401:
            raise AuthenticationError(
                "Authentication failed! Please check your Hugging Face API key."
            )
        elif response.status_code == 429:
            raise RateLimitError(
                "Rate limit exceeded! Please wait and try again later."
            )
        elif response.status_code == 400:
            raise InvalidRequestError(
                f"Invalid request! Please check your parameters. Error: {response.text}"
            )
        elif response.status_code == 404:
            raise NotFoundError(
                f"Model not found! Please check your model name: {self.model}"
            )
        elif response.status_code >= 500:
            raise ServerError(
                f"Server error from Hugging Face API: {response.status_code} - {response.text}"
            )
        else:
            raise ServerError(
                f"Unknown error from Hugging Face API: {response.status_code} - {response.text}"
            )
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | We as members, contributors, and leaders pledge to make participation in our
6 | community a harassment-free experience for everyone, regardless of age, body
7 | size, visible or invisible disability, ethnicity, sex characteristics, gender
8 | identity and expression, level of experience, education, socio-economic status,
9 | nationality, personal appearance, race, religion, or sexual identity
10 | and orientation.
11 |
12 | We pledge to act and interact in ways that contribute to an open, welcoming,
13 | diverse, inclusive, and healthy community.
14 |
15 | ## Our Standards
16 |
17 | Examples of behavior that contributes to a positive environment for our
18 | community include:
19 |
20 | * Demonstrating empathy and kindness toward other people
21 | * Being respectful of differing opinions, viewpoints, and experiences
22 | * Giving and gracefully accepting constructive feedback
23 | * Accepting responsibility and apologizing to those affected by our mistakes,
24 | and learning from the experience
25 | * Focusing on what is best not just for us as individuals, but for the
26 | overall community
27 |
28 | Examples of unacceptable behavior include:
29 |
30 | * The use of sexualized language or imagery, and sexual attention or
31 | advances of any kind
32 | * Trolling, insulting or derogatory comments, and personal or political attacks
33 | * Public or private harassment
34 | * Publishing others' private information, such as a physical or email
35 | address, without their explicit permission
36 | * Other conduct which could reasonably be considered inappropriate in a
37 | professional setting
38 |
39 | ## Enforcement Responsibilities
40 |
41 | Community leaders are responsible for clarifying and enforcing our standards of
42 | acceptable behavior and will take appropriate and fair corrective action in
43 | response to any behavior that they deem inappropriate, threatening, offensive,
44 | or harmful.
45 |
46 | Community leaders have the right and responsibility to remove, edit, or reject
47 | comments, commits, code, wiki edits, issues, and other contributions that are
48 | not aligned to this Code of Conduct, and will communicate reasons for moderation
49 | decisions when appropriate.
50 |
51 | ## Scope
52 |
53 | This Code of Conduct applies within all community spaces, and also applies when
54 | an individual is officially representing the community in public spaces.
55 | Examples of representing our community include using an official e-mail address,
56 | posting via an official social media account, or acting as an appointed
57 | representative at an online or offline event.
58 |
59 | ## Enforcement
60 |
61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
62 | reported to the community leaders responsible for enforcement at
63 | gc@gustycube.xyz.
64 | All complaints will be reviewed and investigated promptly and fairly.
65 |
66 | All community leaders are obligated to respect the privacy and security of the
67 | reporter of any incident.
68 |
69 | ## Enforcement Guidelines
70 |
71 | Community leaders will follow these Community Impact Guidelines in determining
72 | the consequences for any action they deem in violation of this Code of Conduct:
73 |
74 | ### 1. Correction
75 |
76 | **Community Impact**: Use of inappropriate language or other behavior deemed
77 | unprofessional or unwelcome in the community.
78 |
79 | **Consequence**: A private, written warning from community leaders, providing
80 | clarity around the nature of the violation and an explanation of why the
81 | behavior was inappropriate. A public apology may be requested.
82 |
83 | ### 2. Warning
84 |
85 | **Community Impact**: A violation through a single incident or series
86 | of actions.
87 |
88 | **Consequence**: A warning with consequences for continued behavior. No
89 | interaction with the people involved, including unsolicited interaction with
90 | those enforcing the Code of Conduct, for a specified period of time. This
91 | includes avoiding interactions in community spaces as well as external channels
92 | like social media. Violating these terms may lead to a temporary or
93 | permanent ban.
94 |
95 | ### 3. Temporary Ban
96 |
97 | **Community Impact**: A serious violation of community standards, including
98 | sustained inappropriate behavior.
99 |
100 | **Consequence**: A temporary ban from any sort of interaction or public
101 | communication with the community for a specified period of time. No public or
102 | private interaction with the people involved, including unsolicited interaction
103 | with those enforcing the Code of Conduct, is allowed during this period.
104 | Violating these terms may lead to a permanent ban.
105 |
106 | ### 4. Permanent Ban
107 |
108 | **Community Impact**: Demonstrating a pattern of violation of community
109 | standards, including sustained inappropriate behavior, harassment of an
110 | individual, or aggression toward or disparagement of classes of individuals.
111 |
112 | **Consequence**: A permanent ban from any sort of public interaction within
113 | the community.
114 |
115 | ## Attribution
116 |
117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage],
118 | version 2.0, available at
119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
120 |
121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct
122 | enforcement ladder](https://github.com/mozilla/diversity).
123 |
124 | [homepage]: https://www.contributor-covenant.org
125 |
126 | For answers to common questions about this code of conduct, see the FAQ at
127 | https://www.contributor-covenant.org/faq. Translations are available at
128 | https://www.contributor-covenant.org/translations.
129 |
--------------------------------------------------------------------------------
/tests/test_enhanced_pipeline.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from unittest.mock import MagicMock, patch
3 | from easilyai.enhanced_pipeline import (
4 | EnhancedPipeline, TaskStatus, ExecutionMode, TaskResult, PipelineTask
5 | )
6 |
7 |
class TestEnhancedPipeline(unittest.TestCase):
    """Unit tests for EnhancedPipeline construction, task registration,
    and its supporting enums/dataclasses."""

    def setUp(self):
        # Every test gets a fresh pipeline plus a stub app to hand to tasks.
        self.app_stub = MagicMock()
        self.pipe = EnhancedPipeline("test_pipeline")

    def test_pipeline_init(self):
        """A new pipeline is named, empty, and sequential by default."""
        self.assertEqual(self.pipe.name, "test_pipeline")
        self.assertEqual(self.pipe.execution_mode, ExecutionMode.SEQUENTIAL)
        self.assertEqual(len(self.pipe.tasks), 0)

    def test_add_task_simple(self):
        """add_task returns a string id and registers a pending task."""
        tid = self.pipe.add_task("task1", self.app_stub, "generate_text", "Hello")
        self.assertIsInstance(tid, str)
        self.assertEqual(len(self.pipe.tasks), 1)

        registered = self.pipe.tasks[tid]
        self.assertEqual(registered.task_type, "generate_text")
        self.assertEqual(registered.prompt, "Hello")
        self.assertEqual(registered.status, TaskStatus.PENDING)

    def test_add_task_with_dependencies(self):
        """A task may declare another task's id as a dependency."""
        first = self.pipe.add_task("task1", self.app_stub, "generate_text", "Task 1")
        second = self.pipe.add_task(
            "task2", self.app_stub, "generate_text", "Task 2", dependencies=[first]
        )

        self.assertEqual(len(self.pipe.tasks), 2)
        self.assertEqual(self.pipe.tasks[second].dependencies, [first])

    def test_add_task_with_condition(self):
        """A condition callable is stored verbatim on the task."""
        always_run = lambda results: True
        tid = self.pipe.add_task(
            "task1", self.app_stub, "generate_text", "Conditional task", condition=always_run
        )
        self.assertEqual(self.pipe.tasks[tid].condition, always_run)

    def test_task_status_enum(self):
        """Each TaskStatus member maps to its lowercase string value."""
        expected = {
            TaskStatus.PENDING: "pending",
            TaskStatus.RUNNING: "running",
            TaskStatus.COMPLETED: "completed",
            TaskStatus.FAILED: "failed",
            TaskStatus.SKIPPED: "skipped",
        }
        for member, value in expected.items():
            self.assertEqual(member.value, value)

    def test_execution_mode_enum(self):
        """Each ExecutionMode member maps to its lowercase string value."""
        expected = {
            ExecutionMode.SEQUENTIAL: "sequential",
            ExecutionMode.PARALLEL: "parallel",
            ExecutionMode.CONDITIONAL: "conditional",
        }
        for member, value in expected.items():
            self.assertEqual(member.value, value)

    def test_task_result_creation(self):
        """TaskResult stores the given fields and defaults error/metadata."""
        outcome = TaskResult(
            task_id="test-task",
            status=TaskStatus.COMPLETED,
            result="Test result",
            duration=1.5
        )

        self.assertIsNone(outcome.error)
        self.assertEqual(outcome.metadata, {})
        self.assertEqual(outcome.task_id, "test-task")
        self.assertEqual(outcome.status, TaskStatus.COMPLETED)
        self.assertEqual(outcome.result, "Test result")
        self.assertEqual(outcome.duration, 1.5)

    def test_set_execution_mode(self):
        """set_execution_mode switches the pipeline's mode."""
        self.pipe.set_execution_mode(ExecutionMode.PARALLEL)
        self.assertEqual(self.pipe.execution_mode, ExecutionMode.PARALLEL)

    def test_clear_tasks(self):
        """Clearing the task mapping empties the pipeline."""
        self.pipe.add_task("task1", self.app_stub, "generate_text", "Task 1")
        self.pipe.add_task("task2", self.app_stub, "generate_text", "Task 2")
        self.assertEqual(len(self.pipe.tasks), 2)

        self.pipe.tasks.clear()  # Simplified clear method
        self.assertEqual(len(self.pipe.tasks), 0)

    @patch('easilyai.enhanced_pipeline.time.time')
    def test_simple_execution_simulation(self, mock_time):
        """Registering a task leaves it pending; execution is not run here."""
        # Two timestamps would bound a duration calculation if execution ran.
        mock_time.side_effect = [0.0, 1.0]
        self.app_stub.request.return_value = "Generated response"

        tid = self.pipe.add_task("task1", self.app_stub, "generate_text", "Hello")

        # Structure-only check: actual execution may involve async/threading.
        self.assertEqual(len(self.pipe.tasks), 1)
        queued = self.pipe.tasks[tid]
        self.assertEqual(queued.task_id, tid)
        self.assertEqual(queued.status, TaskStatus.PENDING)

    def test_variable_substitution_pattern(self):
        """Prompts keep {placeholder} patterns untouched at registration."""
        templated = "Hello {name}, how are you?"
        tid = self.pipe.add_task("task1", self.app_stub, "generate_text", templated)
        self.assertIn("{name}", self.pipe.tasks[tid].prompt)

    def test_pipeline_task_attributes(self):
        """All optional add_task keywords end up on the stored task."""
        tid = self.pipe.add_task(
            "task1",
            self.app_stub,
            "generate_text",
            "Test",
            dependencies=["dep1"],
            condition=lambda x: True,
            retry_count=3,
            timeout=30
        )

        stored = self.pipe.tasks[tid]
        self.assertEqual(stored.task_type, "generate_text")
        self.assertEqual(stored.dependencies, ["dep1"])
        self.assertIsNotNone(stored.condition)
        self.assertEqual(stored.retry_count, 3)
        self.assertEqual(stored.timeout, 30)
126 |
127 |
# Allow running this test module directly: `python tests/test_enhanced_pipeline.py`.
if __name__ == "__main__":
    unittest.main()
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """
2 | Pytest configuration and shared fixtures for all tests.
3 | """
4 | import os
5 | import sys
6 | from pathlib import Path
7 | from unittest.mock import Mock, MagicMock
8 | import pytest
9 |
10 | # Add project root to path
11 | sys.path.insert(0, str(Path(__file__).parent.parent))
12 |
13 |
@pytest.fixture
def mock_api_key():
    """Dummy credential string for services that require an API key."""
    key = "test-api-key-123"
    return key
18 |
19 |
@pytest.fixture
def mock_openai_client():
    """OpenAI-shaped client stub whose chat completion yields a fixed reply."""
    # Build the response object graph bottom-up: message -> choice -> response.
    message = MagicMock()
    message.content = "Mocked OpenAI response"
    choice = MagicMock()
    choice.message = message
    response = MagicMock()
    response.choices = [choice]

    client = MagicMock()
    client.chat.completions.create.return_value = response
    return client
32 |
33 |
@pytest.fixture
def mock_anthropic_client():
    """Anthropic-shaped client stub returning a single fixed text block."""
    response = MagicMock()
    response.content = [MagicMock(text="Mocked Anthropic response")]

    client = MagicMock()
    client.messages.create.return_value = response
    return client
42 |
43 |
@pytest.fixture
def mock_gemini_client():
    """Gemini-shaped client stub with a canned text response."""
    canned = MagicMock()
    canned.text = "Mocked Gemini response"

    client = MagicMock()
    client.generate_content.return_value = canned
    return client
52 |
53 |
@pytest.fixture
def mock_groq_client():
    """Groq-shaped client stub mirroring the OpenAI chat-completions shape."""
    # Groq exposes an OpenAI-compatible API, hence the identical object graph.
    message = MagicMock()
    message.content = "Mocked Groq response"
    choice = MagicMock()
    choice.message = message
    response = MagicMock()
    response.choices = [choice]

    client = MagicMock()
    client.chat.completions.create.return_value = response
    return client
66 |
67 |
@pytest.fixture
def sample_messages():
    """Short system/user/assistant conversation for chat-completion tests."""
    conversation = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello, how are you?"},
        {"role": "assistant", "content": "I'm doing well, thank you!"},
        {"role": "user", "content": "What's the weather like?"},
    ]
    return conversation
77 |
78 |
@pytest.fixture
def sample_prompt():
    """Generic text-generation prompt for tests."""
    prompt = "Tell me a short story about a robot."
    return prompt
83 |
84 |
@pytest.fixture
def mock_env_vars(monkeypatch):
    """Inject fake provider API keys into the environment.

    Returns the mapping so tests can assert against the injected values;
    monkeypatch undoes the changes automatically after the test.
    """
    fake_keys = {
        "OPENAI_API_KEY": "test-openai-key",
        "ANTHROPIC_API_KEY": "test-anthropic-key",
        "GEMINI_API_KEY": "test-gemini-key",
        "GROQ_API_KEY": "test-groq-key",
    }
    for name, secret in fake_keys.items():
        monkeypatch.setenv(name, secret)
    return fake_keys
97 |
98 |
@pytest.fixture(autouse=True)
def reset_environment():
    """Snapshot os.environ before each test and restore it afterwards.

    Runs automatically for every test so environment mutations cannot
    leak between tests.  The original version requested the
    ``monkeypatch`` fixture but never used it; that dead dependency is
    removed here.
    """
    # Store original environment
    original_env = dict(os.environ)
    yield
    # Restore original environment
    os.environ.clear()
    os.environ.update(original_env)
108 |
109 |
@pytest.fixture
def temp_config_file(tmp_path):
    """Write a JSON config file with fake keys/models and return its Path.

    Fix: the ``import json`` was previously buried mid-function after the
    data construction; it is hoisted to the top of the function per the
    usual import-placement idiom.
    """
    import json  # local import keeps the fixture self-contained

    config_file = tmp_path / "config.json"
    config_data = {
        "api_keys": {
            "openai": "test-key-1",
            "anthropic": "test-key-2",
            "gemini": "test-key-3",
            "groq": "test-key-4"
        },
        "default_models": {
            "openai": "gpt-4",
            "anthropic": "claude-3-opus",
            "gemini": "gemini-pro",
            "groq": "mixtral-8x7b"
        }
    }
    config_file.write_text(json.dumps(config_data))
    return config_file
131 |
132 |
@pytest.fixture
def mock_response_stream():
    """Generator of OpenAI-style streaming chunks spelling 'Hello world!'."""
    def _chunks():
        for piece in ["Hello", " ", "world", "!"]:
            # Each chunk mimics the streaming delta shape: chunk.choices[0].delta.content
            delta = MagicMock()
            delta.content = piece
            choice = MagicMock()
            choice.delta = delta
            chunk = MagicMock()
            chunk.choices = [choice]
            yield chunk

    return _chunks()
147 |
148 |
class MockException(Exception):
    """Stand-in exception used by fixtures to simulate provider failures."""
152 |
153 |
@pytest.fixture
def mock_rate_limit_error():
    """Exception instance simulating a provider rate-limit failure."""
    error = MockException("Rate limit exceeded")
    return error
158 |
159 |
@pytest.fixture
def mock_auth_error():
    """Exception instance simulating an invalid-credential failure."""
    error = MockException("Invalid API key")
    return error
164 |
165 |
@pytest.fixture
def mock_timeout_error():
    """Exception instance simulating a request timeout."""
    error = MockException("Request timeout")
    return error
170 |
171 |
# Markers for test categorization
def pytest_configure(config):
    """Register the custom markers this suite uses with pytest."""
    marker_lines = (
        "unit: mark test as a unit test",
        "slow: mark test as slow running",
        "requires_api_key: mark test as requiring API keys",
    )
    for line in marker_lines:
        config.addinivalue_line("markers", line)
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools>=45", "wheel", "setuptools_scm[toml]>=6.2"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | [project]
6 | name = "easilyai"
7 | dynamic = ["version"]
8 | description = "A unified Python library for AI services including OpenAI, Anthropic, Google Gemini, and Groq"
9 | readme = "README.md"
10 | license = {text = "Apache-2.0"}
11 | authors = [{name = "GustyCube", email = "gc@gustycube.xyz"}]
12 | maintainers = [{name = "GustyCube", email = "gc@gustycube.xyz"}]
13 | classifiers = [
14 | "Development Status :: 4 - Beta",
15 | "Intended Audience :: Developers",
16 | "License :: OSI Approved :: Apache Software License",
17 | "Operating System :: OS Independent",
18 | "Programming Language :: Python :: 3",
19 | "Programming Language :: Python :: 3.8",
20 | "Programming Language :: Python :: 3.9",
21 | "Programming Language :: Python :: 3.10",
22 | "Programming Language :: Python :: 3.11",
23 | "Programming Language :: Python :: 3.12",
24 | "Topic :: Software Development :: Libraries :: Python Modules",
25 | "Topic :: Scientific/Engineering :: Artificial Intelligence",
26 | ]
27 | requires-python = ">=3.8"
28 | dependencies = [
29 | "openai>=1.0.0",
30 | "requests>=2.25.0",
31 | "google-generativeai>=0.8.3",
32 | "anthropic>=0.42.0",
33 | ]
34 |
35 | [project.optional-dependencies]
36 | dev = [
37 | "pytest>=7.4.0",
38 | "pytest-cov>=4.1.0",
39 | "pytest-asyncio>=0.21.0",
40 | "pytest-mock>=3.11.0",
41 | "black>=23.7.0",
42 | "isort>=5.12.0",
43 | "flake8>=6.1.0",
44 | "mypy>=1.5.0",
45 | "pre-commit>=3.3.0",
46 | "responses>=0.23.0",
47 | "types-requests>=2.31.0",
48 | ]
49 | docs = [
50 | "mkdocs>=1.5.0",
51 | "mkdocs-material>=9.2.0",
52 | "mkdocstrings[python]>=0.22.0",
53 | "mkdocs-autorefs>=0.5.0",
54 | ]
55 | test = [
56 | "pytest>=7.4.0",
57 | "pytest-cov>=4.1.0",
58 | "pytest-mock>=3.11.0",
59 | "pytest-asyncio>=0.21.0",
60 | "responses>=0.23.0",
61 | "python-dotenv>=1.0.0",
62 | ]
63 | all = [
64 | "easilyai[dev,docs,test]",
65 | ]
66 |
67 | [project.urls]
68 | Homepage = "https://github.com/GustyCube/EasilyAI"
69 | Documentation = "https://gustycube.github.io/EasilyAI/"
70 | Repository = "https://github.com/GustyCube/EasilyAI"
71 | Issues = "https://github.com/GustyCube/EasilyAI/issues"
72 | Changelog = "https://github.com/GustyCube/EasilyAI/releases"
73 |
74 | [tool.setuptools]
75 | packages = ["easilyai"]
76 | include-package-data = true
77 |
78 | [tool.setuptools.package-data]
79 | easilyai = ["py.typed"]
80 |
81 | [tool.setuptools_scm]
82 | write_to = "easilyai/_version.py"
83 | version_scheme = "post-release"
84 | local_scheme = "no-local-version"
85 |
86 | [tool.black]
87 | line-length = 100
88 | target-version = ['py38', 'py39', 'py310', 'py311', 'py312']
89 | include = '\.pyi?$'
90 | extend-exclude = '''
91 | /(
92 | \.eggs
93 | | \.git
94 | | \.hg
95 | | \.mypy_cache
96 | | \.tox
97 | | \.venv
98 | | _build
99 | | buck-out
100 | | build
101 | | dist
102 | | easilyai/_version.py
103 | )/
104 | '''
105 |
106 | [tool.isort]
107 | profile = "black"
108 | line_length = 100
109 | multi_line_output = 3
110 | include_trailing_comma = true
111 | force_grid_wrap = 0
112 | use_parentheses = true
113 | ensure_newline_before_comments = true
114 | split_on_trailing_comma = true
115 | skip_glob = ["*/easilyai/_version.py"]
116 |
117 | [tool.mypy]
118 | python_version = "3.8"
119 | warn_return_any = true
120 | warn_unused_configs = true
121 | disallow_untyped_defs = false
122 | disallow_any_generics = false
123 | check_untyped_defs = true
124 | no_implicit_optional = true
125 | warn_redundant_casts = true
126 | warn_unused_ignores = true
127 | warn_no_return = true
128 | show_error_codes = true
129 | strict_equality = true
130 | ignore_missing_imports = true
131 | exclude = [
132 | "build/",
133 | "dist/",
134 | "easilyai/_version.py",
135 | ]
136 |
137 | [tool.pytest.ini_options]
138 | minversion = "7.0"
139 | testpaths = ["tests"]
140 | pythonpath = ["."]
141 | addopts = [
142 | "--cov=easilyai",
143 | "--cov-report=html",
144 | "--cov-report=term-missing",
145 | "--cov-report=xml",
146 | "--cov-fail-under=80",
147 | "--strict-markers",
148 | "--strict-config",
149 | "--verbose",
150 | ]
151 | markers = [
152 | "unit: Unit tests that don't require external services",
153 | "integration: Integration tests requiring API keys",
154 | "slow: Slow tests that should be run less frequently",
155 | "requires_api_key: Tests that require specific API keys",
156 | ]
157 | filterwarnings = [
158 | "ignore::DeprecationWarning",
159 | "ignore::PendingDeprecationWarning",
160 | ]
161 |
162 | [tool.coverage.run]
163 | source = ["easilyai"]
164 | omit = [
165 | "*/tests/*",
166 | "*/_version.py",
167 | "*/test_*.py",
168 | ]
169 |
170 | [tool.coverage.report]
171 | exclude_lines = [
172 | "pragma: no cover",
173 | "def __repr__",
174 | "def __str__",
175 | "raise AssertionError",
176 | "raise NotImplementedError",
177 | "if __name__ == .__main__.:",
178 | "if TYPE_CHECKING:",
179 | "if typing.TYPE_CHECKING:",
180 | "@abstractmethod",
181 | "@abc.abstractmethod",
182 | ]
183 |
184 | [tool.coverage.html]
185 | directory = "htmlcov"
186 |
187 | [tool.ruff]
188 | line-length = 100
189 | target-version = "py38"
190 | select = [
191 | "E", # pycodestyle errors
192 | "W", # pycodestyle warnings
193 | "F", # pyflakes
194 | "I", # isort
195 | "B", # flake8-bugbear
196 | "C4", # flake8-comprehensions
197 | "UP", # pyupgrade
198 | ]
199 | ignore = [
200 | "E501", # line too long (handled by black)
201 | "B008", # do not perform function calls in argument defaults
202 | "C901", # too complex
203 | ]
204 | exclude = [
205 | ".git",
206 | ".ruff_cache",
207 | ".venv",
208 | "build",
209 | "dist",
210 | "easilyai/_version.py",
211 | ]
--------------------------------------------------------------------------------
/easilyai/__init__.py:
--------------------------------------------------------------------------------
"""Public package interface for EasilyAI.

Every feature group is imported inside try/except so the package still
imports when only a subset of its optional dependencies is installed.
The module-level ``_*_available`` flags record which imports succeeded
and drive the dynamic construction of ``__all__`` below.
"""
# Core functionality - always available
try:
    from easilyai.app import create_app, create_tts_app
    from easilyai.custom_ai import register_custom_ai
    from easilyai.pipeline import EasilyAIPipeline
    _core_available = True
except ImportError:
    _core_available = False

# Advanced features - available when dependencies are met
# NOTE(review): _enhanced_pipeline_available is initialized later, inside
# its own try/except, rather than here with its siblings.
_enhanced_available = False
_config_available = False
_cache_available = False
_rate_limit_available = False
_metrics_available = False
_cost_tracking_available = False
_batch_available = False

# Configuration management.
try:
    from easilyai.config import (
        EasilyAIConfig,
        get_config,
        set_config,
        reset_config
    )
    _config_available = True
except ImportError:
    pass

# Response caching backends.
try:
    from easilyai.cache import (
        ResponseCache,
        MemoryCache,
        FileCache,
        get_cache,
        set_cache,
        reset_cache
    )
    _cache_available = True
except ImportError:
    pass

# Per-service rate limiting.
try:
    from easilyai.rate_limit import (
        RateLimiter,
        ServiceRateLimiter,
        get_rate_limiter,
        set_rate_limiter,
        reset_rate_limiter
    )
    _rate_limit_available = True
except ImportError:
    pass

# Metrics collection and performance monitoring.
try:
    from easilyai.metrics import (
        MetricsCollector,
        PerformanceMonitor,
        get_metrics_collector,
        get_performance_monitor,
        set_metrics_collector,
        reset_metrics
    )
    _metrics_available = True
except ImportError:
    pass

# API-cost tracking.
try:
    from easilyai.cost_tracking import (
        CostTracker,
        PricingModel,
        get_cost_tracker,
        set_cost_tracker,
        reset_cost_tracker
    )
    _cost_tracking_available = True
except ImportError:
    pass

# Batch processing.
try:
    from easilyai.batch import (
        BatchProcessor,
        BatchRequest,
        ProcessingMode,
        StreamingBatchProcessor
    )
    _batch_available = True
except ImportError:
    pass

# Enhanced pipeline (preferred over the basic EasilyAIPipeline).
try:
    from easilyai.enhanced_pipeline import (
        EnhancedPipeline,
        PipelineTemplate,
        TaskStatus,
        ExecutionMode
    )
    _enhanced_pipeline_available = True
except ImportError:
    _enhanced_pipeline_available = False

# Enhanced application and multi-app manager.
try:
    from easilyai.enhanced_app import (
        EasyAIEnhancedApp,
        EasyAIManager,
        create_enhanced_app,
        create_manager
    )
    _enhanced_available = True
except ImportError:
    pass

# Build __all__ dynamically based on available features
__all__ = []

if _core_available:
    __all__.extend([
        "create_app",
        "create_tts_app",
        "register_custom_ai",
        "EasilyAIPipeline"  # Basic pipeline - use EnhancedPipeline for new projects
    ])

if _enhanced_available:
    __all__.extend([
        "EasyAIEnhancedApp",
        "EasyAIManager",
        "create_enhanced_app",
        "create_manager"
    ])

if _enhanced_pipeline_available:
    __all__.extend([
        "EnhancedPipeline",  # Recommended pipeline implementation
        "PipelineTemplate",
        "TaskStatus",
        "ExecutionMode"
    ])

if _config_available:
    __all__.extend([
        "EasilyAIConfig",
        "get_config",
        "set_config",
        "reset_config"
    ])

if _cache_available:
    __all__.extend([
        "ResponseCache",
        "MemoryCache",
        "FileCache",
        "get_cache",
        "set_cache",
        "reset_cache"
    ])

if _rate_limit_available:
    __all__.extend([
        "RateLimiter",
        "ServiceRateLimiter",
        "get_rate_limiter",
        "set_rate_limiter",
        "reset_rate_limiter"
    ])

if _metrics_available:
    __all__.extend([
        "MetricsCollector",
        "PerformanceMonitor",
        "get_metrics_collector",
        "get_performance_monitor",
        "set_metrics_collector",
        "reset_metrics"
    ])

if _cost_tracking_available:
    __all__.extend([
        "CostTracker",
        "PricingModel",
        "get_cost_tracker",
        "set_cost_tracker",
        "reset_cost_tracker"
    ])

if _batch_available:
    __all__.extend([
        "BatchProcessor",
        "BatchRequest",
        "ProcessingMode",
        "StreamingBatchProcessor"
    ])
193 |
194 |
def get_available_features():
    """Return a mapping of feature name -> availability flag.

    Only features whose imports succeeded are included, so every value
    in the returned dict is True.
    """
    flag_table = (
        ("core", _core_available),
        ("enhanced_app", _enhanced_available),
        ("enhanced_pipeline", _enhanced_pipeline_available),
        ("configuration", _config_available),
        ("caching", _cache_available),
        ("rate_limiting", _rate_limit_available),
        ("metrics", _metrics_available),
        ("cost_tracking", _cost_tracking_available),
        ("batch_processing", _batch_available),
    )
    return {feature: flag for feature, flag in flag_table if flag}
209 |
210 |
def check_dependencies():
    """List human-readable descriptions of missing dependency groups.

    Returns an empty list when everything this function checks imported
    successfully.
    """
    problems = []

    if not _core_available:
        problems.append("Core dependencies (openai, anthropic, etc.) for basic functionality")

    if not _config_available:
        problems.append("python-dotenv for configuration management")

    return problems
222 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to EasilyAI
2 |
3 | Thank you for your interest in contributing to EasilyAI! We welcome contributions from the community and are grateful for any help you can provide.
4 |
5 | ## Code of Conduct
6 |
7 | Please note that this project is released with a [Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.
8 |
9 | ## How to Contribute
10 |
11 | ### Reporting Bugs
12 |
13 | Before creating bug reports, please check existing issues to avoid duplicates. When you create a bug report, include as many details as possible:
14 |
15 | - **Use a clear and descriptive title**
16 | - **Describe the exact steps to reproduce the problem**
17 | - **Provide specific examples to demonstrate the steps**
18 | - **Describe the behavior you observed and what you expected**
19 | - **Include screenshots if relevant**
20 | - **Include your environment details** (OS, Python version, package versions)
21 |
22 | ### Suggesting Enhancements
23 |
24 | Enhancement suggestions are tracked as GitHub issues. When creating an enhancement suggestion:
25 |
26 | - **Use a clear and descriptive title**
27 | - **Provide a detailed description of the suggested enhancement**
28 | - **Provide specific examples to demonstrate the enhancement**
29 | - **Describe the current behavior and the expected behavior**
30 | - **Explain why this enhancement would be useful**
31 |
32 | ### Pull Requests
33 |
34 | 1. **Fork the repository** and create your branch from `main`
35 | 2. **Set up your development environment**:
36 | ```bash
37 | git clone https://github.com/yourusername/EasilyAI.git
38 | cd EasilyAI
39 | make dev-setup
40 | ```
41 |
42 | 3. **Make your changes**:
43 | - Write clear, commented code
44 | - Follow the existing code style
45 | - Add or update tests as needed
46 | - Update documentation as needed
47 |
48 | 4. **Run quality checks**:
49 | ```bash
50 | # Format your code
51 | make format
52 |
53 | # Run linting
54 | make lint
55 |
56 | # Run tests
57 | make test
58 |
59 | # Run all checks
60 | make check
61 | ```
62 |
63 | 5. **Commit your changes**:
64 | - Use clear and meaningful commit messages
65 | - Follow conventional commit format when possible:
66 | - `feat:` for new features
67 | - `fix:` for bug fixes
68 | - `docs:` for documentation changes
69 | - `test:` for test changes
70 | - `refactor:` for code refactoring
71 | - `chore:` for maintenance tasks
72 |
73 | 6. **Push to your fork** and submit a pull request
74 |
75 | 7. **Pull Request Guidelines**:
76 | - Provide a clear description of the problem and solution
77 | - Include the relevant issue number if applicable
78 | - Make sure all tests pass
79 | - Update documentation as needed
80 | - Request review from maintainers
81 |
82 | ## Development Setup
83 |
84 | ### Prerequisites
85 |
86 | - Python 3.8 or higher
87 | - pip
88 | - git
89 |
90 | ### Installation
91 |
92 | 1. Clone the repository:
93 | ```bash
94 | git clone https://github.com/GustyCube/EasilyAI.git
95 | cd EasilyAI
96 | ```
97 |
98 | 2. Install development dependencies:
99 | ```bash
100 | make dev-setup
101 | ```
102 |
103 | Or manually:
104 | ```bash
105 | pip install -e ".[dev,test,docs]"
106 | pre-commit install
107 | ```
108 |
109 | ### Running Tests
110 |
111 | ```bash
112 | # Run all tests
113 | make test
114 |
115 | # Run unit tests only
116 | make test-unit
117 |
118 | # Run with coverage
119 | make test-cov
120 |
121 | # Run specific test file
122 | pytest tests/test_openai_service.py
123 |
124 | # Run specific test
125 | pytest tests/test_openai_service.py::TestOpenAIService::test_generate_text
126 | ```
127 |
128 | ### Code Quality
129 |
130 | We use several tools to maintain code quality:
131 |
132 | - **black**: Code formatting
133 | - **isort**: Import sorting
134 | - **flake8**: Linting
135 | - **mypy**: Type checking
136 | - **bandit**: Security checking
137 |
138 | Run all quality checks:
139 | ```bash
140 | make check
141 | ```
142 |
143 | ### Documentation
144 |
145 | Build documentation locally:
146 | ```bash
147 | make docs
148 | ```
149 |
150 | Serve documentation locally:
151 | ```bash
152 | make serve-docs
153 | ```
154 |
155 | Documentation will be available at http://localhost:8000
156 |
157 | ## Project Structure
158 |
159 | ```
160 | EasilyAI/
161 | ├── easilyai/ # Main package
162 | │ ├── services/ # AI service implementations
163 | │ ├── utils/ # Utility functions
164 | │ └── exceptions.py # Custom exceptions
165 | ├── tests/ # Test suite
166 | ├── docs/ # Documentation
167 | ├── examples/ # Example scripts
168 | └── .github/ # GitHub workflows
169 | ```
170 |
171 | ## Adding a New AI Service
172 |
173 | To add support for a new AI service:
174 |
175 | 1. Create a new service file in `easilyai/services/`
176 | 2. Implement the base service interface
177 | 3. Add tests in `tests/`
178 | 4. Update documentation
179 | 5. Add example usage in `examples/`
180 |
181 | Example service implementation:
182 |
183 | ```python
184 | from easilyai.services.base import BaseService
185 |
186 | class NewAIService(BaseService):
187 | def __init__(self, api_key: str, model: str):
188 | self.api_key = api_key
189 | self.model = model
190 | # Initialize client
191 |
192 | def generate_text(self, prompt: str, **kwargs):
193 | # Implement text generation
194 | pass
195 |
196 | def chat_complete(self, messages: list, **kwargs):
197 | # Implement chat completion
198 | pass
199 | ```
200 |
201 | ## Release Process
202 |
203 | Releases are automated through GitHub Actions when a tag is pushed:
204 |
205 | ```bash
206 | # Create a new tag
207 | git tag -a v1.0.0 -m "Release version 1.0.0"
208 |
209 | # Push the tag
210 | git push origin v1.0.0
211 | ```
212 |
213 | ## Getting Help
214 |
215 | If you need help, you can:
216 |
217 | - Open an issue on GitHub
218 | - Check existing documentation
219 | - Look at existing code examples
220 | - Ask questions in discussions
221 |
222 | ## Recognition
223 |
224 | Contributors will be recognized in:
225 | - The project's README
226 | - Release notes
227 | - GitHub contributors page
228 |
229 | Thank you for contributing to EasilyAI!
--------------------------------------------------------------------------------
/docs/.vitepress/theme/components/InstallationSteps.vue:
--------------------------------------------------------------------------------
1 |
2 |
3 |
7 |
8 |
14 |
{{ index + 1 }}
15 |
16 |
{{ step.title }}
17 |
18 | {{ step.description }}
19 |
20 |
21 |
22 |
31 |
32 |
40 |
41 |
42 |
43 |
💡
44 |
{{ step.note }}
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
93 |
94 |
--------------------------------------------------------------------------------
/docs/.vitepress/cache/deps/vue.js:
--------------------------------------------------------------------------------
1 | import {
2 | BaseTransition,
3 | BaseTransitionPropsValidators,
4 | Comment,
5 | DeprecationTypes,
6 | EffectScope,
7 | ErrorCodes,
8 | ErrorTypeStrings,
9 | Fragment,
10 | KeepAlive,
11 | ReactiveEffect,
12 | Static,
13 | Suspense,
14 | Teleport,
15 | Text,
16 | TrackOpTypes,
17 | Transition,
18 | TransitionGroup,
19 | TriggerOpTypes,
20 | VueElement,
21 | assertNumber,
22 | callWithAsyncErrorHandling,
23 | callWithErrorHandling,
24 | camelize,
25 | capitalize,
26 | cloneVNode,
27 | compatUtils,
28 | compile,
29 | computed,
30 | createApp,
31 | createBaseVNode,
32 | createBlock,
33 | createCommentVNode,
34 | createElementBlock,
35 | createHydrationRenderer,
36 | createPropsRestProxy,
37 | createRenderer,
38 | createSSRApp,
39 | createSlots,
40 | createStaticVNode,
41 | createTextVNode,
42 | createVNode,
43 | customRef,
44 | defineAsyncComponent,
45 | defineComponent,
46 | defineCustomElement,
47 | defineEmits,
48 | defineExpose,
49 | defineModel,
50 | defineOptions,
51 | defineProps,
52 | defineSSRCustomElement,
53 | defineSlots,
54 | devtools,
55 | effect,
56 | effectScope,
57 | getCurrentInstance,
58 | getCurrentScope,
59 | getCurrentWatcher,
60 | getTransitionRawChildren,
61 | guardReactiveProps,
62 | h,
63 | handleError,
64 | hasInjectionContext,
65 | hydrate,
66 | hydrateOnIdle,
67 | hydrateOnInteraction,
68 | hydrateOnMediaQuery,
69 | hydrateOnVisible,
70 | initCustomFormatter,
71 | initDirectivesForSSR,
72 | inject,
73 | isMemoSame,
74 | isProxy,
75 | isReactive,
76 | isReadonly,
77 | isRef,
78 | isRuntimeOnly,
79 | isShallow,
80 | isVNode,
81 | markRaw,
82 | mergeDefaults,
83 | mergeModels,
84 | mergeProps,
85 | nextTick,
86 | normalizeClass,
87 | normalizeProps,
88 | normalizeStyle,
89 | onActivated,
90 | onBeforeMount,
91 | onBeforeUnmount,
92 | onBeforeUpdate,
93 | onDeactivated,
94 | onErrorCaptured,
95 | onMounted,
96 | onRenderTracked,
97 | onRenderTriggered,
98 | onScopeDispose,
99 | onServerPrefetch,
100 | onUnmounted,
101 | onUpdated,
102 | onWatcherCleanup,
103 | openBlock,
104 | popScopeId,
105 | provide,
106 | proxyRefs,
107 | pushScopeId,
108 | queuePostFlushCb,
109 | reactive,
110 | readonly,
111 | ref,
112 | registerRuntimeCompiler,
113 | render,
114 | renderList,
115 | renderSlot,
116 | resolveComponent,
117 | resolveDirective,
118 | resolveDynamicComponent,
119 | resolveFilter,
120 | resolveTransitionHooks,
121 | setBlockTracking,
122 | setDevtoolsHook,
123 | setTransitionHooks,
124 | shallowReactive,
125 | shallowReadonly,
126 | shallowRef,
127 | ssrContextKey,
128 | ssrUtils,
129 | stop,
130 | toDisplayString,
131 | toHandlerKey,
132 | toHandlers,
133 | toRaw,
134 | toRef,
135 | toRefs,
136 | toValue,
137 | transformVNodeArgs,
138 | triggerRef,
139 | unref,
140 | useAttrs,
141 | useCssModule,
142 | useCssVars,
143 | useHost,
144 | useId,
145 | useModel,
146 | useSSRContext,
147 | useShadowRoot,
148 | useSlots,
149 | useTemplateRef,
150 | useTransitionState,
151 | vModelCheckbox,
152 | vModelDynamic,
153 | vModelRadio,
154 | vModelSelect,
155 | vModelText,
156 | vShow,
157 | version,
158 | warn,
159 | watch,
160 | watchEffect,
161 | watchPostEffect,
162 | watchSyncEffect,
163 | withAsyncContext,
164 | withCtx,
165 | withDefaults,
166 | withDirectives,
167 | withKeys,
168 | withMemo,
169 | withModifiers,
170 | withScopeId
171 | } from "./chunk-VJWGEPT5.js";
172 | export {
173 | BaseTransition,
174 | BaseTransitionPropsValidators,
175 | Comment,
176 | DeprecationTypes,
177 | EffectScope,
178 | ErrorCodes,
179 | ErrorTypeStrings,
180 | Fragment,
181 | KeepAlive,
182 | ReactiveEffect,
183 | Static,
184 | Suspense,
185 | Teleport,
186 | Text,
187 | TrackOpTypes,
188 | Transition,
189 | TransitionGroup,
190 | TriggerOpTypes,
191 | VueElement,
192 | assertNumber,
193 | callWithAsyncErrorHandling,
194 | callWithErrorHandling,
195 | camelize,
196 | capitalize,
197 | cloneVNode,
198 | compatUtils,
199 | compile,
200 | computed,
201 | createApp,
202 | createBlock,
203 | createCommentVNode,
204 | createElementBlock,
205 | createBaseVNode as createElementVNode,
206 | createHydrationRenderer,
207 | createPropsRestProxy,
208 | createRenderer,
209 | createSSRApp,
210 | createSlots,
211 | createStaticVNode,
212 | createTextVNode,
213 | createVNode,
214 | customRef,
215 | defineAsyncComponent,
216 | defineComponent,
217 | defineCustomElement,
218 | defineEmits,
219 | defineExpose,
220 | defineModel,
221 | defineOptions,
222 | defineProps,
223 | defineSSRCustomElement,
224 | defineSlots,
225 | devtools,
226 | effect,
227 | effectScope,
228 | getCurrentInstance,
229 | getCurrentScope,
230 | getCurrentWatcher,
231 | getTransitionRawChildren,
232 | guardReactiveProps,
233 | h,
234 | handleError,
235 | hasInjectionContext,
236 | hydrate,
237 | hydrateOnIdle,
238 | hydrateOnInteraction,
239 | hydrateOnMediaQuery,
240 | hydrateOnVisible,
241 | initCustomFormatter,
242 | initDirectivesForSSR,
243 | inject,
244 | isMemoSame,
245 | isProxy,
246 | isReactive,
247 | isReadonly,
248 | isRef,
249 | isRuntimeOnly,
250 | isShallow,
251 | isVNode,
252 | markRaw,
253 | mergeDefaults,
254 | mergeModels,
255 | mergeProps,
256 | nextTick,
257 | normalizeClass,
258 | normalizeProps,
259 | normalizeStyle,
260 | onActivated,
261 | onBeforeMount,
262 | onBeforeUnmount,
263 | onBeforeUpdate,
264 | onDeactivated,
265 | onErrorCaptured,
266 | onMounted,
267 | onRenderTracked,
268 | onRenderTriggered,
269 | onScopeDispose,
270 | onServerPrefetch,
271 | onUnmounted,
272 | onUpdated,
273 | onWatcherCleanup,
274 | openBlock,
275 | popScopeId,
276 | provide,
277 | proxyRefs,
278 | pushScopeId,
279 | queuePostFlushCb,
280 | reactive,
281 | readonly,
282 | ref,
283 | registerRuntimeCompiler,
284 | render,
285 | renderList,
286 | renderSlot,
287 | resolveComponent,
288 | resolveDirective,
289 | resolveDynamicComponent,
290 | resolveFilter,
291 | resolveTransitionHooks,
292 | setBlockTracking,
293 | setDevtoolsHook,
294 | setTransitionHooks,
295 | shallowReactive,
296 | shallowReadonly,
297 | shallowRef,
298 | ssrContextKey,
299 | ssrUtils,
300 | stop,
301 | toDisplayString,
302 | toHandlerKey,
303 | toHandlers,
304 | toRaw,
305 | toRef,
306 | toRefs,
307 | toValue,
308 | transformVNodeArgs,
309 | triggerRef,
310 | unref,
311 | useAttrs,
312 | useCssModule,
313 | useCssVars,
314 | useHost,
315 | useId,
316 | useModel,
317 | useSSRContext,
318 | useShadowRoot,
319 | useSlots,
320 | useTemplateRef,
321 | useTransitionState,
322 | vModelCheckbox,
323 | vModelDynamic,
324 | vModelRadio,
325 | vModelSelect,
326 | vModelText,
327 | vShow,
328 | version,
329 | warn,
330 | watch,
331 | watchEffect,
332 | watchPostEffect,
333 | watchSyncEffect,
334 | withAsyncContext,
335 | withCtx,
336 | withDefaults,
337 | withDirectives,
338 | withKeys,
339 | withMemo,
340 | withModifiers,
341 | withScopeId
342 | };
343 | //# sourceMappingURL=vue.js.map
344 |
--------------------------------------------------------------------------------
/easilyai/config.py:
--------------------------------------------------------------------------------
1 | """
2 | Configuration management for EasilyAI.
3 |
4 | This module provides centralized configuration management including:
5 | - API key management
6 | - Model configuration
7 | - Rate limiting settings
8 | - Cache configuration
9 | - Performance settings
10 | """
11 |
12 | import os
13 | import json
14 | from pathlib import Path
15 | from dataclasses import dataclass, field
16 | from typing import Optional, Dict, Any
17 |
18 | # Try to load dotenv if available
19 | try:
20 | from dotenv import load_dotenv
21 | load_dotenv()
22 | except ImportError:
23 | # dotenv not available, environment variables should be set manually
24 | pass
25 |
26 |
@dataclass
class ServiceConfig:
    """Base configuration shared by all AI service backends."""
    api_key: Optional[str] = None
    default_model: Optional[str] = None
    rate_limit: int = 60  # requests per minute
    timeout: int = 30  # seconds
    max_retries: int = 3

    def __post_init__(self):
        # Fall back to the <SERVICE>_API_KEY environment variable when no
        # key was supplied explicitly (e.g. OpenAIConfig -> OPENAI_API_KEY).
        if self.api_key:
            return
        env_var = self.__class__.__name__.replace("Config", "").upper() + "_API_KEY"
        self.api_key = os.getenv(env_var)
41 |
42 |
@dataclass
class OpenAIConfig(ServiceConfig):
    """OpenAI-specific configuration.

    Inherits rate-limit/timeout/retry settings from ServiceConfig; the
    api_key falls back to the OPENAI_API_KEY environment variable via
    ServiceConfig.__post_init__.
    """
    default_model: str = "gpt-3.5-turbo"  # text/chat generation
    default_image_model: str = "dall-e-3"  # image generation
    default_tts_model: str = "tts-1"  # text-to-speech
    default_tts_voice: str = "alloy"  # TTS voice preset
50 |
51 |
@dataclass
class AnthropicConfig(ServiceConfig):
    """Anthropic-specific configuration.

    The api_key falls back to the ANTHROPIC_API_KEY environment variable
    via ServiceConfig.__post_init__.
    """
    default_model: str = "claude-3-haiku-20240307"
56 |
57 |
@dataclass
class GeminiConfig(ServiceConfig):
    """Google Gemini-specific configuration.

    The api_key falls back to the GEMINI_API_KEY environment variable
    via ServiceConfig.__post_init__.
    """
    default_model: str = "gemini-1.5-flash"
62 |
63 |
@dataclass
class GrokConfig(ServiceConfig):
    """X.AI Grok-specific configuration.

    The api_key falls back to the GROK_API_KEY environment variable
    via ServiceConfig.__post_init__.
    """
    default_model: str = "grok-beta"
68 |
69 |
@dataclass
class OllamaConfig(ServiceConfig):
    """Ollama-specific configuration.

    base_url points at a local Ollama server. The OLLAMA_API_KEY env-var
    fallback from ServiceConfig still applies to api_key.
    """
    default_model: str = "llama2"
    base_url: str = "http://localhost:11434"  # local Ollama HTTP endpoint
75 |
76 |
@dataclass
class CacheConfig:
    """Cache configuration.

    Selects the cache backend and its retention/size limits.
    """
    enabled: bool = True  # master switch for caching
    backend: str = "memory"  # "memory" or "file"
    ttl: int = 3600  # Time to live in seconds
    max_size: int = 1000  # Max items for memory cache
    cache_dir: str = "cache"  # Directory for file cache
85 |
86 |
@dataclass
class PerformanceConfig:
    """Performance-related configuration."""
    max_workers: int = 5  # For parallel processing
    batch_delay: float = 0.1  # Delay between batch requests (seconds)
    enable_metrics: bool = True
    enable_cost_tracking: bool = True
94 |
95 |
@dataclass
class EasilyAIConfig:
    """Main configuration class for EasilyAI.

    Aggregates the per-service configurations plus cache and performance
    settings, and supports loading/saving via JSON files or environment
    variables.
    """
    openai: OpenAIConfig = field(default_factory=OpenAIConfig)
    anthropic: AnthropicConfig = field(default_factory=AnthropicConfig)
    gemini: GeminiConfig = field(default_factory=GeminiConfig)
    grok: GrokConfig = field(default_factory=GrokConfig)
    ollama: OllamaConfig = field(default_factory=OllamaConfig)

    cache: CacheConfig = field(default_factory=CacheConfig)
    performance: PerformanceConfig = field(default_factory=PerformanceConfig)

    default_service: str = "openai"
    log_level: str = "INFO"

    @classmethod
    def from_json(cls, file_path: str) -> "EasilyAIConfig":
        """Load configuration from a JSON file (inverse of to_json)."""
        with open(file_path, 'r') as f:
            data = json.load(f)
        return cls._from_dict(data)

    @classmethod
    def from_env(cls) -> "EasilyAIConfig":
        """Load configuration from environment variables.

        Per-service API keys are already picked up by each ServiceConfig's
        __post_init__; only top-level and cache settings are read here.
        """
        config = cls()

        # Override with environment variables if present
        if default_service := os.getenv("EASILYAI_DEFAULT_SERVICE"):
            config.default_service = default_service

        if log_level := os.getenv("EASILYAI_LOG_LEVEL"):
            config.log_level = log_level

        # Cache settings from env
        if cache_enabled := os.getenv("EASILYAI_CACHE_ENABLED"):
            config.cache.enabled = cache_enabled.lower() == "true"

        if cache_backend := os.getenv("EASILYAI_CACHE_BACKEND"):
            config.cache.backend = cache_backend

        if cache_ttl := os.getenv("EASILYAI_CACHE_TTL"):
            config.cache.ttl = int(cache_ttl)

        return config

    def to_json(self, file_path: str):
        """Save configuration to a JSON file."""
        data = self._to_dict()
        with open(file_path, 'w') as f:
            json.dump(data, f, indent=2)

    def get_service_config(self, service: str) -> Optional[ServiceConfig]:
        """Get configuration for a specific service.

        Returns None for unknown service names. Previously this returned
        whatever attribute matched the name (e.g. "cache" returned the
        CacheConfig, which then broke get_api_key with an AttributeError);
        now only ServiceConfig instances are returned.
        """
        service_config = getattr(self, service.lower(), None)
        if isinstance(service_config, ServiceConfig):
            return service_config
        return None

    def get_api_key(self, service: str) -> Optional[str]:
        """Get the API key for a specific service, or None if unknown."""
        service_config = self.get_service_config(service)
        if service_config:
            return service_config.api_key
        return None

    def get_default_model(self, service: str) -> Optional[str]:
        """Get the default model for a specific service, or None if unknown."""
        service_config = self.get_service_config(service)
        if service_config:
            return service_config.default_model
        return None

    def _to_dict(self) -> dict:
        """Convert configuration to a JSON-serializable dictionary.

        Nested config objects become plain dicts; private and None-valued
        entries are dropped from them.
        """
        result = {}
        for field_name, field_value in self.__dict__.items():
            if hasattr(field_value, '__dict__'):
                result[field_name] = {k: v for k, v in field_value.__dict__.items()
                                      if not k.startswith('_') and v is not None}
            else:
                result[field_name] = field_value
        return result

    @classmethod
    def _from_dict(cls, data: dict) -> "EasilyAIConfig":
        """Create configuration from a dictionary (inverse of _to_dict)."""
        config = cls()

        for key, value in data.items():
            if hasattr(config, key):
                if isinstance(value, dict) and hasattr(getattr(config, key), '__dict__'):
                    # Update nested config objects attribute by attribute
                    for sub_key, sub_value in value.items():
                        setattr(getattr(config, key), sub_key, sub_value)
                else:
                    setattr(config, key, value)

        return config
192 |
193 |
194 | # Global configuration instance
195 | _config = None
196 |
197 |
def get_config() -> EasilyAIConfig:
    """Return the process-wide configuration, creating it on first use.

    Prefers an ``easilyai_config.json`` file in the working directory;
    otherwise the configuration is built from environment variables.
    """
    global _config
    if _config is not None:
        return _config

    config_path = Path("easilyai_config.json")
    if config_path.exists():
        _config = EasilyAIConfig.from_json(str(config_path))
    else:
        # No config file: fall back to environment variables
        _config = EasilyAIConfig.from_env()
    return _config
210 |
211 |
def set_config(config: EasilyAIConfig) -> None:
    """Set the global configuration instance.

    Replaces whatever configuration get_config() would otherwise build.
    """
    global _config
    _config = config
216 |
217 |
def reset_config() -> None:
    """Reset the global configuration instance.

    The next get_config() call rebuilds it from the JSON file or the
    environment.
    """
    global _config
    _config = None
--------------------------------------------------------------------------------
/docs/.vitepress/theme/style.css:
--------------------------------------------------------------------------------
1 | /**
2 | * Customize default theme styling by overriding CSS variables:
3 | * https://github.com/vuejs/vitepress/blob/main/src/client/theme-default/styles/vars.css
4 | */
5 |
6 | /**
7 | * Colors
8 | *
9 |  * Each color has exactly the same color scale system with 3 levels of solid
10 |  * colors with different brightness, and 1 soft color.
11 | *
12 | * - `XXX-1`: The most solid color used mainly for colored text. It must
13 |  * satisfy the contrast ratio when used on top of `XXX-soft`.
14 | *
15 | * - `XXX-2`: The color used mainly for hover state of the button.
16 | *
17 | * - `XXX-3`: The color for solid background, such as bg color of the button.
18 | * It must satisfy the contrast ratio with pure white (#ffffff) text on
19 | * top of it.
20 | *
21 | * - `XXX-soft`: The color used for subtle background such as custom container
22 | * or badges. It must satisfy the contrast ratio when putting `XXX-1` colors
23 | * on top of it.
24 | *
25 | * The soft color must be semi transparent alpha channel. This is crucial
26 | * because it allows adding multiple "soft" colors on top of each other
27 |  * to create an accent, such as when having an inline code block inside
28 | * custom containers.
29 | *
30 | * - `default`: The color used purely for subtle indication without any
31 |  * special meanings attached to it such as bg color for menu hover state.
32 | *
33 | * - `brand`: Used for primary brand colors, such as link text, button with
34 | * brand theme, etc.
35 | *
36 | * - `tip`: Used to indicate useful information. The default theme uses the
37 | * brand color for this by default.
38 | *
39 | * - `warning`: Used to indicate warning to the users. Used in custom
40 | * container, badges, etc.
41 | *
42 | * - `danger`: Used to indicate dangerous message to the users. Used in custom
43 | * container, badges, etc.
44 | * -------------------------------------------------------------------------- */
45 |
46 | :root {
47 | --vp-c-default-1: var(--vp-c-gray-1);
48 | --vp-c-default-2: var(--vp-c-gray-2);
49 | --vp-c-default-3: var(--vp-c-gray-3);
50 | --vp-c-default-soft: var(--vp-c-gray-soft);
51 |
52 | --vp-c-brand-1: #3eaf7c;
53 | --vp-c-brand-2: #4abf8a;
54 | --vp-c-brand-3: #56cf98;
55 | --vp-c-brand-soft: rgba(62, 175, 124, 0.14);
56 |
57 | --vp-c-tip-1: var(--vp-c-brand-1);
58 | --vp-c-tip-2: var(--vp-c-brand-2);
59 | --vp-c-tip-3: var(--vp-c-brand-3);
60 | --vp-c-tip-soft: var(--vp-c-brand-soft);
61 |
62 | --vp-c-warning-1: #e7c547;
63 | --vp-c-warning-2: #ebc862;
64 | --vp-c-warning-3: #efcb7d;
65 | --vp-c-warning-soft: rgba(231, 197, 71, 0.14);
66 |
67 | --vp-c-danger-1: #ed5e5e;
68 | --vp-c-danger-2: #f07272;
69 | --vp-c-danger-3: #f38686;
70 | --vp-c-danger-soft: rgba(237, 94, 94, 0.14);
71 | }
72 |
73 | /**
74 | * Component: Button
75 | * -------------------------------------------------------------------------- */
76 |
77 | :root {
78 | --vp-button-brand-border: transparent;
79 | --vp-button-brand-text: var(--vp-c-white);
80 | --vp-button-brand-bg: var(--vp-c-brand-3);
81 | --vp-button-brand-hover-border: transparent;
82 | --vp-button-brand-hover-text: var(--vp-c-white);
83 | --vp-button-brand-hover-bg: var(--vp-c-brand-2);
84 | --vp-button-brand-active-border: transparent;
85 | --vp-button-brand-active-text: var(--vp-c-white);
86 | --vp-button-brand-active-bg: var(--vp-c-brand-1);
87 | }
88 |
89 | /**
90 | * Component: Home
91 | * -------------------------------------------------------------------------- */
92 |
93 | :root {
94 | --vp-home-hero-name-color: transparent;
95 | --vp-home-hero-name-background: -webkit-linear-gradient(
96 | 120deg,
97 | #3eaf7c 30%,
98 | #56cf98
99 | );
100 |
101 | --vp-home-hero-image-background-image: linear-gradient(
102 | -45deg,
103 | #3eaf7c 50%,
104 | #56cf98 50%
105 | );
106 | --vp-home-hero-image-filter: blur(44px);
107 | }
108 |
109 | @media (min-width: 640px) {
110 | :root {
111 | --vp-home-hero-image-filter: blur(56px);
112 | }
113 | }
114 |
115 | @media (min-width: 960px) {
116 | :root {
117 | --vp-home-hero-image-filter: blur(68px);
118 | }
119 | }
120 |
121 | /**
122 | * Component: Custom Block
123 | * -------------------------------------------------------------------------- */
124 |
125 | :root {
126 | --vp-custom-block-tip-border: transparent;
127 | --vp-custom-block-tip-text: var(--vp-c-text-1);
128 | --vp-custom-block-tip-bg: var(--vp-c-brand-soft);
129 | --vp-custom-block-tip-code-bg: var(--vp-c-brand-soft);
130 | }
131 |
132 | /**
133 | * Component: Algolia
134 | * -------------------------------------------------------------------------- */
135 |
136 | .DocSearch {
137 | --docsearch-primary-color: var(--vp-c-brand-1) !important;
138 | }
139 |
140 | /**
141 | * Custom Enhancements
142 | * -------------------------------------------------------------------------- */
143 |
144 | /* Enhanced code blocks */
145 | .vp-doc div[class*='language-'] {
146 | position: relative;
147 | margin: 16px 0;
148 | background-color: var(--vp-code-block-bg);
149 | overflow-x: auto;
150 | border-radius: 8px;
151 | transition: background-color 0.1s;
152 | }
153 |
154 | .vp-doc div[class*='language-'] code {
155 | color: var(--vp-code-block-color);
156 | padding: 0;
157 | background-color: transparent;
158 | border-radius: 0;
159 | }
160 |
161 | /* Enhanced tables */
162 | .vp-doc table {
163 | border-collapse: collapse;
164 | margin: 20px 0;
165 | overflow-x: auto;
166 | border-radius: 8px;
167 | overflow: hidden;
168 | box-shadow: 0 1px 3px rgba(0, 0, 0, 0.12), 0 1px 2px rgba(0, 0, 0, 0.24);
169 | }
170 |
171 | .vp-doc th {
172 | background: var(--vp-c-brand-soft);
173 | font-weight: 600;
174 | }
175 |
176 | .vp-doc th,
177 | .vp-doc td {
178 | border: 1px solid var(--vp-c-divider);
179 | padding: 12px 16px;
180 | }
181 |
182 | /* Enhanced badges/pills */
183 | .badge {
184 | display: inline-block;
185 | padding: 0.25rem 0.5rem;
186 | font-size: 0.75rem;
187 | font-weight: 500;
188 | line-height: 1;
189 | text-align: center;
190 | white-space: nowrap;
191 | vertical-align: baseline;
192 | border-radius: 0.375rem;
193 | margin: 0.125rem;
194 | }
195 |
196 | .badge-success {
197 | color: #fff;
198 | background-color: var(--vp-c-brand-1);
199 | }
200 |
201 | .badge-warning {
202 | color: #fff;
203 | background-color: var(--vp-c-warning-1);
204 | }
205 |
206 | .badge-danger {
207 | color: #fff;
208 | background-color: var(--vp-c-danger-1);
209 | }
210 |
211 | /* Feature cards enhancement */
212 | .VPFeature {
213 | border: 1px solid var(--vp-c-border);
214 | border-radius: 12px;
215 | height: 100%;
216 | transition: all 0.2s ease;
217 | }
218 |
219 | .VPFeature:hover {
220 | border-color: var(--vp-c-brand-1);
221 | box-shadow: 0 8px 30px rgba(62, 175, 124, 0.12);
222 | transform: translateY(-2px);
223 | }
224 |
225 | /* Custom containers enhancement */
226 | .custom-container {
227 | border-radius: 8px;
228 | padding: 16px;
229 | margin: 16px 0;
230 | }
231 |
232 | .custom-container-title {
233 | font-weight: 600;
234 | margin-bottom: 8px;
235 | }
236 |
237 | /* Copy code button styling */
238 | .vp-copy-code {
239 | border-radius: 4px !important;
240 | background: var(--vp-c-brand-1) !important;
241 | }
242 |
243 | .vp-copy-code:hover {
244 | background: var(--vp-c-brand-2) !important;
245 | }
246 |
247 | /* Navigation enhancements */
248 | .VPNav {
249 | backdrop-filter: blur(12px);
250 | }
251 |
252 | /* Sidebar enhancements */
253 | .VPSidebarItem.level-0 .items .link {
254 | padding-left: 32px !important;
255 | }
256 |
257 | /* Search enhancements */
258 | .VPNavBarSearch {
259 | justify-content: flex-end;
260 | }
261 |
262 | /* Mobile responsiveness */
263 | @media (max-width: 768px) {
264 | .vp-doc div[class*='language-'] {
265 | margin: 16px -24px;
266 | border-radius: 0;
267 | }
268 |
269 | .vp-doc table {
270 | font-size: 14px;
271 | }
272 |
273 | .vp-doc th,
274 | .vp-doc td {
275 | padding: 8px 12px;
276 | }
277 | }
--------------------------------------------------------------------------------
/easilyai/utils/retry.py:
--------------------------------------------------------------------------------
1 | """
2 | Retry logic and rate limiting utilities for API calls.
3 | """
4 | import time
5 | import random
6 | from functools import wraps
7 | from typing import Callable, Type, Tuple, Optional, Any
8 | import logging
9 |
10 | logger = logging.getLogger(__name__)
11 |
12 |
class RateLimitError(Exception):
    """Raised when rate limit is exceeded."""
    # NOTE(review): nothing in this module raises it; kept for callers
    # that want a dedicated rate-limit exception type.
    pass
16 |
17 |
class RetryableError(Exception):
    """Base class for errors that should trigger a retry."""
    # NOTE(review): not referenced by the decorators below; callers can
    # pass it (or subclasses) via retryable_exceptions.
    pass
21 |
22 |
def exponential_backoff(
    max_retries: int = 3,
    base_delay: float = 1.0,
    max_delay: float = 60.0,
    exponential_base: float = 2.0,
    jitter: bool = True,
    retryable_exceptions: Tuple[Type[Exception], ...] = (Exception,),
) -> Callable:
    """
    Decorator implementing exponential backoff retry logic.

    Args:
        max_retries: Maximum number of retry attempts
        base_delay: Initial delay in seconds
        max_delay: Maximum delay in seconds
        exponential_base: Base for the exponential backoff calculation
        jitter: Whether to add random jitter to delays
        retryable_exceptions: Exception types that should trigger retries

    Returns:
        Decorated function with retry logic
    """
    def decorator(func: Callable) -> Callable:
        @wraps(func)
        def wrapper(*args, **kwargs) -> Any:
            attempt = 0
            while True:
                try:
                    return func(*args, **kwargs)
                except retryable_exceptions as e:
                    # Out of retries: log and re-raise the final failure.
                    if attempt >= max_retries:
                        logger.error(
                            f"Function {func.__name__} failed after {max_retries} retries. "
                            f"Last error: {str(e)}"
                        )
                        raise e

                    # Exponential backoff, capped at max_delay.
                    delay = min(base_delay * (exponential_base ** attempt), max_delay)

                    # Random jitter (50-100% of delay) prevents thundering herd.
                    if jitter:
                        delay = delay * (0.5 + random.random() * 0.5)

                    logger.warning(
                        f"Function {func.__name__} failed on attempt {attempt + 1}. "
                        f"Retrying in {delay:.2f} seconds. Error: {str(e)}"
                    )

                    time.sleep(delay)
                    attempt += 1

        return wrapper
    return decorator
86 |
87 |
class RateLimiter:
    """
    Token bucket rate limiter implementation.

    Tokens accrue continuously at ``rate`` per second up to a capacity of
    ``burst``; each request consumes tokens from the bucket.
    """

    def __init__(self, rate: float, burst: int = 1):
        """
        Initialize rate limiter.

        Args:
            rate: Requests per second allowed
            burst: Maximum burst size (token bucket capacity)
        """
        self.rate = rate
        self.burst = burst
        self.tokens = burst
        self.last_update = time.time()

    def _refill(self) -> None:
        """Credit tokens accrued since the last update, capped at burst."""
        now = time.time()
        elapsed = now - self.last_update
        self.tokens = min(self.burst, self.tokens + elapsed * self.rate)
        self.last_update = now

    def acquire(self, tokens: int = 1) -> bool:
        """
        Acquire tokens from the bucket.

        Args:
            tokens: Number of tokens to acquire

        Returns:
            True if tokens were acquired, False otherwise
        """
        self._refill()

        if self.tokens >= tokens:
            self.tokens -= tokens
            return True

        return False

    def wait_time(self, tokens: int = 1) -> float:
        """
        Calculate how long to wait for tokens to be available.

        Fix: refill the bucket first so the estimate accounts for tokens
        accrued since the last acquire() call; previously a stale token
        count could overestimate the wait.

        Args:
            tokens: Number of tokens needed

        Returns:
            Wait time in seconds (0.0 if tokens are already available)
        """
        self._refill()

        if self.tokens >= tokens:
            return 0.0

        needed_tokens = tokens - self.tokens
        return needed_tokens / self.rate
144 |
145 |
def rate_limited(rate: float, burst: int = 1) -> Callable:
    """
    Decorator for rate limiting function calls.

    Args:
        rate: Requests per second allowed
        burst: Maximum burst size

    Returns:
        Decorated function with rate limiting
    """
    limiter = RateLimiter(rate, burst)

    def decorator(func: Callable) -> Callable:
        @wraps(func)
        def wrapper(*args, **kwargs) -> Any:
            # Keep waiting until a token is actually granted. The previous
            # version slept once and assumed the second acquire() succeeded,
            # which could silently bypass the limit under timing jitter.
            while not limiter.acquire():
                wait_time = limiter.wait_time()
                logger.info(
                    f"Rate limit reached for {func.__name__}. "
                    f"Waiting {wait_time:.2f} seconds."
                )
                time.sleep(wait_time)

            return func(*args, **kwargs)
        return wrapper
    return decorator
174 |
175 |
def retry_with_rate_limit(
    max_retries: int = 3,
    base_delay: float = 1.0,
    rate: float = 1.0,
    burst: int = 1,
    retryable_exceptions: Tuple[Type[Exception], ...] = (Exception,),
) -> Callable:
    """
    Combined decorator for retry logic with rate limiting.

    Args:
        max_retries: Maximum number of retry attempts
        base_delay: Initial delay in seconds for retries
        rate: Requests per second for rate limiting
        burst: Maximum burst size for rate limiting
        retryable_exceptions: Exception types that should trigger retries

    Returns:
        Decorated function with both retry and rate limiting
    """
    def decorator(func: Callable) -> Callable:
        # Rate limiting wraps the function on the inside, retry logic on
        # the outside, so every retry attempt is itself rate limited.
        throttled = rate_limited(rate, burst)(func)
        return exponential_backoff(
            max_retries=max_retries,
            base_delay=base_delay,
            retryable_exceptions=retryable_exceptions,
        )(throttled)
    return decorator
206 |
207 |
208 | # Common retry configurations for different services
OPENAI_RETRY_CONFIG = {
    "max_retries": 3,
    "base_delay": 1.0,
    "retryable_exceptions": (Exception,),  # Will be replaced with actual OpenAI exceptions
}

ANTHROPIC_RETRY_CONFIG = {
    "max_retries": 3,
    "base_delay": 2.0,
    "retryable_exceptions": (Exception,),  # Will be replaced with actual Anthropic exceptions
}

GEMINI_RETRY_CONFIG = {
    "max_retries": 2,
    "base_delay": 1.5,
    "retryable_exceptions": (Exception,),  # Will be replaced with actual Gemini exceptions
}

# NOTE(review): the rest of the package exposes a "grok" (X.AI) service,
# not "groq" -- confirm which provider this config is meant for.
GROQ_RETRY_CONFIG = {
    "max_retries": 3,
    "base_delay": 0.5,
    "retryable_exceptions": (Exception,),  # Will be replaced with actual Groq exceptions
}
232 |
233 |
def create_service_retry_decorator(service_name: str) -> Callable:
    """
    Create a retry decorator configured for a specific service.

    Args:
        service_name: Name of the AI service (openai, anthropic, gemini, groq)

    Returns:
        Configured retry decorator
    """
    # Unknown service names fall back to the OpenAI settings.
    # NOTE(review): the package's service is named "grok" (X.AI); passing
    # "grok" here will not match "groq" and silently uses the OpenAI
    # config -- confirm the intended key.
    chosen = {
        "openai": OPENAI_RETRY_CONFIG,
        "anthropic": ANTHROPIC_RETRY_CONFIG,
        "gemini": GEMINI_RETRY_CONFIG,
        "groq": GROQ_RETRY_CONFIG,
    }.get(service_name.lower(), OPENAI_RETRY_CONFIG)

    return exponential_backoff(**chosen)
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI/CD Pipeline
2 |
3 | on:
4 | push:
5 | branches: [ main, develop ]
6 | tags:
7 | - 'v*'
8 | pull_request:
9 | branches: [ main, develop ]
10 | schedule:
11 | # Run nightly at 2 AM UTC
12 | - cron: '0 2 * * *'
13 | workflow_dispatch:
14 |
15 | env:
16 | PYTHON_VERSION: '3.11'
17 |
18 | jobs:
19 | lint:
20 | name: Lint Code
21 | runs-on: ubuntu-latest
22 | steps:
23 | - uses: actions/checkout@v4
24 |
25 | - name: Set up Python
26 | uses: actions/setup-python@v5
27 | with:
28 | python-version: ${{ env.PYTHON_VERSION }}
29 |
30 | - name: Cache dependencies
31 | uses: actions/cache@v3
32 | with:
33 | path: ~/.cache/pip
34 | key: ${{ runner.os }}-pip-lint-${{ hashFiles('pyproject.toml') }}
35 | restore-keys: |
36 | ${{ runner.os }}-pip-lint-
37 | ${{ runner.os }}-pip-
38 |
39 | - name: Install dependencies
40 | run: |
41 | python -m pip install --upgrade pip
42 | pip install black flake8 isort mypy bandit safety
43 | pip install types-requests
44 |
45 | - name: Run black
46 | run: black --check easilyai tests
47 |
48 | - name: Run isort
49 | run: isort --check-only easilyai tests
50 |
51 | - name: Run flake8
52 | run: flake8 easilyai tests
53 |
54 | - name: Run mypy
55 | run: mypy easilyai
56 |
57 | security:
58 | name: Security Scan
59 | runs-on: ubuntu-latest
60 | steps:
61 | - uses: actions/checkout@v4
62 |
63 | - name: Set up Python
64 | uses: actions/setup-python@v5
65 | with:
66 | python-version: ${{ env.PYTHON_VERSION }}
67 |
68 | - name: Install dependencies
69 | run: |
70 | python -m pip install --upgrade pip
71 | pip install bandit safety pip-audit
72 |
73 | - name: Run bandit
74 | run: bandit -r easilyai -ll -f json -o bandit-report.json
75 |
76 | - name: Upload bandit results
77 | if: always()
78 | uses: actions/upload-artifact@v3
79 | with:
80 | name: bandit-report
81 | path: bandit-report.json
82 |
83 | - name: Run safety check
84 | run: safety check --json
85 | continue-on-error: true
86 |
87 | - name: Run pip-audit
88 | run: pip-audit
89 | continue-on-error: true
90 |
91 |   test:
92 |     name: Test Python ${{ matrix.python-version }} on ${{ matrix.os }}
93 |     runs-on: ${{ matrix.os }}
94 |     strategy:
95 |       fail-fast: false
96 |       matrix:
97 |         os: [ubuntu-latest, windows-latest, macos-latest]
98 |         python-version: ['3.8', '3.9', '3.10', '3.11', '3.12']
99 |         exclude:
100 |           # Exclude some combinations to save CI time
101 |           - os: windows-latest
102 |             python-version: '3.8'
103 |           - os: windows-latest
104 |             python-version: '3.9'
105 | 
106 |     steps:
107 |       - uses: actions/checkout@v4
108 | 
109 |       - name: Set up Python ${{ matrix.python-version }}
110 |         uses: actions/setup-python@v5
111 |         with:
112 |           python-version: ${{ matrix.python-version }}
113 |           # Built-in pip caching works on all three OSes. The previous
114 |           # actions/cache step hard-coded the Linux path (~/.cache/pip),
115 |           # so Windows and macOS runs never got a cache hit.
116 |           cache: 'pip'
117 | 
118 |       - name: Install dependencies
119 |         run: |
120 |           python -m pip install --upgrade pip
121 |           pip install -e ".[test]"
122 | 
123 |       - name: Run tests with coverage
124 |         run: |
125 |           pytest --cov=easilyai --cov-report=xml --cov-report=term-missing -v
126 | 
127 |       - name: Upload coverage reports
128 |         # Upload once, from the primary os/python combination only.
129 |         if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.11'
130 |         uses: codecov/codecov-action@v4  # v3 is deprecated
131 |         with:
132 |           files: ./coverage.xml  # v4 renamed 'file' to 'files'
133 |           token: ${{ secrets.CODECOV_TOKEN }}  # recommended by v4 for reliable uploads; harmless if unset
134 |           flags: unittests
135 |           name: codecov-umbrella
136 |           fail_ci_if_error: false
140 |
141 |   build:
142 |     name: Build Distribution
143 |     runs-on: ubuntu-latest
144 |     needs: [lint, test]
145 |     steps:
146 |       - uses: actions/checkout@v4
147 | 
148 |       - name: Set up Python
149 |         uses: actions/setup-python@v5
150 |         with:
151 |           python-version: ${{ env.PYTHON_VERSION }}
152 | 
153 |       - name: Install build dependencies
154 |         run: |
155 |           python -m pip install --upgrade pip
156 |           pip install build twine
157 | 
158 |       - name: Build package
159 |         run: python -m build  # produces both sdist and wheel in dist/
160 | 
161 |       - name: Check distribution
162 |         run: |
163 |           twine check dist/*
164 |           ls -lh dist/
165 | 
166 |       - name: Upload artifacts
167 |         uses: actions/upload-artifact@v4  # v3 is shut down; note v4 artifacts require download-artifact@v4
168 |         with:
169 |           name: dist
170 |           path: dist/
171 |
172 |   docs:
173 |     name: Build Documentation
174 |     runs-on: ubuntu-latest
175 |     steps:
176 |       - uses: actions/checkout@v4
177 | 
178 |       - name: Set up Python
179 |         uses: actions/setup-python@v5
180 |         with:
181 |           python-version: ${{ env.PYTHON_VERSION }}
182 | 
183 |       - name: Install dependencies
184 |         run: |
185 |           python -m pip install --upgrade pip
186 |           pip install -e ".[docs]"
187 | 
188 |       - name: Build docs
189 |         run: mkdocs build --strict  # --strict fails the build on broken links/warnings
190 | 
191 |       - name: Upload docs artifact
192 |         uses: actions/upload-artifact@v4  # v3 is shut down by GitHub; workflows using it fail
193 |         with:
194 |           name: docs
195 |           path: site/
196 |
197 |   publish-test:
198 |     name: Publish to TestPyPI
199 |     runs-on: ubuntu-latest
200 |     needs: [build]
201 |     if: github.event_name == 'push' && github.ref == 'refs/heads/develop'
202 |     environment:
203 |       name: test-pypi
204 |       url: https://test.pypi.org/project/easilyai/
205 |     steps:
206 |       - uses: actions/checkout@v4
207 | 
208 |       - name: Download artifacts
209 |         uses: actions/download-artifact@v4  # v3 is shut down and cannot read upload-artifact@v4 artifacts
210 |         with:
211 |           name: dist
212 |           path: dist/
213 | 
214 |       - name: Set up Python
215 |         uses: actions/setup-python@v5
216 |         with:
217 |           python-version: ${{ env.PYTHON_VERSION }}
218 | 
219 |       - name: Install twine
220 |         run: pip install twine
221 | 
222 |       - name: Publish to TestPyPI
223 |         env:
224 |           TWINE_USERNAME: __token__
225 |           TWINE_PASSWORD: ${{ secrets.TEST_PYPI_API_TOKEN }}
226 |         run: |
227 |           twine upload --repository testpypi dist/* --skip-existing
228 |
229 |   publish:
230 |     name: Publish to PyPI
231 |     runs-on: ubuntu-latest
232 |     needs: [build, security]
233 |     if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v')
234 |     environment:
235 |       name: pypi
236 |       url: https://pypi.org/project/easilyai/
237 |     steps:
238 |       - uses: actions/checkout@v4
239 | 
240 |       - name: Download artifacts
241 |         uses: actions/download-artifact@v4  # v3 is shut down and cannot read upload-artifact@v4 artifacts
242 |         with:
243 |           name: dist
244 |           path: dist/
245 | 
246 |       - name: Set up Python
247 |         uses: actions/setup-python@v5
248 |         with:
249 |           python-version: ${{ env.PYTHON_VERSION }}
250 | 
251 |       - name: Install twine
252 |         run: pip install twine
253 | 
254 |       - name: Publish to PyPI
255 |         env:
256 |           TWINE_USERNAME: __token__
257 |           TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
258 |         run: |
259 |           twine upload dist/*
260 |
261 |   release:
262 |     name: Create GitHub Release
263 |     runs-on: ubuntu-latest
264 |     needs: [publish]
265 |     if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v')
266 |     steps:
267 |       - uses: actions/checkout@v4
268 | 
269 |       - name: Download artifacts
270 |         uses: actions/download-artifact@v4  # v3 is shut down and cannot read upload-artifact@v4 artifacts
271 |         with:
272 |           name: dist
273 |           path: dist/
274 | 
275 |       - name: Create Release
276 |         uses: softprops/action-gh-release@v2  # v1 runs on the retired Node 16 runtime; v2 is a drop-in upgrade
277 |         with:
278 |           files: dist/*
279 |           generate_release_notes: true
280 |           draft: false
281 |           # Mark pre-releases automatically from the tag name (e.g. v1.2.0rc1, v1.2.0-beta)
282 |           prerelease: ${{ contains(github.ref, 'rc') || contains(github.ref, 'beta') || contains(github.ref, 'alpha') }}
--------------------------------------------------------------------------------