├── .bumpversion.cfg ├── .clear_envs.sh ├── .github └── workflows │ ├── docs-deploy.yml │ ├── python-package.yml │ └── tests.yml ├── .gitignore ├── .vscode └── launch.json ├── LICENSE ├── README.md ├── bump_version.sh ├── cel ├── __init__.py ├── assets │ ├── celai_connectors.png │ ├── celai_diagram.png │ ├── celai_middlewares.png │ ├── celai_router_diagram.png │ ├── celia_logo.png │ └── celia_overview1.png ├── assistants │ ├── base_assistant.py │ ├── common.py │ ├── context.py │ ├── function_context.py │ ├── function_response.py │ ├── macaw │ │ ├── custom_chat_models │ │ │ └── chat_open_router.py │ │ ├── macaw_assistant.py │ │ ├── macaw_history_adapter.py │ │ ├── macaw_inference_context.py │ │ ├── macaw_nlp.py │ │ ├── macaw_settings.py │ │ ├── macaw_test_chat_model.py │ │ └── macaw_utils.py │ ├── request_context.py │ ├── router │ │ ├── agentic_router.py │ │ ├── logic_router.py │ │ └── utils.py │ ├── state_manager.py │ └── stream_content_chunk.py ├── cache.py ├── comms │ ├── client_command.py │ ├── sentense_detection.py │ └── utils.py ├── config.py ├── connectors │ ├── cli │ │ ├── cli_connector.py │ │ └── model │ │ │ ├── cli_lead.py │ │ │ └── cli_message.py │ ├── livekit │ │ ├── livekit_adapter.py │ │ ├── livekit_connector.py │ │ └── model │ │ │ ├── livekit_lead.py │ │ │ └── livekit_message.py │ ├── telegram │ │ ├── __init__.py │ │ ├── model │ │ │ ├── telegram_attachment.py │ │ │ ├── telegram_lead.py │ │ │ └── telegram_message.py │ │ ├── run_mode.py │ │ ├── samples │ │ │ ├── hi.mp3 │ │ │ ├── hi.oga │ │ │ ├── hi.ogg │ │ │ ├── message.json │ │ │ ├── message_audio.json │ │ │ ├── message_callback_query.json │ │ │ ├── message_img.json │ │ │ ├── message_imgx2.json │ │ │ ├── message_location.json │ │ │ ├── message_location_place.json │ │ │ └── sample_message.json │ │ └── telegram_connector.py │ ├── vapi │ │ ├── README.md │ │ ├── model │ │ │ ├── vapi_lead.py │ │ │ └── vapi_message.py │ │ ├── sample_data │ │ │ ├── chunk_vapi_1.json │ │ │ ├── chunk_vapi_2.json │ │ │ ├── 
chunk_vapi_3.json │ │ │ ├── chunk_vapi_last.json │ │ │ └── request.json │ │ ├── utils.py │ │ └── vapi_connector.py │ └── whatsapp │ │ ├── __init__.py │ │ ├── components │ │ ├── component.py │ │ ├── document.py │ │ ├── document_url.py │ │ ├── image.py │ │ ├── list_item.py │ │ ├── reply_button.py │ │ ├── text.py │ │ ├── utils.py │ │ └── video.py │ │ ├── constants.py │ │ ├── functions │ │ ├── buttons.py │ │ └── utils.py │ │ ├── model │ │ ├── media_utils.py │ │ ├── samples │ │ │ ├── img_caption_message.json │ │ │ ├── img_message.json │ │ │ ├── react_message.json │ │ │ └── text_message.json │ │ ├── whatsapp_attachment.py │ │ ├── whatsapp_lead.py │ │ └── whatsapp_message.py │ │ ├── phone_utils.py │ │ ├── utils.py │ │ └── whatsapp_connector.py ├── decoders │ └── gcp_geocoding.py ├── gateway │ ├── __init__.py │ ├── http_callbacks.py │ ├── message_gateway.py │ ├── model │ │ ├── __init__.py │ │ ├── attachment.py │ │ ├── base_connector.py │ │ ├── conversation_lead.py │ │ ├── conversation_peer.py │ │ ├── message.py │ │ ├── message_gateway_context.py │ │ ├── middleware.py │ │ └── outgoing │ │ │ ├── __init__.py │ │ │ ├── outgoing_message.py │ │ │ ├── outgoing_message_buttons.py │ │ │ ├── outgoing_message_factory.py │ │ │ ├── outgoing_message_link.py │ │ │ ├── outgoing_message_select.py │ │ │ └── outgoing_message_text.py │ └── utils.py ├── message_enhancers │ ├── default_message_enhancer.py │ └── smart_message_enhancer_openai.py ├── middlewares │ ├── __init__.py │ ├── chatwoot │ │ ├── chatwoot_client.py │ │ ├── conversation_manager.py │ │ ├── middleware.py │ │ ├── model.py │ │ └── phone_util.py │ ├── deepgram_stt.py │ ├── geodecoding.py │ ├── in_mem_blacklist.py │ ├── invitation_guard.py │ ├── moderation │ │ ├── llama3_guard_togetherai.py │ │ ├── moderation_events.py │ │ └── openai_mod_endpoint.py │ ├── redis_blacklist.py │ ├── redis_blacklist_async.py │ └── session_middleware.py ├── model │ └── common.py ├── prompt │ └── prompt_template.py ├── rag │ ├── __init__.py │ ├── 
providers │ │ ├── enhanced_rag.py │ │ ├── markdown_rag.py │ │ └── rag_retriever.py │ ├── slicers │ │ ├── __init__.py │ │ ├── base_slicer.py │ │ ├── markdown │ │ │ ├── __init__.py │ │ │ ├── markdown.py │ │ │ └── utils.py │ │ └── utils.py │ ├── stores │ │ ├── chroma │ │ │ └── chroma_store.py │ │ ├── mongo │ │ │ └── mongo_store.py │ │ └── vector_store.py │ └── text2vec │ │ ├── cache │ │ ├── base_cache.py │ │ ├── disk_cache.py │ │ └── redis_cache.py │ │ ├── cached_ollama.py │ │ ├── cached_openai.py │ │ └── utils.py ├── stores │ ├── common │ │ ├── async_cache_aside_redis.py │ │ ├── key_value_store.py │ │ ├── list_redis_store_async.py │ │ └── memory_cache.py │ ├── history │ │ ├── base_history_provider.py │ │ ├── history_inmemory_provider.py │ │ ├── history_redis_provider.py │ │ └── history_redis_provider_async.py │ └── state │ │ ├── base_state_provider.py │ │ ├── state_inmemory_provider.py │ │ └── state_redis_provider.py └── voice │ ├── base_voice_provider.py │ ├── deepgram_adapter.py │ └── elevenlabs_adapter.py ├── docs ├── assets │ ├── celia_fastapi.png │ ├── celia_logo.png │ ├── celia_overview1.png │ └── celia_stream_modes1.png ├── connectors │ ├── index.md │ ├── stream_mode.md │ ├── webhook_url.md │ └── whatsapp.md ├── getting_started.md ├── index.md ├── middlewares │ └── index.md ├── prompt │ └── index.md └── state │ └── state_manager.md ├── examples ├── 0_hello_world_cli │ └── assistant.py ├── 10_mongo_atlas_rag │ ├── assistant.py │ └── qa.md ├── 11_moderation_demo │ └── assistant.py ├── 12_insights │ └── assistant.py ├── 13_agentic_router_experimental │ ├── assistant.py │ ├── balance_agent.py │ └── transfer_agent.py ├── 14_logic_router_experimental │ ├── assistant.py │ ├── balance_agent.py │ └── onboarding.py ├── 15_voice │ └── assistant.py ├── 16_callbacks │ ├── assistant.py │ └── callbacks.py ├── 17_chatwoot_middleware │ ├── assistant.py │ └── callbacks.py ├── 18_enhancer_shortcuts │ ├── custom_message_enhancer_openai.py │ ├── telegram.py │ └── whatsapp.py ├── 
1_1_hello_world_telegram_ngrok │ └── assistant.py ├── 1_hello_world_telegram │ └── assistant.py ├── 1_hello_world_whatsapp │ └── assistant.py ├── 2_qa_rag │ ├── assistant.py │ └── qa.md ├── 3_1_ollama │ ├── assistant.py │ ├── qa.md │ └── utils.py ├── 3_2_tooling_crypto │ ├── assistant.py │ └── util.py ├── 3_2_tooling_crypto_redis │ ├── assistant.py │ └── util.py ├── 3_3_tooling_tasks │ ├── assistant.py │ └── task_manager.py ├── 3_clerk_tooling │ ├── assistant.py │ └── qa.md ├── 4_1_state │ └── assistant.py ├── 4_events │ └── assistant.py ├── 5_session_middleware │ └── assistant.py ├── 6_vapi │ ├── assistant.py │ └── qa.md ├── 7_openrouter_models │ └── assistant.py ├── 8_openrouter_models_tooling_broken │ └── assistant.py ├── 9_invitation_sample │ └── assistant.py ├── delete_pickle_experiment.py ├── sample.md ├── simple_assistant.py ├── simple_assistant_cli_cmds.py └── smoothy.md ├── mkdocs.yml ├── poetry.lock ├── pyproject.toml └── tests ├── assistants ├── macaw │ ├── macaw_blend_test.py │ ├── macaw_history_adapter_test.py │ ├── macaw_history_trim.py │ ├── macaw_insights_test.py │ ├── macaw_new_message_test.py │ └── macaw_tooling_test.py └── stream_content_chunk_test.py ├── connectors └── registry.py ├── functions └── function_context_test.py ├── history ├── history_in_memory.py ├── history_redis.py └── history_redis_async.py ├── messages ├── lead │ ├── test_lead_serialization.py │ └── test_leads.py ├── message_adapter_test.py ├── outgoing │ └── test_factory.py ├── telegram │ ├── img_message.py │ ├── text_message.py │ └── voice_message.py ├── vapi │ └── message.py └── whatsapp │ ├── image_message.py │ ├── lead.py │ └── text_message.py ├── middlewares ├── in_mem_blacklist_test.py ├── llama_guard_moderation.py ├── openai_moderation.py ├── redis_blacklist_async_test.py └── redis_blacklist_test.py ├── prompt_template └── prompt_template.py ├── rag_stores └── chroma_store_test.py ├── slicers ├── markdown_test.py ├── sample.md └── smoothy.md ├── state └── 
state_manager_test.py ├── stores ├── common │ ├── async_cache_aside_redis_test.py │ ├── list_redis_store_async_test.py │ └── memory_cache_test.py └── state │ ├── state_inmemory_provider_test.py │ └── state_redis_provider_test.py ├── text2vec ├── ollama_test.py ├── ollama_test_redis.py ├── openai_test.py └── openai_test_redis.py └── voice └── deepgram_tests.py /.bumpversion.cfg: -------------------------------------------------------------------------------- 1 | [bumpversion] 2 | current_version = 0.3.18 3 | commit = True 4 | tag = True 5 | 6 | [bumpversion:file:pyproject.toml] 7 | -------------------------------------------------------------------------------- /.clear_envs.sh: -------------------------------------------------------------------------------- 1 | # !#/bin/bash 2 | # this is a bash script to clear all the environment variables 3 | # to run this script, type in the terminal: source .clear_envs.sh 4 | # or you can also use the command: . .clear_envs.sh 5 | 6 | # clear all the environment variables 7 | unset REDIS_URL 8 | unset OPENAI_API_KEY 9 | unset OPENROUTER_API_KEY 10 | unset DEEPGRAM_API_KEY 11 | unset LANGCHAIN_TRACING_V2 12 | unset LANGCHAIN_API_KEY 13 | unset GOOGLE_GEOCODING_API_KEY 14 | unset WHATSAPP_TOKEN 15 | unset WHATSAPP_PHONE_NUMBER_ID 16 | unset WHATSAPP_DISPLAY_PHONE_NUMBER 17 | unset TELEGRAM_TOKEN 18 | unset WEBHOOK_URL 19 | unset HOST 20 | unset PORT 21 | 22 | # check if the environment variables are cleared 23 | echo "Environment variables are cleared" 24 | 25 | 26 | 27 | 28 | -------------------------------------------------------------------------------- /.github/workflows/docs-deploy.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python 3 | # Poetry install: 
https://python-poetry.org/docs/cli/#install 4 | # https://stackoverflow.com/questions/77446605/running-python-poetry-unit-test-in-github-actions 5 | 6 | name: Docs Deploy 7 | 8 | on: 9 | push: 10 | branches: [ "docs" ] 11 | pull_request: 12 | branches: [ "docs" ] 13 | 14 | jobs: 15 | build: 16 | runs-on: ubuntu-latest 17 | environment: github-pages 18 | permissions: 19 | # IMPORTANT: this permission is mandatory 20 | contents: write 21 | 22 | strategy: 23 | fail-fast: false 24 | matrix: 25 | python-version: ["3.11"] 26 | 27 | steps: 28 | - uses: actions/checkout@v4 29 | - name: Set up Python ${{ matrix.python-version }} 30 | uses: actions/setup-python@v3 31 | with: 32 | python-version: ${{ matrix.python-version }} 33 | - name: Update pip 34 | run: python -m pip install --upgrade pip 35 | 36 | - name: Configure Git Credentials 37 | run: | 38 | git config user.name github-actions[bot] 39 | git config user.email 41898282+github-actions[bot]@users.noreply.github.com 40 | 41 | - name: Install MkDocs 42 | run: pip install mkdocs-material 43 | 44 | - name: Build and Deploy to GitHub Pages 45 | run: mkdocs gh-deploy --force 46 | 47 | # - name: Deploy to GitHub Pages 48 | # uses: peaceiris/actions-gh-pages@v3 49 | # with: 50 | # github_token: ${{ secrets.GITHUB_TOKEN }} 51 | # publish_dir: ./site 52 | -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | 3 | on: 4 | # execute the workflow on push events from any branch 5 | push: 6 | branches: 7 | - "*" 8 | 9 | pull_request: 10 | branches: 11 | - "*" 12 | 13 | 14 | 15 | jobs: 16 | run_test: 17 | name: Run tests 18 | runs-on: ubuntu-latest 19 | environment: dev 20 | 21 | strategy: 22 | fail-fast: false 23 | matrix: 24 | python-version: ["3.11"] 25 | 26 | steps: 27 | 28 | # # Exit if a commit is made by GitHub Actions bot 29 | # # Skip automatic updates. 
30 | # # There is an action that updates dev branch with main branch 31 | # - name: Exit if triggered by GitHub Actions bot 32 | # run: | 33 | # if [ "${{ github.actor }}" = "github-actions[bot]" ]; then 34 | # echo "Commit made by GitHub Actions bot, exiting." 35 | # exit 0 36 | # fi 37 | 38 | - name: Checkout code 39 | uses: actions/checkout@v4 40 | 41 | - name: Set up Python ${{ matrix.python-version }} 42 | uses: actions/setup-python@v3 43 | with: 44 | python-version: ${{ matrix.python-version }} 45 | - name: Install dependencies 46 | run: | 47 | python -m pip install --upgrade pip 48 | python -m pip install flake8 pytest 49 | pip install poetry 50 | poetry install --with dev 51 | - name: Lint with flake8 52 | run: | 53 | # stop the build if there are Python syntax errors or undefined names 54 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics 55 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide 56 | flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics 57 | - name: Test with pytest 58 | run: | 59 | poetry run pytest 60 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | { 8 | "name": "Python Debugger: Current File", 9 | "type": "debugpy", 10 | "request": "launch", 11 | "program": "${file}", 12 | "console": "integratedTerminal" 13 | }, 14 | // pytest current file 15 | { 16 | "name": "Python: Pytest Current File", 17 | "type": "debugpy", 18 | "request": "launch", 19 | "program": "${workspaceFolder}/.venv/bin/pytest", 20 | "args": [ 21 | "${file}" 22 | ], 23 | "console": "integratedTerminal", 24 | "justMyCode": false, 25 | "cwd": "${workspaceRoot}", 26 | "env": { 27 | "PYTHONPATH": "${workspaceFolder}" 28 | } 29 | }, 30 | 31 | // pytest run current test 32 | { 33 | "name": "Python: Pytest Current Test", 34 | "type": "debugpy", 35 | "request": "launch", 36 | "program": "${workspaceFolder}/.venv/bin/pytest", 37 | "args": [ 38 | "${file}::${selectedText}" 39 | ], 40 | "console": "integratedTerminal", 41 | "justMyCode": false, 42 | "cwd": "${workspaceRoot}", 43 | "env": { 44 | "PYTHONPATH": "${workspaceFolder}" 45 | } 46 | }, 47 | 48 | // pytest all 49 | { 50 | "name": "Python: Pytest All", 51 | "type": "debugpy", 52 | "request": "launch", 53 | "program": "${workspaceFolder}/.venv/bin/pytest", 54 | "args": [ 55 | "${workspaceFolder}/tests" 56 | ], 57 | "console": "integratedTerminal", 58 | "justMyCode": true, 59 | "cwd": "${workspaceRoot}", 60 | "env": { 61 | "PYTHONPATH": "${workspaceFolder}" 62 | } 63 | } 64 | ] 65 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Cel.ai 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, 
modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /bump_version.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | # 1. **Bump the Patch Version Without Publishing:** 5 | 6 | # If you want to bump the patch version but do not want to commit or tag the changes yet, you can run: 7 | # ```bash 8 | # ./bump_version.sh patch 9 | # ``` 10 | # This will update the version in `pyproject.toml` but will not commit or tag the changes. 11 | 12 | # 2. **Bump the Minor Version and Publish:** 13 | # To bump the minor version and also commit and tag the changes, use the `--publish` flag: 14 | # ```bash 15 | # ./bump_version.sh minor --publish 16 | # ``` 17 | # This will update the version, commit the changes with a message, create a tag, and push the tag to the remote repository. 18 | 19 | # 3. **Bump the Major Version Without Publishing:** 20 | # If you want to bump the major version but hold off on committing and tagging, you can run: 21 | # ```bash 22 | # ./bump_version.sh major 23 | # ``` 24 | # This will only update the version in `pyproject.toml`. 
25 | 26 | # 4. **Bump the Major Version and Publish:** 27 | # To bump the major version and immediately commit and tag the changes, use: 28 | # ```bash 29 | # ./bump_version.sh major --publish 30 | # ``` 31 | # This will perform all the actions: version bump, commit, tag creation, and pushing the tag. 32 | 33 | # 5. **Invalid Usage:** 34 | # If you provide an invalid version type, the script will display a usage message: 35 | # ```bash 36 | # ./bump_version.sh invalid 37 | # ``` 38 | # Output: 39 | # ``` 40 | # Usage: ./bump_version.sh [patch|minor|major] [--publish] 41 | # ``` 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | # Set the default version bump type to 'patch' 50 | VERSION_TYPE=${1:-patch} 51 | 52 | # Check if the provided argument is valid 53 | if [[ ! "$VERSION_TYPE" =~ ^(patch|minor|major)$ ]]; then 54 | echo "Usage: $0 [patch|minor|major] [--publish]" 55 | exit 1 56 | fi 57 | 58 | # Bump the version using the specified type 59 | poetry version $VERSION_TYPE 60 | 61 | # Get the new version 62 | NEW_VERSION=$(poetry version -s) 63 | 64 | # Check if the --publish flag is present 65 | PUBLISH=false 66 | for arg in "$@"; do 67 | if [[ "$arg" == "--publish" ]]; then 68 | PUBLISH=true 69 | break 70 | fi 71 | done 72 | 73 | # Commit and create a tag if --publish is present 74 | if $PUBLISH; then 75 | git add pyproject.toml 76 | git commit -m "Bump version to $NEW_VERSION" 77 | git tag "v$NEW_VERSION" 78 | git push origin --tags 79 | else 80 | echo "Version bumped to $NEW_VERSION, but not committed or tagged. Use --publish to commit and tag." 
81 | fi -------------------------------------------------------------------------------- /cel/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cel-ai/celai/66b05d285d7f9a5b9833f2b36f8b44e7d2007ead/cel/__init__.py -------------------------------------------------------------------------------- /cel/assets/celai_connectors.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cel-ai/celai/66b05d285d7f9a5b9833f2b36f8b44e7d2007ead/cel/assets/celai_connectors.png -------------------------------------------------------------------------------- /cel/assets/celai_diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cel-ai/celai/66b05d285d7f9a5b9833f2b36f8b44e7d2007ead/cel/assets/celai_diagram.png -------------------------------------------------------------------------------- /cel/assets/celai_middlewares.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cel-ai/celai/66b05d285d7f9a5b9833f2b36f8b44e7d2007ead/cel/assets/celai_middlewares.png -------------------------------------------------------------------------------- /cel/assets/celai_router_diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cel-ai/celai/66b05d285d7f9a5b9833f2b36f8b44e7d2007ead/cel/assets/celai_router_diagram.png -------------------------------------------------------------------------------- /cel/assets/celia_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cel-ai/celai/66b05d285d7f9a5b9833f2b36f8b44e7d2007ead/cel/assets/celia_logo.png -------------------------------------------------------------------------------- /cel/assets/celia_overview1.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/cel-ai/celai/66b05d285d7f9a5b9833f2b36f8b44e7d2007ead/cel/assets/celia_overview1.png -------------------------------------------------------------------------------- /cel/assistants/common.py: -------------------------------------------------------------------------------- 1 | 2 | from abc import ABC 3 | from dataclasses import dataclass 4 | 5 | 6 | @dataclass 7 | class Param(ABC): 8 | """ Tool/Function parameter definition, mind that the enum and required 9 | fields may not be present in models other than the OpenAI. """ 10 | name: str 11 | type: str 12 | description: str 13 | required: bool = True 14 | enum: list[str] = None 15 | 16 | @dataclass 17 | class FunctionDefinition(ABC): 18 | name: str 19 | description: str 20 | parameters: list[Param] 21 | 22 | @dataclass 23 | class EventResponse(ABC): 24 | # text: str = None 25 | # image: str = None 26 | # audio: str = None 27 | # video: str = None 28 | disable_ai_response: bool = False 29 | # blend: bool = False 30 | # is_private: bool = False 31 | # append_to_history: bool = True 32 | -------------------------------------------------------------------------------- /cel/assistants/function_response.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import Callable 3 | 4 | 5 | class RequestMode: 6 | STREAM = "stream" 7 | SINGLE = "single" 8 | 9 | 10 | @dataclass 11 | class FunctionResponse: 12 | text: str 13 | 14 | """ the execution function response will be sent as a stream of chunks or as a single response """ 15 | request_mode: str = RequestMode.SINGLE 16 | 17 | """ If you want to disable direct response from Asiistant AI, use callback to handle the response. 18 | It is useful when you want to handle the response in a different way, like sending the response to a different service. 
19 | Another use case is when oyu need to use specific platform features to send the response, like interactives messages, buttons, etc. 20 | """ 21 | callback: Callable = None 22 | 23 | -------------------------------------------------------------------------------- /cel/assistants/macaw/custom_chat_models/chat_open_router.py: -------------------------------------------------------------------------------- 1 | # https://medium.com/@gal.peretz/openrouter-langchain-leverage-opensource-models-without-the-ops-hassle-9ffbf0016da7 2 | 3 | import os 4 | from typing import Optional 5 | # from langchain_community.chat_models import ChatOpenAI 6 | from langchain_openai import ChatOpenAI 7 | from langchain_core.prompts import ChatPromptTemplate 8 | 9 | 10 | OPENROUTER_API_BASE = "https://openrouter.ai/api/v1" 11 | 12 | 13 | # TODO: OpenRouter Models has problems with Langchain tooling 14 | # This LLM model wrapper throws an error when used with Langchain tooling 15 | # specifically with bind_tools. The error is due to the fact that the 16 | # OpenRouter models do not support tooling??? I am not sure about this. 
17 | # -------------------------------------------------------------------- 18 | class ChatOpenRouter(ChatOpenAI): 19 | openai_api_base: str 20 | openai_api_key: str 21 | model_name: str 22 | 23 | def __init__(self, 24 | model: str, 25 | openai_api_key: Optional[str] = None, 26 | openai_api_base: str = OPENROUTER_API_BASE, 27 | **kwargs): 28 | openai_api_key = openai_api_key or os.getenv('OPENROUTER_API_KEY') 29 | super().__init__(openai_api_base=openai_api_base, 30 | openai_api_key=openai_api_key, 31 | model_name=model, **kwargs) 32 | 33 | 34 | def ChatOpenAIOpenRouter(**kwargs): 35 | return ChatOpenAI( 36 | **kwargs, 37 | openai_api_base=OPENROUTER_API_BASE, 38 | openai_api_key = os.getenv('OPENROUTER_API_KEY') 39 | ) 40 | # -------------------------------------------------------------------- 41 | 42 | 43 | -------------------------------------------------------------------------------- /cel/assistants/macaw/macaw_history_adapter.py: -------------------------------------------------------------------------------- 1 | from cel.gateway.model.conversation_lead import ConversationLead 2 | from langchain.load.dump import dumpd 3 | from langchain.load.load import load 4 | from langchain_core.messages import BaseMessage 5 | from cel.stores.history.base_history_provider import BaseHistoryProvider 6 | 7 | 8 | class MacawHistoryAdapter: 9 | def __init__(self, store: BaseHistoryProvider): 10 | self.store = store 11 | 12 | async def append_to_history(self, lead: ConversationLead, entry: BaseMessage, metadata=None, ttl=None): 13 | assert isinstance(lead, ConversationLead), f"Expected ConversationLead, got {type (lead)}" 14 | aux = dumpd(entry) 15 | await self.store.append_to_history(lead.get_session_id(), aux, metadata, ttl) 16 | 17 | async def get_history(self, lead: ConversationLead) -> list[BaseMessage]: 18 | assert isinstance(lead, ConversationLead), f"Expected ConversationLead, got {type (lead)}" 19 | history = await self.store.get_history(lead.get_session_id()) 20 | 
return [load(h) for h in history] 21 | 22 | 23 | async def clear_history(self, lead: ConversationLead, keep_last_messages=None): 24 | assert isinstance(lead, ConversationLead), f"Expected ConversationLead, got {type (lead)}" 25 | await self.store.clear_history(lead.get_session_id(), keep_last_messages) 26 | 27 | async def get_last_messages(self, lead: ConversationLead, count) -> list[BaseMessage]: 28 | assert isinstance(lead, ConversationLead), f"Expected ConversationLead, got {type (lead)}" 29 | msgs = await self.store.get_last_messages(lead.get_session_id(), count) 30 | return [load(m) for m in msgs] 31 | 32 | async def close_conversation(self, lead: ConversationLead): 33 | raise NotImplementedError -------------------------------------------------------------------------------- /cel/assistants/macaw/macaw_inference_context.py: -------------------------------------------------------------------------------- 1 | from cel.assistants.common import FunctionDefinition 2 | from cel.assistants.macaw.macaw_settings import MacawSettings 3 | from cel.gateway.model.conversation_lead import ConversationLead 4 | from cel.prompt.prompt_template import PromptTemplate 5 | from cel.rag.providers.rag_retriever import RAGRetriever 6 | from cel.stores.history.base_history_provider import BaseHistoryProvider 7 | from cel.stores.state.base_state_provider import BaseChatStateProvider 8 | 9 | 10 | from abc import ABC 11 | from dataclasses import dataclass 12 | from typing import Any 13 | 14 | 15 | @dataclass 16 | class MacawNlpInferenceContext(ABC): 17 | lead: ConversationLead 18 | settings: MacawSettings 19 | prompt: PromptTemplate = '' 20 | init_state: dict = None 21 | local_state: dict = None 22 | functions: list[FunctionDefinition] = None 23 | rag_retriever: RAGRetriever = None 24 | # default value {} is used to avoid mutable default arguments 25 | llm_kwargs: dict[str, Any] = None 26 | history_store: BaseHistoryProvider = None 27 | state_store: BaseChatStateProvider = None 28 | 
llm: Any = None -------------------------------------------------------------------------------- /cel/assistants/macaw/macaw_test_chat_model.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from dataclasses import dataclass 3 | import json 4 | from loguru import logger as log 5 | from langchain_openai import ChatOpenAI 6 | from langchain_core.messages import HumanMessage, ToolMessage, AIMessageChunk 7 | from langchain_core.messages import ( 8 | SystemMessage, 9 | message_to_dict, 10 | messages_from_dict, 11 | ) 12 | from langsmith import traceable 13 | 14 | DEFAULT_PROMPT = "Create an assistant funny and sarcastic, dark humor chatbot." 15 | LLM_DEFAULT_KWARGS = { 16 | "model": "gpt-4o", 17 | "temperature": 0, 18 | "max_tokens": None, 19 | "timeout": 20, 20 | "max_retries": 3, 21 | "streaming": True, 22 | "verbose": True 23 | } 24 | 25 | @dataclass 26 | class MacawFunctionCall: 27 | name: str 28 | args: dict 29 | id: str 30 | 31 | 32 | chat_model_provider = ChatOpenAI 33 | 34 | # merge kwargs 35 | llm = chat_model_provider( 36 | **{**LLM_DEFAULT_KWARGS} 37 | ) -------------------------------------------------------------------------------- /cel/assistants/request_context.py: -------------------------------------------------------------------------------- 1 | from cel.assistants.context import Context 2 | 3 | class RequestContext(Context): 4 | pass -------------------------------------------------------------------------------- /cel/assistants/router/utils.py: -------------------------------------------------------------------------------- 1 | from cel.gateway.model.conversation_lead import ConversationLead 2 | from cel.stores.history.base_history_provider import BaseHistoryProvider 3 | from langchain.load.load import load 4 | 5 | 6 | 7 | async def build_router_query (history_store: BaseHistoryProvider, lead: ConversationLead, text: str, length: int = 5): 8 | history = await 
history_store.get_history(lead.get_session_id()) 9 | # Create a list of the last N messages without tools and tool_calls 10 | messages = [] 11 | for h in history: 12 | aux = load(h) 13 | role = aux.type 14 | text = aux.content 15 | 16 | # Skip tools and tool_calls 17 | if role == "tool" or role == "tool_call": 18 | continue 19 | 20 | messages.append({ 21 | "role": role, 22 | "text": text 23 | }) 24 | 25 | # Get the last N messages 26 | last_messages = messages[-length:] 27 | return last_messages 28 | 29 | 30 | -------------------------------------------------------------------------------- /cel/assistants/state_manager.py: -------------------------------------------------------------------------------- 1 | import json 2 | from typing import Any 3 | from loguru import logger as log 4 | from cel.gateway.model.conversation_lead import ConversationLead 5 | from cel.stores.state.base_state_provider import BaseChatStateProvider 6 | 7 | 8 | class AsyncStateManager: 9 | 10 | def __init__(self, 11 | lead: ConversationLead, 12 | store: BaseChatStateProvider, 13 | commit_on_error: bool = False 14 | ): 15 | """ StateManager is a context manager to manage the state of a conversation. 
16 | 17 | :param lead: ConversationLead 18 | :param store: BaseChatStateProvider 19 | :param commit_on_error: if True, the state will be saved on error, default is False 20 | """ 21 | 22 | if not isinstance(store, BaseChatStateProvider): 23 | raise ValueError("StateManager: store must be an instance of BaseChatStateProvider") 24 | if not isinstance(lead, ConversationLead): 25 | raise ValueError("StateManager: lead must be an instance of ConversationLead") 26 | 27 | self.store = store 28 | self.lead = lead 29 | self.state = {} 30 | self.commit_on_error = commit_on_error 31 | 32 | 33 | async def __aenter__(self): 34 | await self.load_state() 35 | return self 36 | 37 | async def __aexit__(self, exc_type, exc_value, traceback): 38 | if exc_type is not None: 39 | log.error(f"StateManager: error on context execution {exc_type} {exc_value}") 40 | if self.commit_on_error: 41 | await self.save_state() 42 | # Return False to raise the exception 43 | return False 44 | else: 45 | await self.save_state() 46 | return True 47 | 48 | 49 | async def load_state(self): 50 | self.state = await self.store.get_store(self.lead.get_session_id()) or {} 51 | return self.state 52 | 53 | async def save_state(self): 54 | await self.store.set_store(self.lead.get_session_id(), self.state) 55 | 56 | def get(self, key, default=None): 57 | return self.state.get(key, default) 58 | 59 | def set(self, key, value): 60 | self.state[key] = value 61 | 62 | def __getitem__(self, key): 63 | return self.state.get(key) 64 | 65 | def __setitem__(self, key, value): 66 | self.state[key] = value 67 | 68 | 69 | 70 | 71 | -------------------------------------------------------------------------------- /cel/assistants/stream_content_chunk.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import Any 3 | 4 | 5 | @dataclass 6 | class StreamContentChunk: 7 | content: str 8 | is_partial: bool = True 9 | 10 | 11 | def __add__(self, 
other: Any) -> "StreamContentChunk": 12 | assert isinstance(other, StreamContentChunk),\ 13 | "StreamContentChunk can only be added to another StreamContentChunk" 14 | 15 | return self.__class__( 16 | content=self.content + other.content, 17 | is_partial=other.is_partial 18 | ) 19 | 20 | def __str__(self): 21 | return self.content -------------------------------------------------------------------------------- /cel/cache.py: -------------------------------------------------------------------------------- 1 | import threading 2 | from diskcache import Cache 3 | import functools 4 | from cel.config import CACHE_DEFAULT_SETTINGS, CACHE_DIRECTORY 5 | 6 | 7 | 8 | def singleton(func): 9 | @functools.wraps(func) 10 | def wrapper(*args, **kwargs): 11 | if wrapper.instance is None: 12 | with wrapper.lock: 13 | if wrapper.instance is None: 14 | wrapper.instance = func(*args, **kwargs) 15 | return wrapper.instance 16 | 17 | wrapper.lock = threading.Lock() 18 | wrapper.instance = None 19 | return wrapper 20 | 21 | @singleton 22 | def get_cache(directory: str = CACHE_DIRECTORY, **settings): 23 | return Cache(directory, **{**CACHE_DEFAULT_SETTINGS, **settings}) -------------------------------------------------------------------------------- /cel/comms/client_command.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | 3 | 4 | @dataclass 5 | class ClientCommand: 6 | command: str 7 | args: list 8 | 9 | 10 | def parse_client_command(message: str) -> ClientCommand: 11 | """ Will try to match a client command inside a message. A client command begins with a '/' character. 
12 | for example /reset all will be parsed as command='reset' and args={'all'} 13 | Another example /phone set +1232342342 will be parsed as command='phone' and args={'set', '+1232342342'} 14 | """ 15 | if message.startswith("/"): 16 | parts = message.split(" ") 17 | command = parts[0][1:] 18 | args = [] 19 | if len(parts) > 1: 20 | args = parts[1:] 21 | return ClientCommand(command, args) 22 | 23 | else: 24 | return None 25 | 26 | 27 | 28 | if __name__ == "__main__": 29 | print(parse_client_command("/reset all")) 30 | print(parse_client_command("/phone set +1232342342")) 31 | 32 | 33 | -------------------------------------------------------------------------------- /cel/comms/utils.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import hashlib 3 | import json 4 | from loguru import logger as log 5 | 6 | def get_invariant_hash(obj): 7 | """ 8 | Returns an MD5 hash of a JSON-serialized copy of the input object, with the 'metadata' field removed. 9 | This hash can be used to check if two objects are equivalent, regardless of their metadata. 10 | """ 11 | 12 | # Copiar el objeto para evitar modificar el original 13 | obj_copy = obj.to_dict() 14 | 15 | # Eliminar el campo 'metadata' del objeto copiado 16 | if 'metadata' in obj_copy: 17 | del obj_copy['metadata'] 18 | 19 | # Serializar el objeto en formato JSON 20 | json_str = json.dumps(obj_copy, sort_keys=True) 21 | 22 | # Generar un hash MD5 del JSON serializado 23 | md5_hash = hashlib.md5(json_str.encode('utf-8')).hexdigest() 24 | 25 | return md5_hash 26 | 27 | 28 | def async_run(coro, then=None): 29 | try: 30 | loop = asyncio.get_running_loop() 31 | except RuntimeError: # 'RuntimeError: There is no current event loop...' 32 | loop = None 33 | 34 | if loop and loop.is_running(): 35 | log.debug('Async event loop already running. 
Adding coroutine to the event loop.') 36 | tsk = loop.create_task(coro) 37 | # ^-- https://docs.python.org/3/library/asyncio-task.html#task-object 38 | # Optionally, a callback function can be executed when the coroutine completes 39 | # tsk.add_done_callback(lambda t: print(f'Task done with result={t.result()} << return val of main()')) 40 | tsk.add_done_callback(lambda t: then(t.result())) 41 | 42 | else: 43 | log.debug('Starting new event loop') 44 | result = asyncio.run(coro) 45 | 46 | 47 | -------------------------------------------------------------------------------- /cel/config.py: -------------------------------------------------------------------------------- 1 | CACHE_DIRECTORY = ".cache" 2 | 3 | CACHE_DEFAULT_SETTINGS = { 4 | 'statistics': 0, # False 5 | 'tag_index': 0, # False 6 | 'eviction_policy': 'least-recently-stored', 7 | 'size_limit': 2**30, # 1gb 8 | 'cull_limit': 10, 9 | 'sqlite_auto_vacuum': 1, # FULL 10 | 'sqlite_cache_size': 2**13, # 8,192 pages 11 | 'sqlite_journal_mode': 'wal', 12 | 'sqlite_mmap_size': 2**26, # 64mb 13 | 'sqlite_synchronous': 1, # NORMAL 14 | 'disk_min_file_size': 2**15, # 32kb 15 | } -------------------------------------------------------------------------------- /cel/connectors/cli/model/cli_lead.py: -------------------------------------------------------------------------------- 1 | import os 2 | from cel.gateway.model.conversation_lead import ConversationLead 3 | from cel.gateway.model.conversation_peer import ConversationPeer 4 | 5 | 6 | class CliLead(ConversationLead): 7 | 8 | def __init__(self, **kwargs): 9 | super().__init__(**kwargs) 10 | # get current process id from os 11 | self.process_id = os.getpid() 12 | 13 | 14 | def get_session_id(self): 15 | return f"{self.connector_name}:{self.process_id}" 16 | 17 | def to_dict(self): 18 | data = super().to_dict() 19 | data['process_id'] = self.process_id 20 | return data 21 | 22 | @classmethod 23 | def from_dict(cls, lead_dict): 24 | return CliLead() 25 | 26 | 27 | def 
__str__(self): 28 | return f"CliLead: {self.process_id}" 29 | 30 | 31 | @classmethod 32 | def from_message(cls, **kwargs): 33 | conversation_peer = ConversationPeer( 34 | name='terminal', 35 | id=os.getpid(), 36 | phone=None, 37 | avatarUrl=None, 38 | email=None 39 | ) 40 | return CliLead(conversation_from=conversation_peer, **kwargs) -------------------------------------------------------------------------------- /cel/connectors/cli/model/cli_message.py: -------------------------------------------------------------------------------- 1 | import time 2 | from cel.connectors.cli.model.cli_lead import CliLead 3 | from cel.gateway.model.base_connector import BaseConnector 4 | from cel.connectors.telegram.model.telegram_attachment import TelegramAttachment 5 | from cel.connectors.telegram.model.telegram_lead import TelegramLead 6 | from cel.gateway.model.conversation_lead import ConversationLead 7 | from cel.gateway.model.message import Message 8 | 9 | 10 | class CliMessage(Message): 11 | 12 | def __init__(self, 13 | lead: ConversationLead, 14 | text: str = None, 15 | metadata: dict = None, 16 | date: int = None, 17 | attachments: list[TelegramAttachment] = None 18 | ): 19 | super().__init__(lead, 20 | text=text, 21 | date=date, 22 | metadata=metadata, 23 | attachments=attachments) 24 | 25 | 26 | def is_voice_message(self): 27 | return False 28 | 29 | @classmethod 30 | async def load_from_message(cls, 31 | message: str, 32 | connector: BaseConnector = None): 33 | 34 | # get text from message or caption if it is a media message 35 | text = message 36 | date = int(time.time()) 37 | lead = CliLead.from_message(connector=connector) 38 | return CliMessage(lead=lead, 39 | text=text, 40 | date=date) 41 | 42 | 43 | def __str__(self): 44 | return f"CliMessage: {self.text}" 45 | 46 | def __repr__(self): 47 | return f"CliMessage: {self.text}" 48 | 49 | -------------------------------------------------------------------------------- /cel/connectors/livekit/model/livekit_lead.py: 
-------------------------------------------------------------------------------- 1 | from cel.gateway.model.conversation_lead import ConversationLead 2 | from cel.gateway.model.conversation_peer import ConversationPeer 3 | 4 | 5 | class LiveKitLead(ConversationLead): 6 | """ LiveKitLead class.""" 7 | 8 | def __init__(self, call_object: dict, **kwargs): 9 | super().__init__(connector_name="livekit", **kwargs) 10 | self.call_object: dict = call_object 11 | 12 | def get_session_id(self): 13 | return f"{self.connector_name}:{self.call_object['session_id']}" 14 | 15 | @classmethod 16 | def from_message(cls, message: dict, **kwargs): 17 | conversation_peer = ConversationPeer( 18 | name='Unknown', 19 | id=message['session_id'], 20 | phone=None, 21 | avatarUrl=None, 22 | email=None, 23 | ) 24 | return LiveKitLead(call_object=message, conversation_from=conversation_peer, **kwargs) 25 | -------------------------------------------------------------------------------- /cel/connectors/livekit/model/livekit_message.py: -------------------------------------------------------------------------------- 1 | from loguru import logger as log 2 | from cel.gateway.model.message import Message 3 | from cel.gateway.model.conversation_lead import ConversationLead 4 | from cel.gateway.model.base_connector import BaseConnector 5 | from cel.connectors.livekit.model.livekit_lead import LiveKitLead 6 | 7 | class LiveKitMessage(Message): 8 | """ 9 | LiveKitMessage class to represent a message in the LiveKit connector. 10 | """ 11 | 12 | def __init__(self, lead: ConversationLead, text: str = None): 13 | """ 14 | Initialize the LiveKitMessage instance. 15 | 16 | Args: 17 | lead (ConversationLead): The lead associated with the message. 18 | text (str): The text of the message. 
19 | """ 20 | super().__init__(lead, text=text) 21 | 22 | def is_voice_message(self): 23 | return False 24 | 25 | @classmethod 26 | async def load_from_message(cls, request, connector: BaseConnector = None): 27 | """ 28 | Load a LiveKitMessage from a message dictionary. 29 | 30 | Args: 31 | message_dict (dict): The message dictionary. 32 | token (str): The token for authentication. 33 | connector: The connector instance. 34 | 35 | Returns: 36 | LiveKitMessage: The loaded LiveKitMessage instance. 37 | """ 38 | log.debug(f"[LiveKit] Raw request in load_from_message: {request}") 39 | user_message = request.get("user_text") 40 | 41 | if not user_message: 42 | raise ValueError("No message found in the message_dict") 43 | 44 | lead = LiveKitLead.from_message(request, connector=connector) 45 | return LiveKitMessage(lead=lead, text=user_message) 46 | 47 | def __str__(self): 48 | return f"LiveKitMessage: {self.text}" 49 | 50 | def __repr__(self): 51 | return f"LiveKitMessage: {self.text}" 52 | -------------------------------------------------------------------------------- /cel/connectors/telegram/__init__.py: -------------------------------------------------------------------------------- 1 | from .telegram_connector import TelegramConnector 2 | from .model.telegram_lead import TelegramLead 3 | from .model.telegram_message import TelegramMessage 4 | from .model.telegram_attachment import TelegramAttachment -------------------------------------------------------------------------------- /cel/connectors/telegram/model/telegram_lead.py: -------------------------------------------------------------------------------- 1 | from cel.gateway.model.conversation_lead import ConversationLead 2 | from cel.gateway.model.conversation_peer import ConversationPeer 3 | 4 | 5 | class TelegramLead(ConversationLead): 6 | 7 | def __init__(self, chat_id: str, **kwargs): 8 | super().__init__(**kwargs) 9 | self.chat_id: str = str(chat_id) 10 | 11 | 12 | def get_session_id(self): 13 | return 
f"{self.connector_name}:{self.chat_id}" 14 | 15 | def to_dict(self): 16 | data = super().to_dict() 17 | data['chat_id'] = self.chat_id 18 | return data 19 | 20 | @classmethod 21 | def from_dict(cls, lead_dict): 22 | return TelegramLead( 23 | chat_id=lead_dict.get("chat_id"), 24 | metadata=lead_dict.get("metadata"), 25 | connector_name=lead_dict.get("connector_name") if lead_dict.get("connector_name") else None, 26 | ) 27 | 28 | def __str__(self): 29 | return f"TelegramLead: {self.chat_id}" 30 | 31 | 32 | @classmethod 33 | def from_telegram_message(cls, message: dict, **kwargs): 34 | chat_id = str(message['chat']['id']) 35 | metadata = { 36 | 'message_id': str(message['message_id']), 37 | 'date': message['date'], 38 | 'raw': message 39 | } 40 | conversation_peer = ConversationPeer( 41 | name=message['from']['first_name'], 42 | id=str(message['from']['id']), 43 | phone=None, 44 | avatarUrl=None, 45 | email=None 46 | ) 47 | return TelegramLead(chat_id=chat_id, metadata=metadata, conversation_from=conversation_peer, **kwargs) -------------------------------------------------------------------------------- /cel/connectors/telegram/model/telegram_message.py: -------------------------------------------------------------------------------- 1 | from cel.gateway.model.base_connector import BaseConnector 2 | from cel.connectors.telegram.model.telegram_attachment import TelegramAttachment 3 | from cel.connectors.telegram.model.telegram_lead import TelegramLead 4 | from cel.gateway.model.conversation_lead import ConversationLead 5 | from cel.gateway.model.message import Message 6 | 7 | 8 | class TelegramMessage(Message): 9 | 10 | def __init__(self, 11 | lead: ConversationLead, 12 | text: str = None, 13 | metadata: dict = None, 14 | date: int = None, 15 | attachments: list[TelegramAttachment] = None 16 | ): 17 | super().__init__(lead, text=text, date=date, metadata=metadata, attachments=attachments) 18 | 19 | 20 | def is_voice_message(self): 21 | # check if the message has a 
voice attachment 22 | if self.attachments: 23 | for attach in self.attachments: 24 | if attach.type == "voice": 25 | return True 26 | return False 27 | 28 | @classmethod 29 | async def load_from_message(cls, message_dict, token: str, connector: BaseConnector = None): 30 | msg = message_dict.get("message") 31 | # get text from message or caption if it is a media message 32 | text = msg.get("text") or msg.get("caption") 33 | 34 | # if text begins with /start, it is a command 35 | if text and text.startswith("/start"): 36 | # decode the arguments 37 | args = text.split(" ")[1:] 38 | # decode string from base64 39 | import base64 40 | text = base64.b64decode(args[0]).decode("utf-8") 41 | 42 | date = msg.get("date") 43 | metadata = {'raw': msg} 44 | lead = TelegramLead.from_telegram_message(msg, connector=connector) 45 | attach = await TelegramAttachment.load_from_message(message_dict, token) 46 | return TelegramMessage(lead=lead, text=text, date=date, metadata=metadata, attachments=[attach] if attach else None) 47 | 48 | 49 | def __str__(self): 50 | return f"TelegramMessage: {self.text}" 51 | 52 | def __repr__(self): 53 | return f"TelegramMessage: {self.text}" 54 | 55 | -------------------------------------------------------------------------------- /cel/connectors/telegram/run_mode.py: -------------------------------------------------------------------------------- 1 | class RunMode: 2 | """ 3 | A class to represent the different modes of running a Telegram connector. 4 | 5 | Attributes: 6 | ----------- 7 | WEBHOOK : str 8 | A constant representing the webhook mode. 9 | POLLING : str 10 | A constant representing the polling mode. 11 | 12 | Methods: 13 | -------- 14 | get_modes(): 15 | Returns a list of all available run modes. 16 | is_valid(mode: str): 17 | Checks if the provided mode is a valid run mode. 18 | get_mode(mode: str): 19 | Returns the mode if it is valid, otherwise raises an exception. 20 | get_default(): 21 | Returns the default run mode. 
22 | """ 23 | 24 | WEBHOOK = "webhook" 25 | POLLING = "polling" 26 | 27 | @staticmethod 28 | def get_modes(): 29 | """ 30 | Returns a list of all available run modes. 31 | 32 | Returns: 33 | list: A list containing all available run modes. 34 | """ 35 | return [RunMode.WEBHOOK, RunMode.POLLING] 36 | 37 | @staticmethod 38 | def is_valid(mode: str): 39 | """ 40 | Checks if the provided mode is a valid run mode. 41 | 42 | Parameters: 43 | mode (str): The mode to be checked. 44 | 45 | Returns: 46 | bool: True if the mode is valid, False otherwise. 47 | """ 48 | return mode in RunMode.get_modes() 49 | 50 | @staticmethod 51 | def get_mode(mode: str): 52 | """ 53 | Returns the mode if it is valid, otherwise raises an exception. 54 | 55 | Parameters: 56 | mode (str): The mode to be returned. 57 | 58 | Returns: 59 | str: The valid mode. 60 | 61 | Raises: 62 | Exception: If the mode is not valid. 63 | """ 64 | if RunMode.is_valid(mode): 65 | return mode 66 | else: 67 | raise Exception(f"Invalid run mode: {mode}. Valid modes are: {RunMode.get_modes()}") 68 | 69 | @staticmethod 70 | def get_default(): 71 | """ 72 | Returns the default run mode. 73 | 74 | Returns: 75 | str: The default run mode. 
76 | """ 77 | return RunMode.WEBHOOK -------------------------------------------------------------------------------- /cel/connectors/telegram/samples/hi.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cel-ai/celai/66b05d285d7f9a5b9833f2b36f8b44e7d2007ead/cel/connectors/telegram/samples/hi.mp3 -------------------------------------------------------------------------------- /cel/connectors/telegram/samples/hi.oga: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cel-ai/celai/66b05d285d7f9a5b9833f2b36f8b44e7d2007ead/cel/connectors/telegram/samples/hi.oga -------------------------------------------------------------------------------- /cel/connectors/telegram/samples/hi.ogg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cel-ai/celai/66b05d285d7f9a5b9833f2b36f8b44e7d2007ead/cel/connectors/telegram/samples/hi.ogg -------------------------------------------------------------------------------- /cel/connectors/telegram/samples/message.json: -------------------------------------------------------------------------------- 1 | { 2 | "update_id": 169216955, 3 | "message": { 4 | "message_id": 144, 5 | "from": { 6 | "id": 13201419, 7 | "is_bot": false, 8 | "first_name": "John", 9 | "last_name": "Doe", 10 | "language_code": "en" 11 | }, 12 | "chat": { 13 | "id": 13201419, 14 | "first_name": "John", 15 | "last_name": "Doe", 16 | "type": "private" 17 | }, 18 | "date": 1687732679, 19 | "text": "asd" 20 | } 21 | } -------------------------------------------------------------------------------- /cel/connectors/telegram/samples/message_audio.json: -------------------------------------------------------------------------------- 1 | { 2 | "update_id": 794947863, 3 | "message": { 4 | "message_id": 647, 5 | "from": { 6 | "id": 1320141, 7 | "is_bot": false, 8 | "first_name": "John", 9 | 
"username": "foobar", 10 | "language_code": "en" 11 | }, 12 | "chat": { 13 | "id": 1320141, 14 | "first_name": "John", 15 | "username": "foobar", 16 | "type": "private" 17 | }, 18 | "date": 1717182488, 19 | "voice": { 20 | "duration": 2, 21 | "mime_type": "audio/ogg", 22 | "file_id": "AwACAgEAAxkBAAICh2ZaIBgQeMjTu4_DOlmioRlXy6PGAAKeBQACWnjQRhHL8mHBhl-FNQQ", 23 | "file_unique_id": "AgADngUAAlp40EY", 24 | "file_size": 8537 25 | } 26 | } 27 | } -------------------------------------------------------------------------------- /cel/connectors/telegram/samples/message_callback_query.json: -------------------------------------------------------------------------------- 1 | { 2 | "update_id": 169216979, 3 | "callback_query": { 4 | "id": "5669966673157250073", 5 | "from": { 6 | "id": 13201419, 7 | "is_bot": false, 8 | "first_name": "John", 9 | "last_name": "Doe", 10 | "language_code": "en" 11 | }, 12 | "message": { 13 | "message_id": 193, 14 | "from": { 15 | "id": 6180395732, 16 | "is_bot": true, 17 | "first_name": "lola_chatwoot_test", 18 | "username": "lola_chatwoot_test_bot" 19 | }, 20 | "chat": { 21 | "id": 13201419, 22 | "first_name": "John", 23 | "last_name": "Doe", 24 | "type": "private" 25 | }, 26 | "date": 1687773892, 27 | "text": "Select an option", 28 | "reply_markup": { 29 | "inline_keyboard": [ 30 | [ 31 | { 32 | "text": "Option 1", 33 | "callback_data": "option1" 34 | }, 35 | { 36 | "text": "Option 2", 37 | "callback_data": "option2" 38 | }, 39 | { 40 | "text": "Option 3", 41 | "callback_data": "option3" 42 | } 43 | ] 44 | ] 45 | } 46 | }, 47 | "chat_instance": "-4004316133490442129", 48 | "data": "option3" 49 | } 50 | } -------------------------------------------------------------------------------- /cel/connectors/telegram/samples/message_img.json: -------------------------------------------------------------------------------- 1 | { 2 | "update_id": 169216960, 3 | "message": { 4 | "message_id": 153, 5 | "from": { 6 | "id": 13201419, 7 | "is_bot": false, 8 | 
"first_name": "John", 9 | "last_name": "Doe", 10 | "language_code": "en" 11 | }, 12 | "chat": { 13 | "id": 13201419, 14 | "first_name": "John", 15 | "last_name": "Doe", 16 | "type": "private" 17 | }, 18 | "date": 1687752535, 19 | "photo": [ 20 | { 21 | "file_id": "AgACAgEAAxkBAAOZZJkPV95t1HVuywGHf8jzChMnjrQAAjqrMRvbRtBE0nrkCljiLKcBAAMCAANzAAMvBA", 22 | "file_unique_id": "AQADOqsxG9tG0ER4", 23 | "file_size": 1562, 24 | "width": 90, 25 | "height": 90 26 | }, 27 | { 28 | "file_id": "AgACAgEAAxkBAAOZZJkPV95t1HVuywGHf8jzChMnjrQAAjqrMRvbRtBE0nrkCljiLKcBAAMCAANtAAMvBA", 29 | "file_unique_id": "AQADOqsxG9tG0ERy", 30 | "file_size": 14487, 31 | "width": 320, 32 | "height": 320 33 | }, 34 | { 35 | "file_id": "AgACAgEAAxkBAAOZZJkPV95t1HVuywGHf8jzChMnjrQAAjqrMRvbRtBE0nrkCljiLKcBAAMCAAN4AAMvBA", 36 | "file_unique_id": "AQADOqsxG9tG0ER9", 37 | "file_size": 21976, 38 | "width": 512, 39 | "height": 512 40 | } 41 | ], 42 | "caption": "Esta es una foto😍" 43 | } 44 | } -------------------------------------------------------------------------------- /cel/connectors/telegram/samples/message_imgx2.json: -------------------------------------------------------------------------------- 1 | { 2 | "update_id": 169216962, 3 | "message": { 4 | "message_id": 157, 5 | "from": { 6 | "id": 13201419, 7 | "is_bot": false, 8 | "first_name": "John", 9 | "last_name": "Doe", 10 | "language_code": "en" 11 | }, 12 | "chat": { 13 | "id": 13201419, 14 | "first_name": "John", 15 | "last_name": "Doe", 16 | "type": "private" 17 | }, 18 | "date": 1687753813, 19 | "media_group_id": "13502030505821481", 20 | "photo": [ 21 | { 22 | "file_id": "AgACAgEAAxkBAAOdZJkUVTHMBkwcayNw3r1Mmo-1QvEAAj2rMRvbRtBEhNPWL4ebpNsBAAMCAANzAAMvBA", 23 | "file_unique_id": "AQADPasxG9tG0ER4", 24 | "file_size": 1308, 25 | "width": 66, 26 | "height": 90 27 | }, 28 | { 29 | "file_id": "AgACAgEAAxkBAAOdZJkUVTHMBkwcayNw3r1Mmo-1QvEAAj2rMRvbRtBEhNPWL4ebpNsBAAMCAANtAAMvBA", 30 | "file_unique_id": "AQADPasxG9tG0ERy", 31 | "file_size": 13294, 32 | 
"width": 236, 33 | "height": 320 34 | }, 35 | { 36 | "file_id": "AgACAgEAAxkBAAOdZJkUVTHMBkwcayNw3r1Mmo-1QvEAAj2rMRvbRtBEhNPWL4ebpNsBAAMCAAN4AAMvBA", 37 | "file_unique_id": "AQADPasxG9tG0ER9", 38 | "file_size": 33032, 39 | "width": 520, 40 | "height": 705 41 | } 42 | ] 43 | } 44 | } -------------------------------------------------------------------------------- /cel/connectors/telegram/samples/message_location.json: -------------------------------------------------------------------------------- 1 | { 2 | "update_id": 794947887, 3 | "message": { 4 | "message_id": 720, 5 | "from": { 6 | "id": 132014, 7 | "is_bot": false, 8 | "first_name": "John", 9 | "username": "foobar", 10 | "language_code": "es" 11 | }, 12 | "chat": { 13 | "id": 132014, 14 | "first_name": "John", 15 | "username": "foobar", 16 | "type": "private" 17 | }, 18 | "date": 1717246030, 19 | "location": { 20 | "latitude": -34.391462, 21 | "longitude": -58.69314 22 | } 23 | } 24 | } -------------------------------------------------------------------------------- /cel/connectors/telegram/samples/message_location_place.json: -------------------------------------------------------------------------------- 1 | { 2 | "update_id": 794947896, 3 | "message": { 4 | "message_id": 746, 5 | "from": { 6 | "id": 1320141990, 7 | "is_bot": false, 8 | "first_name": "AMPer", 9 | "username": "cowbe1985", 10 | "language_code": "es" 11 | }, 12 | "chat": { 13 | "id": 1320141990, 14 | "first_name": "AMPer", 15 | "username": "cowbe1985", 16 | "type": "private" 17 | }, 18 | "date": 1717253147, 19 | "location": { 20 | "latitude": -34.384203, 21 | "longitude": -58.682816 22 | }, 23 | "venue": { 24 | "location": { 25 | "latitude": -34.384203, 26 | "longitude": -58.682816 27 | }, 28 | "title": "Parque Aereo Euca Tigre", 29 | "address": "Italia 4950", 30 | "foursquare_id": "51eb33a9498e53ddc41cc1c9", 31 | "foursquare_type": "parks_outdoors/rockclimbing" 32 | } 33 | } 34 | } 
-------------------------------------------------------------------------------- /cel/connectors/telegram/samples/sample_message.json: -------------------------------------------------------------------------------- 1 | { 2 | "update_id":794947636, 3 | "message":{ 4 | "message_id":149, 5 | "from":{ 6 | "id":13201419, 7 | "is_bot":false, 8 | "first_name":"John", 9 | "username":"doe1234", 10 | "language_code":"en" 11 | }, 12 | "chat":{ 13 | "id":13201419, 14 | "first_name":"John", 15 | "username":"doe1234", 16 | "type":"private" 17 | }, 18 | "date":1716850049, 19 | "text":"Hola" 20 | } 21 | } -------------------------------------------------------------------------------- /cel/connectors/vapi/README.md: -------------------------------------------------------------------------------- 1 | # Vapi Connector 2 | 3 | The Vapi connector allows you to seamlessly integrate with VAPI.ai to build voice assistants. 4 | 5 | 6 | ## Create VAPI.ai Account 7 | 8 | You need to create a VAPI.ai account and create a new project to get started. Once you have created a project, you will get an API key that you can use to authenticate your requests. 9 | VAPI.ai has a free tier that allows you to build voice assistants with basic functionality. If you need more advanced features, you can upgrade to a paid plan. 10 | 11 | ## Setup VAPI Custom LLM 12 | 13 | You can use VAPI's Custom LLM feature to connect with Cel.ai. This allows you to build custom voice assistants without sacrificing the power of Cel.ai's framework.
14 | 15 | 16 | -------------------------------------------------------------------------------- /cel/connectors/vapi/model/vapi_lead.py: -------------------------------------------------------------------------------- 1 | import time 2 | from cel.gateway.model.conversation_lead import ConversationLead 3 | from cel.gateway.model.conversation_peer import ConversationPeer 4 | 5 | 6 | class VAPILead(ConversationLead): 7 | """ VAPILead class """ 8 | 9 | def __init__(self, call_object: dict, **kwargs): 10 | """ VAPILead constructor 11 | 12 | Args: 13 | call_object (dict): Call object 14 | Sample: 15 | { 16 | "id": "c7719e5c-ea98-40e1-b1dc-66131da31532", 17 | "orgId": "2ac97024-f9e9-425e-a846-ce5e2e3540f1", 18 | "createdAt": "2024-07-02T05:29:55.903Z", 19 | "updatedAt": "2024-07-02T05:29:55.903Z", 20 | "type": "webCall", 21 | "status": "queued", 22 | "assistantId": "1d9d46ba-618e-4867-8797-5a8dc2f9f42x", 23 | "webCallUrl": "https://vapi.daily.co/E3pM5r6l7Q82gT4hElS7" 24 | } 25 | 26 | """ 27 | super().__init__(**kwargs) 28 | self.call_object: dict = call_object 29 | 30 | 31 | def get_session_id(self): 32 | return f"{self.connector_name}:{self.call_object['id']}" 33 | 34 | def to_dict(self): 35 | data = super().to_dict() 36 | data['call_object'] = self.call_object 37 | return data 38 | 39 | @classmethod 40 | def from_dict(cls, lead_dict): 41 | return VAPILead( 42 | call_object=lead_dict.get("call_object") 43 | ) 44 | 45 | def __str__(self): 46 | return f"VAPILead: {self.call_object['id']}" 47 | 48 | 49 | @classmethod 50 | def from_vapi_message(cls, message: dict, **kwargs): 51 | call_object = message['call'] 52 | metadata = { 53 | 'date': time.time(), 54 | 'raw': message 55 | } 56 | conversation_peer = ConversationPeer( 57 | name='Unknown', 58 | id=call_object['id'], 59 | phone=None, 60 | avatarUrl=None, 61 | email=None 62 | ) 63 | return VAPILead(call_object=call_object, metadata=metadata, conversation_from=conversation_peer, **kwargs) 
class VAPIMessage(Message):
    """Message implementation for the VAPI connector."""

    def __init__(self,
                 lead: ConversationLead,
                 text: str = None,
                 metadata: dict = None,
                 date: int = None
                 ):
        super().__init__(lead, text=text, date=date, metadata=metadata)

    def is_voice_message(self):
        # VAPI delivers already-transcribed text, so there is never a voice
        # attachment to report here.
        return False

    @classmethod
    async def load_from_message(cls, request, connector: BaseConnector = None):
        """Build a VAPIMessage from a raw VAPI request.

        Takes the last entry of request["messages"], which must be a user
        message. Best-effort: malformed requests are logged and yield None
        instead of raising to the connector.
        """
        try:
            messages = request.get("messages")
            if not messages:
                raise ValueError("No messages found in the message_dict")

            # take the last message
            user_message = messages[-1]

            if not user_message:
                raise ValueError("No message found in the message_dict")

            if user_message.get("role") != "user":
                raise ValueError("The message is not from a user")

            text = user_message.get("content")
            date = time.time()
            metadata = {'raw': user_message}
            lead = VAPILead.from_vapi_message(request, connector=connector)
            return VAPIMessage(lead=lead, text=text, date=date, metadata=metadata)
        except Exception as e:
            log.error(f"Error loading VAPI message from message_dict: {e}")
            return None

    def __str__(self):
        # Fix: was mislabeled "TelegramMessage" (copy-paste from the Telegram
        # connector), which made logs misleading.
        return f"VAPIMessage: {self.text}"

    def __repr__(self):
        return f"VAPIMessage: {self.text}"
"object": "chat.completion.chunk", 19 | "system_fingerprint": null, 20 | "usage": null 21 | } -------------------------------------------------------------------------------- /cel/connectors/vapi/sample_data/chunk_vapi_last.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "chatcmpl-9gRMw6KP9kwR5yoieEccx0qwz5vKz", 3 | "choices": [ 4 | { 5 | "delta": { 6 | "content": null, 7 | "function_call": null, 8 | "role": null, 9 | "tool_calls": null 10 | }, 11 | "finish_reason": "stop", 12 | "index": 0, 13 | "logprobs": null 14 | } 15 | ], 16 | "created": 1719902010, 17 | "model": "gpt-3.5-turbo-0125", 18 | "object": "chat.completion.chunk", 19 | "system_fingerprint": null, 20 | "usage": null 21 | } -------------------------------------------------------------------------------- /cel/connectors/vapi/sample_data/request.json: -------------------------------------------------------------------------------- 1 | { 2 | "model": "gpt-3.5-turbo", 3 | "messages": [ 4 | { 5 | "role": "system", 6 | "content": "This is a blank template with minimal defaults, you can change the model, temperature, and messages." 7 | }, 8 | { 9 | "role": "assistant", 10 | "content": "Hi. My name is cell dot I a." 11 | }, 12 | { 13 | "role": "user", 14 | "content": "Hi." 
import time


def create_chunk_response(id: str, text: str = None) -> dict:
    """Build an OpenAI-style ``chat.completion.chunk`` payload for streaming.

    Args:
        id: Identifier echoed back in the chunk (e.g. ``chatcmpl-...``).
        text: Delta content for this chunk. ``None`` marks the final chunk,
            in which case ``finish_reason`` is set to ``"stop"``.

    Returns:
        A dict mimicking the OpenAI streaming-chunk schema consumed by VAPI.
    """
    return {
        "id": id,
        "choices": [
            {
                "delta": {
                    "content": text,
                    "function_call": None,
                    "role": None,
                    "tool_calls": None
                },
                # A None text signals end-of-stream to the consumer
                "finish_reason": "stop" if text is None else None,
                "index": 0,
                "logprobs": None
            }
        ],
        "created": int(time.time()),  # unix seconds
        "model": "gpt-3.5-turbo-0125",
        "object": "chat.completion.chunk",
        "system_fingerprint": None,
        "usage": None
    }
class Document(Component):
    """WhatsApp "document" media component referencing uploaded media by id.

    Serializes to:
        {"type": "document", "document": {"id": ..., "filename": ...}}
    ``filename`` is optional and omitted from the JSON when not set.
    """

    def __init__(self, id: str, filename: str | None = None) -> None:
        self.id = id
        self.filename = filename

    def __str__(self):
        document = {
            "type": "document",
            "document": {
                "id": self.id,
                "filename": self.filename
            }
        }
        # del_none strips None-valued keys so optional fields are omitted
        del_none(document)
        return json.dumps(document)

    def __repr__(self):
        return f"DocumentComponent(id={self.id}, filename={self.filename})"

    def __eq__(self, other):
        # Return NotImplemented instead of raising AttributeError when
        # compared against unrelated types.
        if not isinstance(other, Document):
            return NotImplemented
        return self.id == other.id and self.filename == other.filename


if __name__ == "__main__":
    document = Document("your-media-id", "your-document-filename")
    print(document)
    # Expected output: '{"type": "document", "document": {"id": "your-media-id", "filename": "your-document-filename"}}'
    document = Document("your-media-id")
    print(document)
    # Expected output: '{"type": "document", "document": {"id": "your-media-id"}}'
    # (del_none removes the null filename key entirely)
class DocumentURL(Component):
    """WhatsApp "document" component referencing a document by URL.

    ``provider`` and ``filename`` are optional; None-valued keys are
    stripped from the serialized JSON.
    """

    def __init__(self, link: str, provider: str | None = None, filename: str | None = None) -> None:
        self.link = link
        self.provider = provider
        self.filename = filename

    def __str__(self):
        document = {
            "type": "document",
            "document": {
                "link": self.link,
                # The provider name is wrapped in a {"name": ...} object
                "provider": {"name": self.provider} if self.provider else None,
                "filename": self.filename
            }
        }
        del_none(document)
        return json.dumps(document)

    def __repr__(self):
        return f"DocumentURLComponent(link={self.link}, provider={self.provider}, filename={self.filename})"

    def __eq__(self, other):
        # Guard against comparisons with unrelated types (the original
        # raised AttributeError for non-DocumentURL operands).
        if not isinstance(other, DocumentURL):
            return NotImplemented
        return self.link == other.link and self.provider == other.provider and self.filename == other.filename
class ListItem(Component):
    """Single selectable row of a WhatsApp interactive list message.

    ``id`` defaults to ``title`` when not supplied; ``description`` is
    optional and omitted from the JSON when None.
    """

    def __init__(self, title: str, id: str = None, description: str = None) -> None:
        self.title = title
        self.id = id or title
        self.description = description

    def __str__(self):
        item = {
            "id": self.id,
            "title": self.title,
            "description": self.description
        }
        # Drop the optional description when it is None
        del_none(item)
        return json.dumps(item)

    def __repr__(self):
        return f"ListItem(id={self.id}, title={self.title})"

    def __eq__(self, other):
        # Guard against comparisons with unrelated types instead of raising
        if not isinstance(other, ListItem):
            return NotImplemented
        return self.id == other.id and self.title == other.title


if __name__ == "__main__":
    item = ListItem("id", "title")
    print(item)
class Text(Component):
    """Plain text template component.

    Serializes to: {"type": "text", "text": "..."}
    """

    def __init__(self, text: str) -> None:
        self.text = text

    def __str__(self):
        import json
        # json.dumps escapes quotes, backslashes and control characters;
        # the original f-string interpolation produced invalid JSON when
        # the text contained any of those.
        return json.dumps({"type": "text", "text": self.text})

    def __repr__(self):
        return f"TextComponent(text={self.text})"

    def __eq__(self, other):
        # Guard against comparisons with unrelated types instead of raising
        if not isinstance(other, Text):
            return NotImplemented
        return self.text == other.text


if __name__ == "__main__":
    text = Text("replacement_text")
    print(text)
    # Expected output: '{"type": "text", "text": "replacement_text"}'
class Video(Component):
    """WhatsApp "video" component referencing a video by URL.

    ``provider`` is optional; None-valued keys are stripped from the JSON.
    """

    def __init__(self, link: str, provider: str | None = None) -> None:
        self.link = link
        self.provider = provider

    def __str__(self):
        video = {
            "type": "video",
            "video": {
                "link": self.link,
                # The provider name is wrapped in a {"name": ...} object
                "provider": {"name": self.provider} if self.provider else None
            }
        }
        del_none(video)
        return json.dumps(video)

    def __repr__(self):
        return f"VideoComponent(link={self.link}, provider={self.provider})"

    def __eq__(self, other):
        # Guard against comparisons with unrelated types instead of raising
        if not isinstance(other, Video):
            return NotImplemented
        return self.link == other.link and self.provider == other.provider


if __name__ == "__main__":
    video = Video("the-provider-name/protocol://the-url", "provider-name")
    print(video)
    # Expected output: '{"type": "video", "video": {"link": "the-provider-name/protocol://the-url", "provider": {"name": "provider-name"}}}'
    video = Video("the-provider-name/protocol://the-url")
    print(video)
    # Expected output: '{"type": "video", "video": {"link": "the-provider-name/protocol://the-url"}}'
    # (del_none removes the null provider key entirely)
-------------------------------------------------------------------------------- /cel/connectors/whatsapp/constants.py: -------------------------------------------------------------------------------- 1 | BASE_URL = "https://graph.facebook.com/v18.0" -------------------------------------------------------------------------------- /cel/connectors/whatsapp/model/samples/img_caption_message.json: -------------------------------------------------------------------------------- 1 | { 2 | "object": "whatsapp_business_account", 3 | "entry": [ 4 | { 5 | "id": "103048736088448", 6 | "changes": [ 7 | { 8 | "value": { 9 | "messaging_product": "whatsapp", 10 | "metadata": { 11 | "display_phone_number": "15550463673", 12 | "phone_number_id": "105602452496989" 13 | }, 14 | "contacts": [ 15 | { 16 | "profile": { 17 | "name": "Alejandro" 18 | }, 19 | "wa_id": "5491166937848" 20 | } 21 | ], 22 | "messages": [ 23 | { 24 | "from": "5491166937848", 25 | "id": "wamid.HBgNNTQ5MTE2NjkzNzg0OBUCABIYFDNBNzY1REExREExM0VCRjhFMThBAA==", 26 | "timestamp": "1717858935", 27 | "type": "image", 28 | "image": { 29 | "caption": "Take a look", 30 | "mime_type": "image/jpeg", 31 | "sha256": "dEctuifbo+EGLhzh3H5KAuaS1b9fnn10LMpZBqiSmjA=", 32 | "id": "971782074594239" 33 | } 34 | } 35 | ] 36 | }, 37 | "field": "messages" 38 | } 39 | ] 40 | } 41 | ] 42 | } -------------------------------------------------------------------------------- /cel/connectors/whatsapp/model/samples/img_message.json: -------------------------------------------------------------------------------- 1 | { 2 | "object": "whatsapp_business_account", 3 | "entry": [ 4 | { 5 | "id": "103048736088448", 6 | "changes": [ 7 | { 8 | "value": { 9 | "messaging_product": "whatsapp", 10 | "metadata": { 11 | "display_phone_number": "15550463673", 12 | "phone_number_id": "105602452496989" 13 | }, 14 | "contacts": [ 15 | { 16 | "profile": { 17 | "name": "John Doe" 18 | }, 19 | "wa_id": "134911669XXXXX" 20 | } 21 | ], 22 | "messages": [ 23 | { 24 | 
"from": "134911669XXXXX", 25 | "id": "wamid.HBgNNTQ5MTE2NjkzNzg0OBUCABIYFDNBRkEzN0ZFMDXXXX==", 26 | "timestamp": "1717858794", 27 | "type": "image", 28 | "image": { 29 | "caption": "Take a look", 30 | "mime_type": "image/jpeg", 31 | "sha256": "dEctuifbo+EGLhzh3H5KAuaS1b9fnn10LMpZBqiSmjA=", 32 | "id": "971782074594239" 33 | } 34 | } 35 | ] 36 | }, 37 | "field": "messages" 38 | } 39 | ] 40 | } 41 | ] 42 | } -------------------------------------------------------------------------------- /cel/connectors/whatsapp/model/samples/react_message.json: -------------------------------------------------------------------------------- 1 | { 2 | "object": "whatsapp_business_account", 3 | "entry": [ 4 | { 5 | "id": "103048736088448", 6 | "changes": [ 7 | { 8 | "value": { 9 | "messaging_product": "whatsapp", 10 | "metadata": { 11 | "display_phone_number": "15550463", 12 | "phone_number_id": "1056024524" 13 | }, 14 | "contacts": [ 15 | { 16 | "profile": { 17 | "name": "John Doe" 18 | }, 19 | "wa_id": "123456788" 20 | } 21 | ], 22 | "messages": [ 23 | { 24 | "from": "1234568", 25 | "id": "wamid.HBgNNTQ5MTE2NjkzNzg0OBUCABIYFDNBNTBFM0QwN0E5RjZEA==", 26 | "timestamp": "1718768771", 27 | "type": "reaction", 28 | "reaction": { 29 | "message_id": "wamid.HBgNNTQ5MTE2NjkzNzg0OBUCABIYFDNBMUVGRTM3NzU5MDk==", 30 | "emoji": "\\u2764\\ufe0f" 31 | } 32 | } 33 | ] 34 | }, 35 | "field": "messages" 36 | } 37 | ] 38 | } 39 | ] 40 | } -------------------------------------------------------------------------------- /cel/connectors/whatsapp/model/samples/text_message.json: -------------------------------------------------------------------------------- 1 | { 2 | "object": "whatsapp_business_account", 3 | "entry": [ 4 | { 5 | "id": "103048736088448", 6 | "changes": [ 7 | { 8 | "value": { 9 | "messaging_product": "whatsapp", 10 | "metadata": { 11 | "display_phone_number": "15550463673", 12 | "phone_number_id": "105602452496989" 13 | }, 14 | "contacts": [ 15 | { 16 | "profile": { 17 | "name": "John Doe" 18 
| }, 19 | "wa_id": "139116693XXXX" 20 | } 21 | ], 22 | "messages": [ 23 | { 24 | "from": "139116693XXXX", 25 | "id": "wamid.HBgNNTQ5MTE2NjkzNzg0OBUCABIYFDNBXXXX==", 26 | "timestamp": "1717847190", 27 | "text": { 28 | "body": "hola" 29 | }, 30 | "type": "text" 31 | } 32 | ] 33 | }, 34 | "field": "messages" 35 | } 36 | ] 37 | } 38 | ] 39 | } -------------------------------------------------------------------------------- /cel/connectors/whatsapp/model/whatsapp_lead.py: -------------------------------------------------------------------------------- 1 | from cel.connectors.whatsapp.phone_utils import filter_phone_number 2 | from cel.gateway.model.conversation_lead import ConversationLead 3 | from cel.gateway.model.conversation_peer import ConversationPeer 4 | 5 | 6 | class WhatsappLead(ConversationLead): 7 | 8 | def __init__(self, phone: str, **kwargs): 9 | super().__init__(**kwargs) 10 | self.phone: str = filter_phone_number(str(phone)) 11 | 12 | def get_session_id(self): 13 | return f"{self.connector_name}:{self.phone}" 14 | 15 | def to_dict(self): 16 | data = super().to_dict() 17 | data['phone'] = self.phone 18 | return data 19 | 20 | @classmethod 21 | def from_dict(cls, lead_dict): 22 | return WhatsappLead( 23 | phone=lead_dict.get("phone"), 24 | metadata=lead_dict.get("metadata") 25 | ) 26 | 27 | def __str__(self): 28 | return f"WhatsappLead: {self.phone}" 29 | 30 | @classmethod 31 | def from_whatsapp_message(cls, data: dict, **kwargs): 32 | assert isinstance(data, dict), "data must be a dictionary" 33 | 34 | phone = data.get("entry")[0].get("changes")[0].get("value").get("contacts")[0].get("wa_id") 35 | 36 | metadata = { 37 | 'phone_number_id': data.get("entry")[0].get("changes")[0].get("value").get("metadata").get("phone_number_id"), 38 | 'display_phone_number': data.get("entry")[0].get("changes")[0].get("value").get("metadata").get("display_phone_number"), 39 | 'message_id': data.get("entry")[0].get("id"), 40 | 'date': 
def filter_phone_number(phone_number: str):
    """Normalize Argentine and Mexican WhatsApp phone numbers.

    - ``54911...`` (AR mobile with leading 9) becomes ``5411...``
    - ``521...``  (MX mobile with leading 1) becomes ``52...``

    Any other number is returned unchanged. Returns ``None`` for ``None``.
    """
    if phone_number is None:
        return None

    if phone_number.startswith('54911'):
        # replace 54911 with 5411
        return '5411' + phone_number[5:]

    if phone_number.startswith('521'):
        # Only rewrite the prefix. The original used str.replace, which
        # also rewrote every later occurrence of "521" inside the number.
        return '52' + phone_number[3:]

    return phone_number
import json
from abc import ABC


class ConversationPeer(ABC):
    """A conversation participant (sender or recipient) with contact details."""

    def __init__(self, name: str, metadata: dict = None, id: str = None, phone: str = None, avatarUrl: str = None, email: str = None):
        self.name = name
        self.metadata = metadata
        self.id = id
        self.phone = phone
        self.avatarUrl = avatarUrl
        self.email = email

    def to_dict(self):
        """Serialize this peer to a plain dictionary."""
        return {
            'name': self.name,
            'metadata': self.metadata,
            'id': self.id,
            'phone': self.phone,
            'avatarUrl': self.avatarUrl,
            'email': self.email
        }

    def to_json(self):
        """Serialize this peer to a JSON string."""
        return json.dumps(self.to_dict())

    def __str__(self):
        return f"ConversationPeer: {self.name}"

    @classmethod
    def from_dict(cls, peer_dict):
        """Deserialize a peer from a dict produced by ``to_dict``.

        Uses ``cls`` (the original hardcoded ``ConversationPeer``) so that
        subclasses round-trip to their own type.
        """
        return cls(
            name=peer_dict.get("name"),
            metadata=peer_dict.get("metadata"),
            id=peer_dict.get("id"),
            phone=peer_dict.get("phone"),
            avatarUrl=peer_dict.get("avatarUrl"),
            email=peer_dict.get("email")
        )


class ConversationPeerEncoder(json.JSONEncoder):
    """JSON encoder that serializes ConversationPeer objects via ``to_dict``."""

    def default(self, obj):
        if isinstance(obj, ConversationPeer):
            return obj.to_dict()
        return super().default(obj)
@dataclass
class MessageGatewayContext(ABC):
    """Runtime context passed to connectors/middlewares at gateway startup."""
    # FastAPI router available for registering HTTP endpoints
    router: APIRouter
    # Base webhook URL — presumably the externally reachable address that
    # messaging providers call back into; confirm against connector usage.
    webhook_url: str
    # The FastAPI application hosting the gateway
    app: FastAPI
class OutgoingMessageType:
    """String constants naming every supported outgoing message flavor."""
    TEXT = "text"
    IMAGE = "image"
    AUDIO = "audio"
    VOICE = "voice"
    VIDEO = "video"
    DOCUMENT = "document"
    LOCATION = "location"
    CONTACT = "contact"

    SELECT = "select"
    LINK = "link"
    BUTTONS = "buttons"


class OutgoingMessage(ABC):
    """This class represents a generic outgoing message object"""

    def __init__(self,
                 # outgoing message type:
                 type: str,
                 lead: ConversationLead,
                 metadata: dict = None,
                 attachments: list[FileAttachment] = None,
                 is_partial: bool = True,
                 is_private: bool = False
                 ):
        # Creation timestamp (unix seconds, local clock)
        self.date = datetime.datetime.now().timestamp()
        self.attachments: list[FileAttachment] = attachments
        self.lead = lead
        self.metadata = metadata
        # is_partial appears to mark intermediate (streaming) output rather
        # than a final message — confirm against gateway/middleware usage.
        self.is_partial = is_partial
        self.is_private = is_private
        self.type = type

        assert isinstance(self.lead, ConversationLead),\
            "lead must be an instance of ConversationLead"
        assert self.metadata is None or isinstance(self.metadata, dict),\
            "metadata must be a dictionary"
        assert self.attachments is None or isinstance(self.attachments, list),\
            "attachments must be a list"
        # NOTE(review): OutgoingMessageType.VOICE is defined above but absent
        # from this whitelist, so constructing a VOICE message fails the
        # assert — confirm whether that is intentional.
        assert self.type in [OutgoingMessageType.TEXT,
                             OutgoingMessageType.IMAGE,
                             OutgoingMessageType.AUDIO,
                             OutgoingMessageType.VIDEO,
                             OutgoingMessageType.DOCUMENT,
                             OutgoingMessageType.LOCATION,
                             OutgoingMessageType.CONTACT,
                             OutgoingMessageType.SELECT,
                             OutgoingMessageType.BUTTONS,
                             OutgoingMessageType.LINK],\
            "type must be a valid OutgoingMessageType"
class OutgoingTextMessage(OutgoingMessage):
    """Outgoing message carrying a plain text payload."""

    def __init__(self,
                 content: str,
                 lead: ConversationLead,
                 **kwargs
                 ):
        super().__init__(OutgoingMessageType.TEXT, lead, **kwargs)
        self.content = content

        assert isinstance(self.content, str), "text must be a string"

    def __str__(self):
        return self.content


    @staticmethod
    def from_dict(data: dict) -> 'OutgoingTextMessage':
        """Build an OutgoingTextMessage from a plain dictionary."""
        assert isinstance(data, dict),\
            "data must be a dictionary"
        assert "content" in data,\
            "data must have a 'content' key"
        assert "lead" in data,\
            "data must have a 'lead' key"
        assert isinstance(data["lead"], ConversationLead),\
            "lead must be an instance of ConversationLead"

        # Rehydrate any serialized attachments before constructing
        files = []
        for raw in data.get("attachments", []):
            files.append(FileAttachment.from_dict(raw))

        return OutgoingTextMessage(
            content=data["content"],
            lead=data["lead"],
            metadata=data.get("metadata"),
            attachments=files,
            is_partial=data.get("is_partial", True)
        )

    @staticmethod
    def description() -> str:
        return """For a simple text message, use the following structure:
        {
            "type": "text",
            "content": "message content",
        }"""
You should try smarter enhancers.") 13 | 14 | async def __call__(self, lead: ConversationLead, 15 | text: str, 16 | is_partial: bool = True) -> OutgoingMessage: 17 | 18 | return OutgoingTextMessage( 19 | lead=lead, 20 | content=text 21 | ) 22 | 23 | -------------------------------------------------------------------------------- /cel/middlewares/__init__.py: -------------------------------------------------------------------------------- 1 | from .deepgram_stt import DeepgramSTTMiddleware 2 | from .geodecoding import GeodecodingMiddleware 3 | from .in_mem_blacklist import InMemBlackListMiddleware 4 | from .session_middleware import SessionMiddleware 5 | from .redis_blacklist import RedisBlackListMiddleware 6 | from .redis_blacklist_async import RedisBlackListAsyncMiddleware -------------------------------------------------------------------------------- /cel/middlewares/chatwoot/model.py: -------------------------------------------------------------------------------- 1 | 2 | from dataclasses import dataclass 3 | 4 | ChatwootMessageTypes = ["incoming", "outgoing"] 5 | 6 | @dataclass 7 | class ChatwootConversationRef: 8 | id: int 9 | identifier: str 10 | updated_at: int 11 | 12 | 13 | @dataclass 14 | class InboxRef: 15 | id: int 16 | name: str 17 | 18 | @dataclass 19 | class ContactLead: 20 | identifier: str 21 | name: str = None 22 | email: str = None 23 | phone_number: str = None 24 | 25 | -------------------------------------------------------------------------------- /cel/middlewares/chatwoot/phone_util.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | 4 | def format_to_e164(phone_number): 5 | # Eliminar todos los caracteres que no sean dígitos 6 | digits_only = re.sub(r'\D', '', phone_number) 7 | 8 | # Agregar el signo '+' al inicio 9 | return f'+{digits_only}' 10 | 11 | -------------------------------------------------------------------------------- /cel/middlewares/in_mem_blacklist.py: 
@dataclass
class BlackListEntry(ABC):
    # Free-text reason for the ban (may be None).
    reason: str = None
    # Unix timestamp of when the entry was created.
    date: int = field(default_factory=lambda: int(time.time()))
    # Lifetime in seconds; None means the ban never expires.
    ttl: int = None


class InMemBlackListMiddleware:
    """Middleware to block users based on a blacklist. The blacklist is stored in memory."""

    def __init__(self,
                 black_list: dict[str, BlackListEntry] = None,
                 reject_message: str = "You are banned from using this service"):
        # Maps session_id -> BlackListEntry
        self.black_list = black_list or {}
        self.reject_message = reject_message

    async def __call__(self, message: Message, connector: BaseConnector, assistant: BaseAssistant):
        """Gateway hook: returns False (drop the message) for blacklisted users."""
        assert isinstance(message, Message), "Message must be a Message object"

        id = message.lead.get_session_id()
        source = message.lead.connector_name
        entry = self.get_entry(id)

        # Unknown user: let the message through.
        if not entry:
            return True

        log.critical(f"User {id} from {source} is blacklisted. Reason: {entry.reason}")
        if connector:
            await connector.send_text_message(message.lead, self.reject_message)
        return False

    def add_to_black_list(self, id: str, reason: str = None, ttl: int = None):
        """Ban a session id; ttl (seconds) bounds the ban, None bans forever."""
        self.black_list[id] = BlackListEntry(reason=reason, date=int(time.time()), ttl=ttl)

    def remove_from_black_list(self, id: str):
        self.black_list.pop(id, None)

    def get_entry(self, id: str):
        """Return the entry for id, lazily evicting it if its TTL elapsed."""
        entry = self.black_list.get(id)
        if entry and entry.ttl and entry.date + entry.ttl < time.time():
            self.remove_from_black_list(id)
            return None
        return entry
class RedisBlackListAsyncMiddleware:
    """Middleware to block users based on a blacklist. The blacklist is stored in a Redis database."""

    def __init__(self, redis: str | Redis = None, key_prefix: str = "blacklist"):
        self.client = redis if isinstance(redis, Redis) else aioredis.from_url(redis or 'redis://localhost:6379/0')
        # All entries live in a single Redis hash: field = session id, value = JSON entry.
        self.black_list_key = key_prefix

    async def __call__(self, message: Message, connector: BaseConnector, assistant: BaseAssistant):
        """Gateway hook: returns False (drop the message) for blacklisted users."""
        assert isinstance(message, Message), "Message must be a Message object"

        id = message.lead.get_session_id()
        source = message.lead.connector_name
        # get_entry also evicts expired bans, so per-entry TTL is honored here.
        entry = await self.get_entry(id)
        if entry:
            log.critical(f"User {id} from {source} is blacklisted. Reason: {entry['reason']}")
            return False
        return True

    async def add_to_black_list(self, id: str, reason: str = None, ttl: int = None):
        """Ban a session id. ttl (seconds) bounds the ban; None bans forever.

        BUG FIX: the previous implementation called EXPIRE on the whole hash,
        which wiped EVERY blacklisted user as soon as one TTL elapsed (Redis
        key-level TTL cannot expire individual hash fields). The TTL is now
        stored inside the entry and enforced on read in get_entry, mirroring
        InMemBlackListMiddleware.
        """
        entry = BlackListEntry(reason=reason, date=int(time.time()), ttl=ttl)
        await self.client.hset(self.black_list_key, id, json.dumps(asdict(entry)))

    async def remove_from_black_list(self, id: str):
        await self.client.hdel(self.black_list_key, id)

    async def get_entry(self, id: str):
        """Return the decoded entry for id, lazily evicting it if its TTL elapsed."""
        raw = await self.client.hget(self.black_list_key, id)
        if not raw:
            return None
        entry = json.loads(raw)
        ttl = entry.get('ttl')
        if ttl and entry.get('date', 0) + ttl < time.time():
            await self.remove_from_black_list(id)
            return None
        return entry
class RAGRetriever:
    """Abstract interface for RAG retrieval backends."""

    @abstractmethod
    def search(self,
               query: str,
               top_k: int = 1,
               history: list[ContextMessage] = None,
               state: dict = None) -> list[VectorRegister]:
        """Return the top_k vector registers most relevant to `query`.

        :param query: free-text query to embed and match against the store.
        :param top_k: maximum number of results to return.
        :param history: optional conversation history for context-aware retrieval.
        :param state: optional conversation state; implementations should treat
            None as an empty dict. (BUG FIX: the default used to be a mutable
            `{}`, which is evaluated once and shared across every call — the
            classic mutable-default-argument pitfall.)
        :raises NotImplementedError: always; subclasses must override.
        """
        raise NotImplementedError()
@dataclass
class VectorRegister(ABC):
    """One stored embedding record: the vector plus its source text and metadata."""
    # Unique identifier of the register within the store.
    id: str
    # Embedding computed from `text`.
    vector: np.ndarray
    # Original text the vector was derived from.
    text: str
    # Arbitrary additional attributes attached by the caller.
    metadata: dict

    def __str__(self):
        # Printing a register shows only its text content.
        return str(self.text)
class BaseCache(ABC):
    """Contract for memoization backends used by the text2vec providers."""

    @abstractmethod
    def memoize(self, typed: bool, expire: int, tag: str):
        """Return a decorator that caches results of the wrapped callable.

        :param typed: whether the cache key should distinguish argument types/kwargs.
        :param expire: entry lifetime in seconds.
        :param tag: namespace prefix for cache keys.
        """
        pass
class RedisCache(BaseCache):
    """Memoization backend that persists results in Redis.

    NOTE: values are stored via str() and come back as str on a cache hit,
    so only memoize callables whose results round-trip through a string.
    """

    def __init__(self, redis: str | Redis = None):
        self.client = redis if isinstance(redis, Redis) else Redis.from_url(redis or 'redis://localhost:6379/0')

    def memoize(self, typed: bool, tag: str, expire: int = None):
        """Return a decorator caching the wrapped callable's results in Redis.

        :param typed: include kwargs in the cache key as well as positional args.
        :param tag: namespace prefix for the cache keys.
        :param expire: optional lifetime in seconds for each cached entry.
        """
        def decorator(func):
            def wrapper(*args, **kwargs):
                key = f"{tag}:{args}:{kwargs}" if typed else f"{tag}:{args}"
                cached_result = self.client.get(key)
                # decode the cached result
                if cached_result:
                    cached_result = cached_result.decode('utf-8')
                    return cached_result

                result = func(*args, **kwargs)

                # BUG FIX: previously a None result fell into the `else`
                # branch and was stored as the literal string "None",
                # poisoning the cache. Only real results are cached now,
                # and `expire` is honored independently of the value.
                if result is not None:
                    if expire:
                        self.client.setex(key, expire, str(result))
                    else:
                        self.client.set(key, str(result))

                return result

            return wrapper
        return decorator
def ollama_cached_text2vec(text: str, model: str) -> list[float]:
    """Embed `text` with the given Ollama model and return the raw vector.

    Module-level function (not a method) so disk-cache memoization can key
    on the arguments alone.
    """
    response = ollama.embeddings(model=model, prompt=text)
    return response["embedding"]
class KeyValueStore(ABC):
    """Abstract key/value storage contract."""

    @abstractmethod
    def get(self, key, callback=None):
        """Return the value for `key`; `callback` may lazily compute a missing value."""
        raise NotImplementedError()

    @abstractmethod
    def set(self, key, value, ttl=None):
        """Store `value` under `key`, optionally expiring after `ttl` seconds."""
        raise NotImplementedError()

    @abstractmethod
    def delete(self, key):
        """Remove `key` if present."""
        raise NotImplementedError()

    @abstractmethod
    def clear(self):
        """Remove every entry."""
        raise NotImplementedError()

    @abstractmethod
    def get_all(self):
        """Return all stored entries."""
        raise NotImplementedError()


class ListStore(ABC):
    """Abstract append-only list storage contract."""

    @abstractmethod
    def list_append(self, key, value, ttl=None):
        """Append `value` to the list at `key`, optionally expiring after `ttl` seconds."""
        raise NotImplementedError()

    @abstractmethod
    def list_clear(self, key):
        """Delete the whole list stored at `key`."""
        raise NotImplementedError()

    @abstractmethod
    def list_get(self, key):
        """Return every element of the list at `key`."""
        raise NotImplementedError()

    @abstractmethod
    def list_get_last(self, key, count: int):
        """Return the last `count` elements of the list at `key`."""
        raise NotImplementedError()
class MemoryCache(KeyValueStore):
    """MemoryCache is a CacheStore implementation that stores cache data in memory using cachetools.LRUCache.

    Args:
        key_prefix (str): A key prefix for the cache store (kept for interface
            parity with other stores; not applied to keys here).
        memory_maxsize (int): The maximum number of items to store in the cache
            before least-recently-used eviction kicks in.
    """

    def __init__(self, key_prefix, memory_maxsize=1000):
        self.cache = cachetools.LRUCache(maxsize=memory_maxsize)
        self.key_prefix = key_prefix

    def get(self, key, callback=None):
        """Retrieves the value associated with the given key from the cache.

        If the key is not in the cache, it calls the provided callback function
        to compute the value, caching it when it is not None.

        Args:
            key (str): The cache key.
            callback (callable): A function that computes the value for the given
                key if it is not in the cache (optional).

        Returns:
            The value associated with the key, or None if the key is not in the cache.
        """
        data = self.cache.get(key)
        if data is not None:
            return data

        if callback:
            data = callback()
            if data is not None:
                self.set(key, data)
            return data

        return None

    def set(self, key, value, ttl=None):
        """Sets the value for the given key in the cache.

        BUG FIX: `ttl` was missing from the signature even though the
        KeyValueStore ABC declares `set(self, key, value, ttl=None)`, so any
        caller going through the interface and passing a ttl got a TypeError.
        LRUCache has no per-entry expiry, so `ttl` is accepted for
        compatibility but intentionally ignored.

        Args:
            key (str): The cache key.
            value (any): The value to store in the cache.
            ttl (int): Ignored; kept for KeyValueStore interface compatibility.
        """
        self.cache[key] = value

    def get_all(self):
        """Returns all items in the cache.

        Returns:
            A view of key-value pairs in the cache.
        """
        return self.cache.items()

    def delete(self, key):
        """Deletes the key from the cache (no-op when absent).

        Args:
            key (str): The cache key.
        """
        self.cache.pop(key, None)

    def clear(self):
        """Clears the cache."""
        self.cache.clear()

    def all(self):
        """Alias of get_all(), kept for backward compatibility."""
        return self.get_all()
self.store.get(key, []) 26 | res = [json.loads(v) for v in values] 27 | # remove None elements 28 | res = [r for r in res if r] 29 | 30 | return res 31 | 32 | async def clear_history(self, sessionId: str, keep_last_messages=None): 33 | key = self.get_key(sessionId) 34 | if keep_last_messages: 35 | self.store[key] = self.store[key][:keep_last_messages] 36 | else: 37 | self.store.pop(key, None) 38 | 39 | async def get_history_slice(self, sessionId: str, start, end): 40 | key = self.get_key(sessionId) 41 | history = self.store.get(key, [])[start:end] 42 | return [json.loads(h) for h in history] 43 | 44 | async def get_last_messages(self, sessionId: str, count): 45 | key = self.get_key(sessionId) 46 | history = self.store.get(key, [])[-count:] 47 | return [json.loads(h) for h in history] 48 | 49 | async def close_conversation(self, sessionId: str): 50 | raise NotImplementedError("Method not implemented.") -------------------------------------------------------------------------------- /cel/stores/history/history_redis_provider.py: -------------------------------------------------------------------------------- 1 | # DEPERECATED SYNC VERSION 2 | 3 | # import json 4 | # from redis import Redis 5 | # from loguru import logger as log 6 | # from prompter.stores.history.base_history_provider import BaseHistoryProvider 7 | 8 | 9 | # class RedisHistoryProvider(BaseHistoryProvider): 10 | 11 | # def __init__(self, redis: str | Redis, key_prefix: str = "h"): 12 | # self.client = redis if isinstance(redis, Redis) else Redis.from_url(redis) 13 | # self.key_prefix = key_prefix 14 | # log.debug(f"Create: RedisHistoryProvider") 15 | 16 | # def get_key(self, sessionId: str): 17 | # return f"{self.key_prefix}:{sessionId}" 18 | 19 | # def append_to_history(self, sessionId: str, entry, metadata=None, ttl=None): 20 | # key = self.get_key(sessionId) 21 | # value = json.dumps(entry) 22 | # self.client.rpush(key, value) 23 | 24 | # # set expiration to 24 hours 25 | # self.client.expire(key, 
class RedisHistoryProviderAsync(BaseHistoryProvider):
    """Conversation-history provider that persists entries through a ListStore."""

    def __init__(self, store: ListStore, key_prefix: str = "h", ttl=None):
        """ Create a new RedisHistoryProviderAsync instance.
        :param store: Redis store
        :param key_prefix: Prefix for the keys
        :param ttl: Default time to live in seconds for history.
            If None, it will never expire
        """

        print(f"Create: RedisHistoryProviderAsync")
        self.store = store
        self.key_prefix = key_prefix
        self.ttl = ttl

    def get_key(self, sessionId: str):
        return f"{self.key_prefix}:{sessionId}"

    async def append_to_history(self, sessionId: str, entry, metadata=None, ttl=None):
        """Append a JSON-serialized entry to the session history.

        BUG FIX: this used `self.ttl or ttl`, which silently discarded a
        per-call ttl whenever an instance default was configured. An explicit
        argument now takes precedence over the instance default.
        """
        key = self.get_key(sessionId)
        value = json.dumps(entry)
        await self.store.list_append(key, value, ttl if ttl is not None else self.ttl)

    async def get_history(self, sessionId: str):
        """Return the full decoded history, dropping falsy entries."""
        key = self.get_key(sessionId)
        values = await self.store.list_get(key)
        res = [json.loads(v) for v in values]
        # remove None elements
        res = [r for r in res if r]
        return res

    async def clear_history(self, sessionId: str, keep_last_messages=None):
        """Clear a session's history, optionally keeping the newest N entries."""
        key = self.get_key(sessionId)

        if keep_last_messages:
            # Keep only the newest messages: read them back, wipe, re-append.
            # NOTE(review): the re-appended entries are written without a TTL —
            # confirm whether the instance default should be re-applied here.
            msgs = await self.store.list_get_last(key, keep_last_messages)
            await self.store.list_clear(key)
            for msg in msgs:
                await self.store.list_append(key, msg)
        else:
            await self.store.list_clear(key)

    async def get_last_messages(self, sessionId: str, count):
        """Return the decoded last `count` history entries."""
        key = self.get_key(sessionId)
        history = await self.store.list_get_last(key, count)
        return [json.loads(h) for h in history]

    async def close_conversation(self, sessionId: str):
        raise NotImplementedError("Method not implemented.")
class InMemoryStateProvider(BaseChatStateProvider):
    """Volatile, dict-backed chat-state provider for tests and local development.

    All state lives in process memory and is lost on restart. TTL arguments
    are accepted for interface compatibility but are NOT enforced.
    """

    def __init__(self, key_prefix: str = "s"):
        super().__init__()
        log.warning(f"Create InMemoryStateProvider - Avoid using this in production.")
        # Maps "<prefix>:<sessionId>" -> dict of per-session key/value state.
        self.store = {}
        self.prefix = key_prefix

    def get_key(self, sessionId):
        """Build the namespaced storage key for a session."""
        return f"{self.prefix}:{sessionId}"

    async def set_key_value(self, sessionId: str, key: str, value, ttl_in_seconds=None):
        """Set a single value in the session store.

        `ttl_in_seconds` is ignored by the in-memory backend.
        """
        hash_key = self.get_key(sessionId)
        self.store.setdefault(hash_key, {})[key] = value

    async def get_key_value(self, sessionId: str, key: str):
        """Return the stored value for `key`, or None if absent."""
        hash_key = self.get_key(sessionId)
        return self.store.get(hash_key, {}).get(key)

    async def clear_store(self, sessionId: str):
        """Drop the whole state dict for one session (no-op if absent)."""
        self.store.pop(self.get_key(sessionId), None)

    async def clear_all_stores(self, key_pattern: str = None):
        """Drop the state of every session.

        Fix: the abstract base declares a `key_pattern` parameter; accept it
        here so callers written against the interface don't break. Pattern
        filtering is not supported in-memory — any pattern clears everything.
        """
        self.store = {}

    async def get_store(self, sessionId: str):
        """Return the whole state dict for a session, or None if absent."""
        return self.store.get(self.get_key(sessionId))

    async def set_store(self, sessionId: str, store, ttl=None):
        """Replace the whole state dict for a session.

        `ttl` is accepted for interface compatibility but not enforced.
        """
        self.store[self.get_key(sessionId)] = store
class DeepgramAdapter(BaseVoiceProvider):
    """Voice provider backed by Deepgram's pre-recorded transcription API.

    Credentials are resolved from the environment (see ClientOptionsFromEnv).
    Only STT is implemented; TTS is not supported by this adapter.
    """

    def __init__(self,
                 smart_format: bool = True,
                 detect_language: bool = True,
                 model: str = None,
                 **kwargs):
        # Empty API key: ClientOptionsFromEnv pulls the key from the environment.
        self.deepgram = DeepgramClient("", ClientOptionsFromEnv())

        self.options = PrerecordedOptions(
            model=model or "nova-2-general",
            # Fix: the original used `smart_format or True` and
            # `detect_language or True`, which silently forced both flags to
            # True even when the caller explicitly passed False.
            smart_format=smart_format,
            detect_language=detect_language,
            **kwargs
        )

    async def STT(self, audio: bytes | str) -> str:
        """Transcribe audio and return the top-alternative transcript.

        Args:
            audio: Raw audio bytes, or a URL string pointing at the audio.

        Raises:
            TypeError: If `audio` is neither bytes nor str.
        """
        if isinstance(audio, bytes):
            payload = {"buffer": audio}
            # Fix: the original passed the raw `audio` instead of the
            # `payload` dict the SDK expects for transcribe_file.
            response = await self.deepgram.listen.asyncprerecorded.v("1").transcribe_file(payload, self.options)
        elif isinstance(audio, str):
            payload = {"url": audio}
            response = await self.deepgram.listen.asyncprerecorded.v("1").transcribe_url(payload, self.options)
        else:
            # Fix: the original fell through with `response` unbound for any
            # other input type, raising a confusing UnboundLocalError.
            raise TypeError(f"audio must be bytes or str, got {type(audio).__name__}")

        transcript = response["results"]["channels"][0]["alternatives"][0]["transcript"]
        return transcript

    async def TTS(self, text: str, voice: str = None, settings: Any = None):
        """Text-to-speech is not supported by this adapter."""
        raise NotImplementedError
Cel.ai is a Python framework designed to accelerate the development of omnichannel virtual assistants.
Connectors in Cel.ai are responsible for translating back and forth between the platform-specific message format and Cel.ai's agnostic message format.
So connectors are a decoupling layer between the messaging platform and Cel.ai's message format.

Each connector is responsible for handling the specifics of a particular messaging platform, and registers with the Message Gateway the endpoints required to receive messages from the platform.


## Webhook Overview

Message Gateway is the core component of Cel.ai that handles the communication between the assistant and the connectors. It is responsible for processing incoming messages, invoking the assistant, and sending responses back to the connectors.

Message Gateway runs a single FastAPI server that listens for incoming messages for all registered connectors. When a connector registers into the gateway, it provides the routes that the gateway should listen to for incoming messages.

21 | 22 |

23 | 24 | So in order to receive messages from a messaging platform, you need a public URL that the platform can send messages to. This is where a webhook comes into play. Usually messaging platforms require a public HTTPS endpoint to send messages to your assistant. 25 | 26 | You can use tools like ngrok to create a secure tunnel to your local server, providing a public URL that can be used to receive webhooks and other HTTP requests from external services. Take a look at the [Webhook URL with ngrok](./webhook_url.md) guide to learn how to set up ngrok and expose your local server to the internet securely. 27 | 28 | Some users have reported that they have been able to use [pinggy.io](https://pinggy.io/) to create a public URL for their local server. You can try it out and see if it works for you. 29 | 30 | ???+ warning "Whatsapp and ngrok" 31 | 32 | Whatsapp may not work with **ngrok** free tier. Today 24 June 2024 only works with **ngrok paid tier**. 33 | 34 | 35 | ## Supported Connectors 36 | 37 | Cel.ai comes with out-of-the-box support for the following connectors: 38 | 39 | - **WhatsApp** 40 | - **Telegram** 41 | - **VAPI.com** 42 | - **CLI** 43 | -------------------------------------------------------------------------------- /examples/10_mongo_atlas_rag/qa.md: -------------------------------------------------------------------------------- 1 | # Smoothy Inc. - Q&A 2 | 3 | ## About Smoothy Inc. 4 | 5 | ### Q: What is Smoothy Inc.?** 6 | Smoothy Inc. is a company that specializes in creating smoothies in food trucks. We have been in business for over 10 years and have served thousands of customers. 7 | 8 | ### Q: What makes Smoothy Inc. unique?** 9 | Our smoothies are made with fresh fruits and vegetables and are a great way to get your daily dose of vitamins and minerals. 
10 | 11 | ## Products and Ingredients 12 | 13 | ### Q: What ingredients do you use in your smoothies?** 14 | We use fresh fruits and vegetables to ensure that our smoothies are both delicious and nutritious. 15 | 16 | ### Q: Are your smoothies organic?** 17 | While not all of our ingredients are certified organic, we prioritize sourcing high-quality, fresh produce. 18 | 19 | ### Q: Do you offer any vegan or gluten-free options?** 20 | Yes, we offer a variety of vegan and gluten-free smoothies to cater to different dietary needs. 21 | 22 | ## Locations and Services 23 | 24 | ### Q: Where can I find Smoothy Inc. food trucks?** 25 | Our food trucks are located in various locations. You can check our website or social media pages for the latest updates on our locations. 26 | 27 | ### Q: Do you offer catering services?** 28 | Yes, we offer catering services for events. Please contact us for more details and to make arrangements. 29 | 30 | ## Health and Nutrition 31 | 32 | ### Q: Are your smoothies healthy?** 33 | Yes, our smoothies are made with fresh fruits and vegetables, providing a great source of vitamins and minerals. 34 | 35 | ### Q: Can I customize my smoothie?** 36 | Absolutely! You can customize your smoothie by choosing from our selection of fresh ingredients. 37 | 38 | ## Customer Experience 39 | 40 | ### Q: How can I provide feedback about my experience?** 41 | We value your feedback! You can provide feedback through our website or social media pages. 42 | 43 | ### Q: Do you have a loyalty program?** 44 | Yes, we offer a loyalty program for our regular customers. Please ask our staff for more details. 45 | 46 | ## Contact Information 47 | 48 | ### Q: How can I contact Smoothy Inc.?** 49 | You can contact us through our website, social media pages, or by visiting one of our food trucks. 50 | 51 | ### Q: Do you have a customer service phone number?** 52 | Yes, our customer service phone number is available on our website. 
53 | -------------------------------------------------------------------------------- /examples/13_agentic_router_experimental/balance_agent.py: -------------------------------------------------------------------------------- 1 | 2 | # Import Cel.ai modules 3 | from cel.assistants.macaw.macaw_assistant import MacawAssistant 4 | from cel.prompt.prompt_template import PromptTemplate 5 | from cel.rag.providers.markdown_rag import MarkdownRAG 6 | from cel.assistants.function_context import FunctionContext 7 | from cel.assistants.function_response import RequestMode 8 | from cel.assistants.common import Param 9 | from cel.stores.history.base_history_provider import BaseHistoryProvider 10 | from cel.stores.state.base_state_provider import BaseChatStateProvider 11 | 12 | 13 | def build_balance_agent(base_prompt: str = ''): 14 | 15 | # Setup prompt 16 | prompt = base_prompt + """You are a virtual assistant specializing in banking balance enquiries. 17 | Your goal is to provide clients with accurate information about the balance of their bank accounts such as savings and checking accounts. 18 | You answer questions like 'What is my current balance?' or 'How much money do I have in my savings account?'. 19 | Today is {date} 20 | """ 21 | 22 | prompt_template = PromptTemplate(prompt) 23 | 24 | ast = MacawAssistant( 25 | # For observability purposes, it is recommended to provide a name to the assistant 26 | name="Balance Agent", 27 | 28 | # This is the description of the assistant, it will be used by AssistantRouter 29 | # to match the assistant with the user intent 30 | description="""You are a virtual assistant specializing in balance inquiries. 
def build_transfer_agent(base_prompt: str = ''):
    """Build the bank-transfer specialist MacawAssistant.

    Args:
        base_prompt: Optional prompt prefix shared across agents.

    Returns:
        A configured MacawAssistant instance.
    """
    # For matching and observability purposes, its required to provide a name to the assistant
    name = "Transfer Agent"

    # This is the description of the assistant, it will be used only in semantic routers.
    # Use name for AgenticRouter.
    # Fix: the original statement ended with a trailing comma, which made
    # `description` a 1-tuple instead of a string and broke router matching.
    description = """You are a virtual assistant specializing in bank transfers.
    Your goal is to help customers transfer money between their accounts or to third-party accounts."""

    # Setup prompt
    prompt = base_prompt + """You are a virtual assistant specializing in bank transfers.
    Your goal is to help customers transfer money between their accounts or to third-party accounts.
    Answer questions like 'How can I transfer money?' or 'I want to send $100 to John Doe's account.'.
    Today is {date}
    """

    ast = MacawAssistant(
        name=name,
        description=description,
        prompt=PromptTemplate(prompt)
    )

    # ----------------------------------------------------------------------
    # TODO: Add RAG here
    # TODO: Add Tooling here
    # ----------------------------------------------------------------------

    return ast
def build_onboarding_agent(base_prompt: str = ''):
    """Build the Foobar Bank onboarding MacawAssistant.

    Registers a `register_user` function tool that flips the session's
    `is_registered` state flag.

    Args:
        base_prompt: Optional prompt prefix shared across agents.

    Returns:
        A configured MacawAssistant instance.
    """

    # Setup prompt.
    # Fix: the original prompt opened with "specializing in banking balance
    # enquiries" — copy-pasted from the balance agent and contradicting this
    # agent's onboarding role — and had a grammar slip ("goal is help").
    prompt = base_prompt + """You are a virtual assistant specializing in onboarding new clients to Foobar Bank.
    Your goal is to help clients in the onboarding process to Foobar Bank.
    Your target is to get the client registered. Invite the client to register on welcoming.
    The user is not registered yet.
    Today is {date}
    """

    prompt_template = PromptTemplate(prompt)

    ast = MacawAssistant(
        # For observability purposes, it is recommended to provide a name to the assistant
        name="Onboarding Agent",

        # This is the description of the assistant, it will be used by AssistantRouter
        # to match the assistant with the user intent
        description="""You are a virtual assistant specializing in onboarding clients to Foobar Bank.""",

        prompt=prompt_template,
    )

    @ast.function('register_user', 'If the user wants to continue with the registration process', params=[
        Param('user_full_name', 'string', 'Full name of the user', required=True)
    ])
    async def handle_registration(session, params, ctx: FunctionContext):
        """Mark the session as registered and greet the user by name."""
        # Change the state of the user to registered
        state = await ctx.state.get_store(session) or {}
        state["is_registered"] = True
        await ctx.state.set_store(session, state)

        # Response to user with success message
        name = params.get('user_full_name')
        return FunctionContext.response_text(f"Welcome {name}! You are now registered with Foobar Bank.")

    # TODO: Add RAG here

    # TODO: Add Tooling here

    return ast
## Products and Ingredients

### Q: What ingredients do you use in your smoothies?
We use fresh fruits and vegetables to ensure that our smoothies are both delicious and nutritious.

### Q: Are your smoothies organic?
While not all of our ingredients are certified organic, we prioritize sourcing high-quality, fresh produce.

### Q: Do you offer any vegan or gluten-free options?
Yes, we offer a variety of vegan and gluten-free smoothies to cater to different dietary needs.

## Locations and Services

### Q: Where can I find Smoothy Inc. food trucks?
Our food trucks are located in various locations. You can check our website or social media pages for the latest updates on our locations.

### Q: Do you offer catering services?
Yes, we offer catering services for events. Please contact us for more details and to make arrangements.

## Health and Nutrition

### Q: Are your smoothies healthy?
Yes, our smoothies are made with fresh fruits and vegetables, providing a great source of vitamins and minerals.

### Q: Can I customize my smoothie?
Absolutely! You can customize your smoothie by choosing from our selection of fresh ingredients.

## Customer Experience

### Q: How can I provide feedback about my experience?
We value your feedback! You can provide feedback through our website or social media pages.

### Q: Do you have a loyalty program?
Yes, we offer a loyalty program for our regular customers. Please ask our staff for more details.

## Contact Information

### Q: How can I contact Smoothy Inc.?
You can contact us through our website, social media pages, or by visiting one of our food trucks.

### Q: Do you have a customer service phone number?
Yes, our customer service phone number is available on our website.
import aiohttp
import asyncio


async def get_crypto_price(crypto_symbol, currency):
    """Look up the spot price of a cryptocurrency on CoinGecko.

    Returns the numeric price on success, or an "Error: ..." string when the
    symbol is unknown or the HTTP request fails (same contract as before).
    """
    endpoint = (
        "https://api.coingecko.com/api/v3/simple/price"
        f"?ids={crypto_symbol}&vs_currencies={currency}"
    )

    async with aiohttp.ClientSession() as http:
        async with http.get(endpoint) as response:
            if response.status != 200:
                return f"Error: Unable to fetch data. Status code: {response.status}"
            payload = await response.json()
            if crypto_symbol not in payload:
                return f"Error: Cryptocurrency '{crypto_symbol}' not found."
            return payload[crypto_symbol][currency]


if __name__ == "__main__":
    # Usage example: fetch Cardano's price in AUD and print it.
    async def main():
        crypto_symbol = "cardano"
        currency = "aud"
        price = await get_crypto_price(crypto_symbol, currency)
        print(f"El precio actual de {crypto_symbol} en {currency} es: {price}")

    asyncio.run(main())
class TaskManager:
    """Maintain a per-session task list on top of an AsyncStateManager.

    Every operation opens the state manager as an async context and reads or
    writes the ``"tasks"`` entry, so each call is persisted atomically by the
    underlying state store.
    """

    def __init__(self, state_mngr: AsyncStateManager):
        self.state_mngr = state_mngr

    async def add_task(self, task):
        """Append `task` to the stored list, creating the list if needed."""
        async with self.state_mngr as state:
            pending = state.get("tasks", [])
            assert isinstance(pending, list), "Tasks must be a list"
            pending.append(task)
            state["tasks"] = pending

    async def get_tasks(self):
        """Return the stored task list (empty list when none saved yet)."""
        async with self.state_mngr as state:
            return state.get("tasks", [])

    async def clear_tasks(self):
        """Reset the stored task list to empty."""
        async with self.state_mngr as state:
            state["tasks"] = []

    async def remove_task(self, task):
        """Remove the first occurrence of `task` from the stored list.

        Raises ValueError (from list.remove) if the task is not present.
        """
        async with self.state_mngr as state:
            pending = state.get("tasks", [])
            assert isinstance(pending, list), "Tasks must be a list"
            pending.remove(task)
            state["tasks"] = pending
unique?** 9 | Our smoothies are made with fresh fruits and vegetables and are a great way to get your daily dose of vitamins and minerals. 10 | 11 | ## Products and Ingredients 12 | 13 | ### Q: What ingredients do you use in your smoothies?** 14 | We use fresh fruits and vegetables to ensure that our smoothies are both delicious and nutritious. 15 | 16 | ### Q: Are your smoothies organic?** 17 | While not all of our ingredients are certified organic, we prioritize sourcing high-quality, fresh produce. 18 | 19 | ### Q: Do you offer any vegan or gluten-free options?** 20 | Yes, we offer a variety of vegan and gluten-free smoothies to cater to different dietary needs. 21 | 22 | ## Locations and Services 23 | 24 | ### Q: Where can I find Smoothy Inc. food trucks?** 25 | Our food trucks are located in various locations. You can check our website or social media pages for the latest updates on our locations. 26 | 27 | ### Q: Do you offer catering services?** 28 | Yes, we offer catering services for events. Please contact us for more details and to make arrangements. 29 | 30 | ## Health and Nutrition 31 | 32 | ### Q: Are your smoothies healthy?** 33 | Yes, our smoothies are made with fresh fruits and vegetables, providing a great source of vitamins and minerals. 34 | 35 | ### Q: Can I customize my smoothie?** 36 | Absolutely! You can customize your smoothie by choosing from our selection of fresh ingredients. 37 | 38 | ## Customer Experience 39 | 40 | ### Q: How can I provide feedback about my experience?** 41 | We value your feedback! You can provide feedback through our website or social media pages. 42 | 43 | ### Q: Do you have a loyalty program?** 44 | Yes, we offer a loyalty program for our regular customers. Please ask our staff for more details. 45 | 46 | ## Contact Information 47 | 48 | ### Q: How can I contact Smoothy Inc.?** 49 | You can contact us through our website, social media pages, or by visiting one of our food trucks. 
50 | 51 | ### Q: Do you have a customer service phone number?** 52 | Yes, our customer service phone number is available on our website. 53 | -------------------------------------------------------------------------------- /examples/6_vapi/qa.md: -------------------------------------------------------------------------------- 1 | # Smoothy Inc. - Q&A 2 | 3 | ## About Smoothy Inc. 4 | 5 | ### Q: What is Smoothy Inc.?** 6 | Smoothy Inc. is a company that specializes in creating smoothies in food trucks. We have been in business for over 10 years and have served thousands of customers. 7 | 8 | ### Q: What makes Smoothy Inc. unique?** 9 | Our smoothies are made with fresh fruits and vegetables and are a great way to get your daily dose of vitamins and minerals. 10 | 11 | ## Products and Ingredients 12 | 13 | ### Q: What ingredients do you use in your smoothies?** 14 | We use fresh fruits and vegetables to ensure that our smoothies are both delicious and nutritious. 15 | 16 | ### Q: Are your smoothies organic?** 17 | While not all of our ingredients are certified organic, we prioritize sourcing high-quality, fresh produce. 18 | 19 | ### Q: Do you offer any vegan or gluten-free options?** 20 | Yes, we offer a variety of vegan and gluten-free smoothies to cater to different dietary needs. 21 | 22 | ## Locations and Services 23 | 24 | ### Q: Where can I find Smoothy Inc. food trucks?** 25 | Our food trucks are located in various locations. You can check our website or social media pages for the latest updates on our locations. 26 | 27 | ### Q: Do you offer catering services?** 28 | Yes, we offer catering services for events. Please contact us for more details and to make arrangements. 29 | 30 | ## Health and Nutrition 31 | 32 | ### Q: Are your smoothies healthy?** 33 | Yes, our smoothies are made with fresh fruits and vegetables, providing a great source of vitamins and minerals. 34 | 35 | ### Q: Can I customize my smoothie?** 36 | Absolutely! 
import pickle
from cel.gateway.model.conversation_lead import ConversationLead
from cel.connectors.telegram import TelegramLead
from cel.connectors.whatsapp.whatsapp_connector import WhatsappLead


# Throwaway experiment: round-trip a TelegramLead through pickle and a hex
# string, to check that lead objects survive serialization.
# NOTE(review): pickle.loads must never be fed untrusted data — it can
# execute arbitrary code. Fine for this local experiment only.

# Create an instance and serialize it
lead = TelegramLead("123")
serialized_lead = pickle.dumps(lead)

# Re-pickle with protocol 2 (NOTE: this is an older pickle protocol, not
# compression, despite the variable name)
compressed_lead = pickle.dumps(lead, protocol=2)

# Convert to a URL-safe hex string
serialized_lead = serialized_lead.hex()
compressed_lead = compressed_lead.hex()

# Convert back to bytes
serialized_lead = bytes.fromhex(serialized_lead)

# Deserialize and confirm the original class comes back
lead_instance = pickle.loads(serialized_lead)
print(type(lead_instance))  # expected: <class '...TelegramLead'>
from loguru import logger as log
from cel.assistants.base_assistant import BaseAssistant
from cel.assistants.request_context import RequestContext
from cel.connectors.telegram.telegram_connector import TelegramConnector


def register_client_commands(ast: BaseAssistant):
    """Attach CLI client commands ("ping", "demo") to the given assistant."""

    @ast.client_command("ping")
    async def handle_ping(session, ctx: RequestContext, command: str, args: list[str]):
        # Fixed copy-paste defect: this handler serves "ping", not "reset",
        # so it is named and logged accordingly.
        log.critical("Got ping command")
        await ctx.connector.send_text_message(ctx.lead, "Pong!")

    @ast.client_command("demo")
    async def handle_demo(session, ctx: RequestContext, command: str, args: list[str]):
        """Showcase outgoing message types. Usage: /demo [link|select] [n]."""
        log.critical("Got demo command")

        demo = args[0] if args else None

        if demo == "link":
            links = [
                {"text": "Go to Google", "url": "https://www.google.com"},
                {"text": "Go to Facebook", "url": "https://www.facebook.com"}
            ]
            # Link messages are only implemented for the Telegram connector here.
            if ctx.connector.name() == "telegram":
                assert isinstance(ctx.connector, TelegramConnector), "Connector must be an instance of TelegramConnector"
                await ctx.connector.send_link_message(ctx.lead, text="Please follow this link", links=links)
            return RequestContext.cancel_ai_response()

        if demo == "select":
            conn = ctx.connector
            # Optional second argument: number of options to render (default 3).
            num = int(args[1]) if len(args) > 1 else 3
            options = [f"Option {i}" for i in range(1, num + 1)]
            if conn.name() == "telegram":
                assert isinstance(conn, TelegramConnector), "Connector must be an instance of TelegramConnector"
                await conn.send_select_message(ctx.lead, "Select an option", options=options)
            return RequestContext.response_text("", disable_ai_response=True)

        # Unknown or missing demo name: list the available demos.
        await ctx.connector.send_text_message(ctx.lead, "Available demos: link, select")
        return RequestContext.cancel_ai_response()
| name: material 24 | logo: assets/celia_logo.png 25 | favicon: assets/celia_logo.png 26 | palette: 27 | # Palette toggle for light mode 28 | - media: "(prefers-color-scheme: dark)" 29 | scheme: slate 30 | # scheme: slate 31 | # toggle: 32 | # icon: material/brightness-7 33 | # name: Switch to dark mode 34 | 35 | # # Palette toggle for dark mode 36 | # - media: "(prefers-color-scheme: dark)" 37 | # scheme: default 38 | # toggle: 39 | # icon: material/brightness-4 40 | # name: Switch to light mode 41 | 42 | markdown_extensions: 43 | - pymdownx.highlight: 44 | use_pygments: true 45 | - pymdownx.superfences 46 | - footnotes 47 | - admonition 48 | - pymdownx.details 49 | 50 | extra: 51 | social: 52 | - icon: fontawesome/brands/github 53 | link: https://github.com/cel-ai/celai 54 | # - icon: fontawesome/brands/twitter 55 | # link: https://twitter.com 56 | 57 | copyright: Copyright © 2024 Cel.ai - All Rights Reserved -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "celai" 3 | version = "0.6.0" 4 | description = "AI Driven Communication Platform. Assistants made easy." 
5 | authors = ["Alex Martin "] 6 | readme = "README.md" 7 | classifiers = [ 8 | "Programming Language :: Python :: 3", 9 | "License :: OSI Approved :: MIT License", 10 | "Operating System :: OS Independent", 11 | ] 12 | packages = [ 13 | { include = "cel" } 14 | ] 15 | 16 | [tool.poetry.urls] 17 | Homepage = "https://github.com/cel-ai/celai" 18 | Issues = "https://github.com/cel-ai/celai/issues" 19 | 20 | [tool.poetry.dependencies] 21 | #Runtime 22 | python = "^3.11" 23 | # Dependencies for the project: 24 | redis = ">=4.6.0" 25 | numpy = ">=1.26.4" 26 | loguru = ">=0.7.2" 27 | chardet = ">=5.2.0" 28 | marko = "^2.1.0" 29 | chromadb = ">=0.5.0" 30 | python-dotenv = "^1.0.1" 31 | openai = ">=1.30.1" 32 | diskcache = ">=5.6.3" 33 | openai-responses = ">=0.3.2" 34 | halo = ">=0.0.31" 35 | aioredis = ">=2.0.1" 36 | fakeredis = ">=2.23.2" 37 | pybars3 = ">=0.9.7" 38 | beautifulsoup4 = ">=4.12.3" 39 | lxml = ">=5.2.2" 40 | shortuuid = ">=1.0.13" 41 | dictdiffer = ">=0.9.0" 42 | pysbd = ">=0.3.4" 43 | deepgram-sdk = ">=3.2.7" 44 | geopy = ">=2.4.1" 45 | langchain = ">=0.2.0" 46 | langchain-chroma = ">=0.1.1" 47 | langchain-community = ">=0.2.1" 48 | langchain-openai = ">=0.1.8" 49 | aiogram = ">=3.6.0" 50 | ollama = ">=0.3.1" 51 | qrcode = ">=7.4.2" 52 | pymongo = ">=4.8.0" 53 | together = ">=1.2.12" 54 | elevenlabs = ">=1.9.0" 55 | PyJWT = ">=2.10.1" 56 | cryptography = ">=44.0.0" 57 | pywa = ">=2.7.0" 58 | 59 | [tool.poetry.group.dev.dependencies] 60 | pytest = ">=8.2.0" 61 | pytest-asyncio = ">=0.23.7" 62 | 63 | [build-system] 64 | requires = ["poetry-core"] 65 | build-backend = "poetry.core.masonry.api" 66 | 67 | [tool.pytest.ini_options] 68 | pythonpath = [ 69 | "." 
import pytest
import os
from cel.assistants.macaw.macaw_assistant import MacawAssistant
from cel.assistants.macaw.macaw_history_adapter import MacawHistoryAdapter
from cel.assistants.macaw.macaw_inference_context import MacawNlpInferenceContext
from cel.assistants.macaw.macaw_nlp import blend_message
from cel.assistants.macaw.macaw_settings import MacawSettings
from cel.gateway.model.conversation_lead import ConversationLead
from cel.stores.history.history_inmemory_provider import InMemoryHistoryProvider
from cel.stores.state.state_inmemory_provider import InMemoryStateProvider
from langchain_core.messages import HumanMessage

# True when no OpenAI key is configured (e.g. in CI); such tests are skipped.
# Renamed from "is_openai_available", whose meaning was inverted: the old name
# was True precisely when OpenAI was NOT available.
openai_key_missing = 'OPENAI_API_KEY' not in os.environ


@pytest.mark.asyncio
@pytest.mark.skipif(openai_key_missing, reason="Disable in Github Actions")
async def test_blend_message():
    """blend_message should weave the injected offer into the conversation,
    preserving the user's language (Spanish history -> 'descuento')."""

    ctx = MacawNlpInferenceContext(
        lead=ConversationLead(),
        # Typo fixed: "Your are" -> "You are".
        prompt="You are a helpful assistant that can get the current price of a cryptocurrency. Get the price of a cryptocurrency.",
        history_store=InMemoryHistoryProvider(),
        state_store=InMemoryStateProvider(),
        settings=MacawSettings()
    )

    history_adapter = MacawHistoryAdapter(ctx.history_store)

    # Seed the history with a Spanish user message so the blended output
    # should come back in Spanish.
    await history_adapter.append_to_history(ctx.lead, HumanMessage("Hola! dime por favor cuento es 2+2?"))

    res = await blend_message(ctx, message="You have a 10% discount on all products.")

    assert res is not None
    assert "descuento" in res
    assert "10%" in res


@pytest.mark.asyncio
@pytest.mark.skipif(openai_key_missing, reason="Disable in Github Actions")
async def test_assistant_blend():
    """Same blending behavior exercised through the MacawAssistant facade."""

    lead = ConversationLead()
    history_store = InMemoryHistoryProvider()
    history_adapter = MacawHistoryAdapter(history_store)
    await history_adapter.append_to_history(lead, HumanMessage("Hola! dime por favor cuento es 2+2?"))

    ast = MacawAssistant(history_store=history_store)

    res = await ast.blend(lead, text="You have a 10% discount on all products.")

    assert res is not None
    assert "descuento" in res
    assert "10%" in res
"""Integration test for macaw_nlp.process_new_message with a tool definition.

Streams the response to "What is the price of BTC?" through a context that
declares a get_crypto_price function, asserting every streamed chunk is truthy.
Requires a live OPENAI_API_KEY; skipped otherwise.
"""
import pytest
import os
from cel.assistants.common import FunctionDefinition, Param
from cel.assistants.macaw.macaw_inference_context import MacawNlpInferenceContext
from cel.assistants.macaw.macaw_nlp import process_new_message
from cel.assistants.macaw.macaw_settings import MacawSettings
from cel.gateway.model.conversation_lead import ConversationLead
from cel.prompt.prompt_template import PromptTemplate
from cel.stores.history.history_inmemory_provider import InMemoryHistoryProvider
from cel.stores.state.state_inmemory_provider import InMemoryStateProvider

# NOTE(review): despite the name, this flag really means "no OpenAI key in the
# environment" -- it is used as a proxy for running under CI.
is_github_actions = 'OPENAI_API_KEY' not in os.environ

# Tool definition handed to the model: one required param (crypto symbol) and
# one optional enum-constrained param (fiat currency).
func1 = FunctionDefinition(
    name='get_crypto_price',
    description='Get the current price of a cryptocurrency.',
    parameters=[
        Param(name='crypto',
              type='string',
              description='The name of the cryptocurrency ex: BTC, ETH, ADA, etc.',
              required=True
        ),
        Param(name='currency',
              type='string',
              description='Currency name eg. USD, ARS',
              required=False,
              enum=['USD', 'ARS']
        )
    ]
)


@pytest.mark.asyncio
@pytest.mark.skipif(is_github_actions, reason="Disable in Github Actions")
async def test_new_message_fail_func():
    """Stream a message that should trigger the declared function.

    NOTE(review): there is no registered handler for get_crypto_price here, so
    presumably this exercises the failure/fallback path -- confirm against
    process_new_message's behavior.
    """

    prompt = PromptTemplate(
        "Your are a helpful assistant that can get the current price of a cryptocurrency. Get the price of a cryptocurrency."
    )


    ctx = MacawNlpInferenceContext(
        lead = ConversationLead(),
        prompt=prompt,
        functions=[func1],
        history_store=InMemoryHistoryProvider(),
        state_store=InMemoryStateProvider(),
        settings=MacawSettings()
    )

    # Every streamed chunk must be truthy (non-empty).
    async for chunk in process_new_message(ctx, message="What is the price of BTC?"):
        assert chunk
        print(chunk)


    assert True
"""Test the connector registry exposed via BaseConnector.

Creates several connector instances and checks that each registers itself and
can be looked up by name.
"""
import pytest
from cel.connectors.telegram.telegram_connector import TelegramConnector
from cel.connectors.whatsapp.whatsapp_connector import WhatsappConnector
from cel.gateway.model.base_connector import BaseConnector

@pytest.mark.asyncio
async def test_registry():
    """Each constructed connector should appear in the global registry.

    NOTE(review): this assumes the registry is process-global and empty when
    the test starts -- any other test that builds a connector first would break
    the `len(conns) == 0` assertion. Consider a fixture that clears the
    registry; confirm how BaseConnector stores registrations.
    """

    conns = BaseConnector.get_all_connectors()
    assert len(conns) == 0

    # Create TelegramConnector instance; construction alone registers it.
    conn1 = TelegramConnector(
        token="123:ASD"
    )
    conns = BaseConnector.get_all_connectors()
    assert len(conns) == 1

    conn2 = TelegramConnector(
        token="321:QWE"
    )
    conns = BaseConnector.get_all_connectors()
    assert len(conns) == 2

    conn3 = WhatsappConnector(
        token="123:ASD",
        phone_number_id="123",
        verify_token="123"
    )
    conns = BaseConnector.get_all_connectors()
    assert len(conns) == 3

    # Test get_connector_by_name resolves to the exact registered instance.
    conn = BaseConnector.get_connector_by_name(conn2.name())
    assert conn == conn2
"""Tests for the Redis-backed history provider.

NOTE(review): the `history` fixture currently builds an InMemoryHistoryProvider
even though every test annotates RedisHistoryProvider and calls the API
synchronously (the in-memory provider's methods are awaited in
tests/history/history_in_memory.py). Presumably the fixture was meant to wrap
a fakeredis client in RedisHistoryProvider -- confirm and fix the fixture.
Also note the filename (history_redis.py) does not match pytest's default
test-file patterns, so these tests may never be collected -- verify the
project's pytest configuration.
"""
import random
import time
import pytest
import fakeredis.aioredis
from cel.gateway.model.conversation_lead import ConversationLead
from cel.stores.history.history_inmemory_provider import InMemoryHistoryProvider
from cel.stores.history.history_redis_provider import RedisHistoryProvider



@pytest.fixture
def lead():
    # Tests key history by session id string, not by the lead object itself.
    lead = ConversationLead()
    return lead.get_session_id()

# @pytest.fixture
# def redis_client():
#     redis_client = fakeredis.FakeRedis()
#     return redis_client

@pytest.fixture
def history() -> InMemoryHistoryProvider:
    # NOTE(review): returns the in-memory provider, not RedisHistoryProvider
    # as the tests below annotate -- see module docstring.
    h = InMemoryHistoryProvider(key_prefix='test')
    return h


def test_get_key(history: RedisHistoryProvider, lead):
    # Append then read back a single entry.
    history.append_to_history(lead, {'message': 'test'})
    l = history.get_history(lead)
    assert l == [{'message': 'test'}]


def test_append_to_history(history: RedisHistoryProvider, lead):
    # Entries must come back in insertion order.
    history.append_to_history(lead, {'message': 'test0'})
    history.append_to_history(lead, {'message': 'test1'})
    history.append_to_history(lead, {'message': 'test2'})
    l = history.get_history(lead)
    assert l == [{'message': 'test0'}, {'message': 'test1'}, {'message': 'test2'}]


def test_clear_history(history: RedisHistoryProvider, lead):
    # Clearing empties the stored history for this session.
    history.append_to_history(lead, {'message': 'test0'})
    history.clear_history(lead)
    l = history.get_history(lead)
    assert l == []


def test_get_last_messages(history: RedisHistoryProvider, lead):
    # Only the N most recent entries are returned, oldest first.
    history.append_to_history(lead, {'message': 'test0'})
    history.append_to_history(lead, {'message': 'test1'})
    history.append_to_history(lead, {'message': 'test2'})
    l = history.get_last_messages(lead, 2)
    assert l == [{'message': 'test1'}, {'message': 'test2'}]


# def test_ttl(history: RedisHistoryProvider, lead):
#     history.append_to_history(lead, {'message': 'test0'}, ttl=1)
#     time.sleep(2)
#     l = history.get_history(lead)
#     assert l == []
28 | } 29 | 30 | 31 | @pytest.fixture 32 | def connector(): 33 | # Create Telegram Connector 34 | return TelegramConnector(token="123:ASD") 35 | 36 | 37 | def test_lead(connector): 38 | 39 | lead = ConversationLead(connector=connector) 40 | 41 | d = lead.to_dict() 42 | 43 | assert d.get("connector_name") == connector.name() 44 | 45 | 46 | -------------------------------------------------------------------------------- /tests/messages/outgoing/test_factory.py: -------------------------------------------------------------------------------- 1 | from cel.gateway.model.conversation_lead import ConversationLead 2 | from cel.gateway.model.outgoing.outgoing_message_factory import outgoing_message_from_dict 3 | from cel.gateway.model.outgoing.outgoing_message_text import OutgoingTextMessage 4 | import dotenv 5 | 6 | dotenv.load_dotenv() 7 | 8 | 9 | 10 | def test_factory_text(): 11 | 12 | lead = ConversationLead() 13 | sample = { 14 | "lead": lead, 15 | "type": "text", 16 | "content": "message content", 17 | } 18 | 19 | res = outgoing_message_from_dict(sample) 20 | 21 | assert isinstance(res, OutgoingTextMessage) 22 | assert res.type == "text" 23 | assert res.content == "message content" 24 | assert res.lead == lead 25 | 26 | 27 | 28 | def test_factory_select(): 29 | lead = ConversationLead() 30 | sample = { 31 | "lead": lead, 32 | "type": "select", 33 | "content": "message content", 34 | "options": ["option1", "option2"] 35 | } 36 | 37 | res = outgoing_message_from_dict(sample) 38 | 39 | assert res.type == "select" 40 | assert res.content == "message content" 41 | assert res.lead == lead 42 | assert res.options == ["option1", "option2"] 43 | 44 | 45 | def test_factory_link(): 46 | lead = ConversationLead() 47 | sample = { 48 | "lead": lead, 49 | "type": "link", 50 | "content": "message content", 51 | "links": [{"text": "link text", "url": "https://example.com"}] 52 | } 53 | 54 | res = outgoing_message_from_dict(sample) 55 | 56 | assert res.type == "link" 57 | assert 
res.content == "message content" 58 | assert res.lead == lead 59 | assert res.links[0]["url"] == "https://example.com" -------------------------------------------------------------------------------- /tests/messages/telegram/text_message.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from cel.connectors.telegram.model.telegram_message import TelegramMessage 3 | from cel.gateway.model.conversation_lead import ConversationLead 4 | from cel.connectors.telegram.model.telegram_lead import TelegramLead 5 | import dotenv 6 | 7 | dotenv.load_dotenv() 8 | 9 | sample_message = { 10 | "update_id": 169216955, 11 | "message": { 12 | "message_id":123, 13 | "from":{ 14 | "id":1320141991, 15 | "is_bot": False, 16 | "first_name":"John Doe", 17 | "username":"john_doe", 18 | "language_code":"en" 19 | }, 20 | "chat":{ 21 | "id":1320141991, 22 | "first_name":"John Doe", 23 | "username":"john_doe", 24 | "type":"private" 25 | }, 26 | "date":1716850049, 27 | "text":"Hello!" 28 | } 29 | } 30 | 31 | 32 | # @pytest.fixture 33 | # def fix(): 34 | # pass 35 | 36 | @pytest.mark.asyncio 37 | async def test_parse_message(): 38 | 39 | msg: TelegramMessage = await TelegramMessage.load_from_message(sample_message) 40 | 41 | assert isinstance(msg.lead, TelegramLead) 42 | assert isinstance(msg.lead, ConversationLead) 43 | 44 | assert msg.lead.chat_id == '1320141991' 45 | assert msg.lead.metadata['message_id'] == '123' 46 | assert msg.lead.metadata['date'] == 1716850049 47 | assert msg.lead.metadata['raw'] == sample_message['message'] 48 | 49 | assert msg.text == "Hello!" 
50 | assert msg.date == 1716850049 51 | assert msg.metadata == {'raw': sample_message['message']} 52 | 53 | -------------------------------------------------------------------------------- /tests/messages/telegram/voice_message.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pytest 3 | import requests 4 | from cel.connectors.telegram.model.telegram_attachment import TelegramAttachment 5 | from cel.connectors.telegram.model.telegram_message import TelegramMessage 6 | from cel.gateway.model.conversation_lead import ConversationLead 7 | from cel.connectors.telegram.model.telegram_lead import TelegramLead 8 | import dotenv 9 | 10 | dotenv.load_dotenv() 11 | 12 | TELEGRAM_TOKEN = os.getenv("TELEGRAM_TOKEN") 13 | 14 | sample_message = { 15 | "update_id": 794947863, 16 | "message": { 17 | "message_id": 647, 18 | "from": { 19 | "id": 1320141, 20 | "is_bot": False, 21 | "first_name": "John", 22 | "username": "foobar", 23 | "language_code": "en" 24 | }, 25 | "chat": { 26 | "id": 1320141, 27 | "first_name": "John", 28 | "username": "foobar", 29 | "type": "private" 30 | }, 31 | "date": 1717182488, 32 | "voice": { 33 | "duration": 2, 34 | "mime_type": "audio/ogg", 35 | "file_id": "AwACAgEAAxkBAAICh2ZaIBgQeMjTu4_DOlmioRlXy6PGAAKeBQACWnjQRhHL8mHBhl-FNQQ", 36 | "file_unique_id": "AgADngUAAlp40EY", 37 | "file_size": 8537 38 | } 39 | } 40 | } 41 | 42 | # @pytest.fixture 43 | # def fix(): 44 | # pass 45 | 46 | @pytest.mark.asyncio 47 | async def test_parse_attachment(): 48 | 49 | attach = await TelegramAttachment.load_from_message(sample_message, TELEGRAM_TOKEN) 50 | 51 | assert attach.title == "audio" 52 | # assert attach.file_url == "https://es.wikipedia.org/static/images/icons/wikipedia.png" 53 | # http download file using attach.file_url 54 | 55 | response = requests.get(attach.file_url) 56 | 57 | # Asegúrate de que la solicitud fue exitosa 58 | assert response.status_code == 200 59 | assert attach.type == "voice" 60 | 
61 | 62 | 63 | 64 | @pytest.mark.asyncio 65 | async def test_parse_message_with_image(): 66 | 67 | msg = await TelegramMessage.load_from_message(sample_message, TELEGRAM_TOKEN) 68 | 69 | assert isinstance(msg.lead, TelegramLead) 70 | assert isinstance(msg.lead, ConversationLead) 71 | 72 | assert msg.lead.chat_id == '1320141' 73 | assert msg.lead.metadata['message_id'] == '647' 74 | assert msg.lead.metadata['date'] == 1717182488 75 | assert msg.lead.metadata['raw'] == sample_message['message'] 76 | 77 | # assert msg.text == "This is an image 😍" 78 | assert msg.date == 1717182488 79 | assert msg.metadata == {'raw': sample_message['message']} 80 | assert msg.is_voice_message() == True 81 | 82 | -------------------------------------------------------------------------------- /tests/messages/vapi/message.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from cel.connectors.telegram.model.telegram_message import TelegramMessage 3 | from cel.connectors.vapi.model.vapi_lead import VAPILead 4 | from cel.connectors.vapi.model.vapi_message import VAPIMessage 5 | from cel.connectors.vapi.vapi_connector import VAPIConnector 6 | from cel.gateway.model.conversation_lead import ConversationLead 7 | from cel.connectors.telegram.model.telegram_lead import TelegramLead 8 | import dotenv 9 | 10 | dotenv.load_dotenv() 11 | 12 | request = { 13 | "model": "gpt-3.5-turbo", 14 | "messages": [ 15 | { 16 | "role": "system", 17 | "content": "This is a blank template with minimal defaults, you can change the model, temperature, and messages." 18 | }, 19 | { 20 | "role": "assistant", 21 | "content": "Hi. My name is John." 22 | }, 23 | { 24 | "role": "user", 25 | "content": "Hi." 
26 | } 27 | ], 28 | "temperature": 0.7, 29 | "stream": True, 30 | "max_tokens": 250, 31 | "call": { 32 | "id": "c7719e5c-ea98-40e1-b1dc-66131da31532", 33 | "orgId": "2ac97024-f9e9-425e-a846-ce5e2e3540f1", 34 | "createdAt": "2024-07-02T05:29:55.903Z", 35 | "updatedAt": "2024-07-02T05:29:55.903Z", 36 | "type": "webCall", 37 | "status": "queued", 38 | "assistantId": "1d9d46ba-618e-4867-8797-5a8dc2f9f42x", 39 | "webCallUrl": "https://vapi.daily.co/E3pM5r6l7Q82gThElS7" 40 | }, 41 | "metadata": {} 42 | } 43 | 44 | @pytest.fixture 45 | def connector(): 46 | # Create Connector 47 | return VAPIConnector() 48 | 49 | 50 | @pytest.mark.asyncio 51 | async def test_parse_message(): 52 | 53 | msg: VAPIMessage = await VAPIMessage.load_from_message(request) 54 | 55 | assert isinstance(msg.lead, VAPILead) 56 | assert isinstance(msg.lead, ConversationLead) 57 | 58 | assert msg.lead.get_session_id() == f"vapi:{request['call']['id']}" 59 | assert msg.lead.call_object == request['call'] 60 | 61 | assert msg.text == "Hi." 
62 | assert msg.date is not None 63 | 64 | 65 | @pytest.mark.asyncio 66 | async def test_parse_message_lead_connector(connector): 67 | 68 | msg: VAPIMessage = await VAPIMessage.load_from_message(request, connector=connector) 69 | 70 | assert isinstance(msg.lead, VAPILead) 71 | assert isinstance(msg.lead, ConversationLead) 72 | 73 | assert msg.lead.connector_name == connector.name() 74 | assert msg.lead.connector == connector -------------------------------------------------------------------------------- /tests/middlewares/in_mem_blacklist_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from cel.connectors.telegram.model.telegram_lead import TelegramLead 4 | from cel.gateway.model.message import Message 5 | from cel.middlewares.in_mem_blacklist import InMemBlackListMiddleware 6 | 7 | class MockMessage(Message): 8 | def __init__(self, lead): 9 | self.lead = lead 10 | 11 | def is_voice_message(self): 12 | """This method should be implemented by the subclass to check if the message is a voice message""" 13 | return False 14 | 15 | @classmethod 16 | def load_from_dict(cls, message_dict: dict): 17 | pass 18 | 19 | 20 | @pytest.mark.asyncio 21 | async def test_in_mem_black_list_middleware(): 22 | # Crear una instancia de la clase 23 | middleware = InMemBlackListMiddleware() 24 | 25 | # Prueba el método __call__ 26 | lead = TelegramLead("123") 27 | message = MockMessage(lead) # Deberías definir este objeto 28 | assert await middleware(message, None, None) == True 29 | 30 | # Prueba el método add_to_black_list 31 | middleware.add_to_black_list(lead.get_session_id(), 'test reason') 32 | assert lead.get_session_id() in middleware.black_list 33 | assert middleware.black_list[lead.get_session_id()].reason == 'test reason' 34 | 35 | # Prueba el método remove_from_black_list 36 | middleware.remove_from_black_list('telegram:123') 37 | assert 'telegram:123' not in middleware.black_list 38 | 39 | 40 | 41 | 
# NOTE(review): blocking-path test for InMemBlackListMiddleware, plus the async Redis
# blacklist tests. MockMessage is re-declared verbatim in three test modules in this
# dump (in_mem_blacklist_test.py, redis_blacklist_async_test.py, redis_blacklist_test.py)
# — consider hoisting it into a shared tests/conftest.py.
@pytest.mark.asyncio 42 | async def test_in_mem_black_list_middleware_block(): 43 | # Create an instance of the class 44 | middleware = InMemBlackListMiddleware() 45 | 46 | # Test the __call__ method 47 | lead = TelegramLead("123") 48 | message = MockMessage(lead) # You should define this object 49 | assert await middleware(message, None, None) == True 50 | 51 | # Test the add_to_black_list method 52 | middleware.add_to_black_list(lead.get_session_id(), 'test reason') 53 | 54 | # Test the __call__ method with a user in the blacklist 55 | assert await middleware(message, None, None) == False -------------------------------------------------------------------------------- /tests/middlewares/redis_blacklist_async_test.py: -------------------------------------------------------------------------------- 1 | import fakeredis 2 | import pytest 3 | import pytest_asyncio 4 | import redis 5 | import shortuuid 6 | from cel.connectors.telegram.model.telegram_lead import TelegramLead 7 | from cel.gateway.model.message import Message 8 | from cel.middlewares.redis_blacklist_async import RedisBlackListAsyncMiddleware 9 | 10 | class MockMessage(Message): 11 | def __init__(self, lead): 12 | self.lead = lead 13 | 14 | def is_voice_message(self): 15 | """This method should be implemented by the subclass to check if the message is a voice message""" 16 | return False 17 | 18 | @classmethod 19 | def load_from_dict(cls, message_dict: dict): 20 | pass 21 | 22 | 23 | @pytest_asyncio.fixture() 24 | async def client() -> redis.asyncio.Redis: 25 | return fakeredis.aioredis.FakeRedis() 26 | 27 | @pytest.mark.asyncio 28 | async def test_redis_black_list_async_middleware(client): 29 | # Create an instance of the class 30 | middleware = RedisBlackListAsyncMiddleware(redis=client) 31 | 32 | # Test the __call__ method 33 | chat_id = shortuuid.uuid() 34 | lead = TelegramLead(chat_id) 35 | message = MockMessage(lead) # You should define this object 36 | assert await middleware(message, None, 
None) == True 37 | 38 | # Test the add_to_black_list method 39 | await middleware.add_to_black_list(lead.get_session_id(), 'test reason') 40 | entry = await middleware.get_entry(lead.get_session_id()) 41 | assert entry is not None 42 | assert entry['reason'] == 'test reason' 43 | 44 | # Test the remove_from_black_list method 45 | await middleware.remove_from_black_list(lead.get_session_id()) 46 | entry = await middleware.get_entry(lead.get_session_id()) 47 | assert entry is None 48 | 49 | 50 | 51 | 52 | @pytest.mark.asyncio 53 | async def test_redis_black_list_async_middleware_block(client): 54 | # Create an instance of the class 55 | middleware = RedisBlackListAsyncMiddleware(redis=client) 56 | 57 | # Test the __call__ method 58 | chat_id = shortuuid.uuid() 59 | lead = TelegramLead(chat_id) 60 | message = MockMessage(lead) # You should define this object 61 | assert await middleware(message, None, None) == True 62 | 63 | # Test the add_to_black_list method 64 | await middleware.add_to_black_list(lead.get_session_id(), 'test reason') 65 | 66 | # Test the __call__ method with a user in the blacklist 67 | assert await middleware(message, None, None) == False -------------------------------------------------------------------------------- /tests/middlewares/redis_blacklist_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import shortuuid 3 | from cel.connectors.telegram.model.telegram_lead import TelegramLead 4 | from cel.gateway.model.message import Message 5 | from cel.middlewares.redis_blacklist import RedisBlackListMiddleware 6 | import fakeredis 7 | 8 | class MockMessage(Message): 9 | def __init__(self, lead): 10 | self.lead = lead 11 | 12 | def is_voice_message(self): 13 | """This method should be implemented by the subclass to check if the message is a voice message""" 14 | return False 15 | 16 | @classmethod 17 | def load_from_dict(cls, message_dict: dict): 18 | pass 19 | 20 | 21 | 22 | 
# NOTE(review): the two tests in redis_blacklist_test.py are named
# test_in_mem_black_list_middleware / test_in_mem_black_list_middleware_block although
# they exercise RedisBlackListMiddleware — misleading copy-paste names from
# in_mem_blacklist_test.py; consider renaming to test_redis_black_list_middleware*.
# The prompt-template test below verifies that PromptTemplate.compile resolves plain
# values, lambdas, sync callables and async callables from the state dict.
@pytest.fixture 23 | def redis_client(): 24 | redis_client = fakeredis.FakeRedis() 25 | return redis_client 26 | 27 | @pytest.mark.asyncio 28 | async def test_in_mem_black_list_middleware(redis_client): 29 | # Create an instance of the class 30 | middleware = RedisBlackListMiddleware(redis=redis_client) 31 | 32 | # Test the __call__ method 33 | chat_id = shortuuid.uuid() 34 | lead = TelegramLead(chat_id) 35 | message = MockMessage(lead) # You should define this object 36 | assert await middleware(message, None, None) == True 37 | 38 | # Test the add_to_black_list method 39 | middleware.add_to_black_list(lead.get_session_id(), 'test reason') 40 | entry = middleware.get_entry(lead.get_session_id()) 41 | assert entry is not None 42 | assert entry['reason'] == 'test reason' 43 | 44 | # Test the remove_from_black_list method 45 | middleware.remove_from_black_list(lead.get_session_id()) 46 | entry = middleware.get_entry(lead.get_session_id()) 47 | assert entry is None 48 | 49 | 50 | 51 | 52 | @pytest.mark.asyncio 53 | async def test_in_mem_black_list_middleware_block(redis_client): 54 | # Create an instance of the class 55 | middleware = RedisBlackListMiddleware(redis=redis_client) 56 | 57 | # Test the __call__ method 58 | chat_id = shortuuid.uuid() 59 | lead = TelegramLead(chat_id) 60 | message = MockMessage(lead) # You should define this object 61 | assert await middleware(message, None, None) == True 62 | 63 | # Test the add_to_black_list method 64 | middleware.add_to_black_list(lead.get_session_id(), 'test reason') 65 | 66 | # Test the __call__ method with a user in the blacklist 67 | assert await middleware(message, None, None) == False -------------------------------------------------------------------------------- /tests/prompt_template/prompt_template.py: -------------------------------------------------------------------------------- 1 | # PromptTemplate 2 | import pytest 3 | from cel.gateway.model.conversation_lead import ConversationLead 4 | from 
cel.gateway.model.message import Message 5 | from cel.prompt.prompt_template import PromptTemplate 6 | 7 | 8 | @pytest.fixture 9 | def lead(): 10 | lead = ConversationLead() 11 | return lead 12 | 13 | 14 | @pytest.mark.asyncio 15 | async def test_prompt_template(lead): 16 | 17 | async def get_contacts_async(lead: ConversationLead, state: dict, session_id: str): 18 | assert isinstance(lead, ConversationLead) 19 | assert isinstance(state, dict) 20 | assert isinstance(session_id, str) 21 | state["contacts"] = ["Juan", "Pedro", "Maria"] 22 | return ["Juan", "Pedro", "Maria"] 23 | 24 | def get_balance(lead: ConversationLead, message: str): 25 | assert isinstance(lead, ConversationLead) 26 | assert isinstance(message, str) 27 | assert message == "Hola" 28 | 29 | return { 30 | "checking": 1000, 31 | "savings": 5000 32 | } 33 | 34 | # Usage example
35 | prompt = """Hola, {name}. Tienes {messages} mensajes nuevos. 36 | Tiene los siguientes contactos: {contacts}. 37 | Su saldo es: \n{balance}""" 38 | 39 | state = { 40 | "name": "Juan", 41 | "messages": lambda: 5, 42 | "contacts": get_contacts_async, 43 | "balance": get_balance 44 | } 45 | 46 | p = PromptTemplate(prompt) 47 | res = await p.compile(state, lead, "Hola") 48 | 49 | assert res == """Hola, Juan. Tienes 5 mensajes nuevos. 50 | Tiene los siguientes contactos: ['Juan', 'Pedro', 'Maria']. 
# NOTE(review): chroma_store_test.py binds
# `is_openai_available = 'OPENAI_API_KEY' not in os.environ` — the flag is True when
# the key is ABSENT, so the identifier is inverted/misleading (the skipif logic itself
# is correct; sibling modules call the same expression `is_github_actions`, which is
# the honest name). Also: markdown_test.test_do only asserts `1==1` — it exercises the
# slicer but verifies nothing about the slices; consider asserting on slice count or
# content. The two .md files below are fixture data for the slicer tests.
51 | Su saldo es: 52 | {"checking": 1000, "savings": 5000}""" -------------------------------------------------------------------------------- /tests/rag_stores/chroma_store_test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pytest 3 | from cel.rag.text2vec.cached_openai import CachedOpenAIEmbedding 4 | import dotenv 5 | 6 | dotenv.load_dotenv() 7 | 8 | is_openai_available = 'OPENAI_API_KEY' not in os.environ 9 | 10 | 11 | texts=[ 12 | "This is a document about pineapple", 13 | "This is a document about oranges", 14 | "This is a document about lemons", 15 | "This is a document about dogs", 16 | "This is a document about parrots", 17 | ] 18 | 19 | @pytest.fixture 20 | def client(): 21 | return CachedOpenAIEmbedding() 22 | 23 | 24 | 25 | @pytest.mark.skipif(is_openai_available, reason="Disable in Github Actions") 26 | def test_do2(client: CachedOpenAIEmbedding): 27 | # test with texts 28 | res = client.texts2vec(texts) 29 | assert len(res) == len(texts) -------------------------------------------------------------------------------- /tests/slicers/markdown_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from cel.rag.slicers.markdown import MarkdownSlicer 3 | from cel.rag.slicers.markdown.utils import build_breadcrumbs 4 | 5 | # @pytest.fixture 6 | # def lead(): 7 | # lead = ChatLead('123', 'test', 'tenant1', 'assistant1') 8 | # return lead 9 | 10 | # @pytest.fixture 11 | # def redis_client(): 12 | # redis_client = fakeredis.FakeRedis() 13 | # return redis_client 14 | 15 | # @pytest.fixture 16 | # def store(redis_client): 17 | # return RedisChatStateProvider(redis_client, 's') 18 | 19 | def test_do(): 20 | mds = MarkdownSlicer('test', './tests/slicers/sample.md') 21 | slices = mds.slice() 22 | print(slices) 23 | assert 1==1 24 | 25 | -------------------------------------------------------------------------------- /tests/slicers/sample.md: 
-------------------------------------------------------------------------------- 1 | # Introduction 2 | 3 | This is a sample markdown file. 4 | 5 | ## Section 1 6 | 7 | This is the first section. 8 | 9 | ## Section 2 10 | 11 | This is the second section. 12 | 13 | # Demo Table 14 | 15 | | Name | Age | 16 | | ---- | --- | 17 | | Alice | 20 | 18 | | Bob | 30 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /tests/slicers/smoothy.md: -------------------------------------------------------------------------------- 1 | # Introduction 2 | 3 | SMOOTHY INC. is a company that specializes in creating smoothies in food trucks. We have been in business for over 10 years and have served thousands of customers. Our smoothies are made with fresh fruits and vegetables and are a great way to get your daily dose of vitamins and minerals. 4 | 5 | ## Products 6 | 7 | We offer a wide variety of smoothies to choose from, including: 8 | - Strawberry Banana 9 | - Mango Pineapple 10 | - Green Detox 11 | 12 | ## Locations 13 | 14 | We have food trucks located in the following cities: 15 | - New York 16 | - Los Angeles 17 | - Chicago 18 | 19 | # Demo Table 20 | 21 | | Product | Price | size | 22 | | ---- | --- | --- | 23 | | Strawberry Banana | $5 | 16 oz | 24 | | Mango Pineapple | $6 | 16 oz | 25 | | Green Detox | $7 | 16 oz | 26 | 27 | 28 | 29 | 30 | -------------------------------------------------------------------------------- /tests/state/state_manager_test.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import pytest 3 | from fakeredis import FakeRedis 4 | import pytest_asyncio 5 | import redis 6 | from cel.assistants.state_manager import AsyncStateManager 7 | from cel.gateway.model.conversation_lead import ConversationLead 8 | from cel.stores.history.history_redis_provider_async import RedisHistoryProviderAsync 9 | from cel.stores.state.state_inmemory_provider import 
# NOTE(review): state_manager_test.py imports `asyncio` and `RedisHistoryProviderAsync`
# but never uses them — dead imports that can be dropped. The AsyncStateManager tests
# otherwise cover the commit path (`async with` persists keys) and the rollback path
# (an exception inside the context leaves the stored state without the written key).
InMemoryStateProvider 10 | from cel.stores.state.state_redis_provider import RedisChatStateProvider 11 | 12 | 13 | 14 | @pytest.fixture 15 | def lead(): 16 | lead = ConversationLead() 17 | return lead 18 | 19 | @pytest.fixture 20 | def redis_client(request) -> redis.Redis: 21 | # fake redis 22 | return FakeRedis() 23 | 24 | @pytest.fixture 25 | def state_store(redis_client) -> RedisChatStateProvider: 26 | store = RedisChatStateProvider(redis_client, key_prefix='h') 27 | return store 28 | 29 | 30 | @pytest.mark.asyncio 31 | async def test_state_manager(lead, state_store): 32 | state = AsyncStateManager(lead, state_store) 33 | 34 | async with state: 35 | state['amount'] = 1000 36 | # do something 37 | state['result'] = "done" 38 | 39 | async with state: 40 | assert state['amount'] == 1000 41 | assert state['result'] == "done" 42 | 43 | 44 | @pytest.mark.asyncio 45 | async def test_state_manager_inmemory(lead): 46 | state = AsyncStateManager(lead, InMemoryStateProvider()) 47 | 48 | async with state: 49 | state['amount'] = 1000 50 | # do something 51 | state['result'] = "done" 52 | 53 | async with state: 54 | assert state['amount'] == 1000 55 | assert state['result'] == "done" 56 | 57 | 58 | @pytest.mark.asyncio 59 | async def test_state_manager_exception(lead): 60 | state = AsyncStateManager(lead, InMemoryStateProvider()) 61 | 62 | try: 63 | async with state: 64 | state['amount'] = 1000 65 | # do something and raise an exception 66 | # then the state should be empty 67 | raise ValueError("Error") 68 | 69 | except ValueError as e: 70 | pass 71 | 72 | # Check if the state is empty 73 | state = await state.load_state() 74 | assert 'amount' not in state -------------------------------------------------------------------------------- /tests/stores/common/list_redis_store_async_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import pytest_asyncio 3 | import redis 4 | import fakeredis.aioredis 5 | from 
cel.stores.common.list_redis_store_async import ListRedisStoreAsync 6 | 7 | 8 | @pytest_asyncio.fixture() 9 | async def aioredis(request) -> redis.asyncio.Redis: 10 | return fakeredis.aioredis.FakeRedis() 11 | 12 | @pytest.fixture 13 | def store(aioredis): 14 | return ListRedisStoreAsync(aioredis, key_prefix='h', ttl=10) 15 | 16 | 17 | # TESTS 18 | # ------------------------------------------- 19 | 20 | @pytest.mark.asyncio 21 | async def test_list_append(store): 22 | await store.list_append('key', 'value0') 23 | await store.list_append('key', 'value1') 24 | l = await store.list_get('key') 25 | assert l == ['value0', 'value1'] 26 | # assert 1 27 | 28 | @pytest.mark.asyncio 29 | async def test_list_clear(store): 30 | await store.list_append('key', 'value0') 31 | await store.list_clear('key') 32 | l = await store.list_get('key') 33 | assert l == [] 34 | 35 | @pytest.mark.asyncio 36 | async def test_list_get_last(store): 37 | await store.list_append('key', 'value0') 38 | await store.list_append('key', 'value1') 39 | await store.list_append('key', 'value2') 40 | l = await store.list_get_last('key', 2) 41 | assert l == ['value1', 'value2'] 42 | -------------------------------------------------------------------------------- /tests/stores/common/memory_cache_test.py: -------------------------------------------------------------------------------- 1 | from cel.stores.common.memory_cache import MemoryCache 2 | 3 | 4 | def test_memory_cache(): 5 | cache = MemoryCache('test', memory_maxsize=2) 6 | 7 | # Test set method 8 | cache.set('key1', 'value1') 9 | assert cache.get('key1') == 'value1' 10 | 11 | # Test get method with callback 12 | result = cache.get('key2', callback=lambda: 'value2') 13 | assert result == 'value2' 14 | assert cache.get('key2') == 'value2' 15 | 16 | # Test LRU property 17 | cache.set('key3', 'value3') 18 | assert cache.get('key1') is None 19 | assert cache.get('key2') == 'value2' 20 | assert cache.get('key3') == 'value3' 21 | 22 | # Test delete method 
# NOTE(review): state_inmemory_provider_test.py — the `lead` fixture is misnamed: it
# actually returns lead.get_session_id() (a string), not a lead object. In
# test_clear_all_stores, raw ConversationLead objects are passed where the other tests
# pass session-id strings — TODO confirm the provider accepts both, otherwise this
# relies on implicit str() coercion. Prefer `is None` over `== None` in the
# emptiness asserts (PEP 8).
23 | cache.delete('key2') 24 | assert cache.get('key2') is None 25 | 26 | # Test clear method 27 | cache.clear() 28 | assert cache.get('key3') is None 29 | 30 | # Test all method 31 | cache.set('key1', 'value1') 32 | cache.set('key2', 'value2') 33 | assert set(cache.all()) == {('key1', 'value1'), ('key2', 'value2')} -------------------------------------------------------------------------------- /tests/stores/state/state_inmemory_provider_test.py: -------------------------------------------------------------------------------- 1 | import fakeredis 2 | import pytest 3 | from cel.gateway.model.conversation_lead import ConversationLead 4 | from cel.stores.state.state_inmemory_provider import InMemoryStateProvider 5 | from cel.stores.state.state_redis_provider import RedisChatStateProvider 6 | 7 | @pytest.fixture 8 | def lead() -> str: 9 | lead = ConversationLead() 10 | return lead.get_session_id() 11 | 12 | 13 | @pytest.fixture 14 | def store(): 15 | return InMemoryStateProvider() 16 | 17 | @pytest.mark.asyncio 18 | async def test_set_key_value(store: InMemoryStateProvider, lead): 19 | await store.set_key_value(lead, 'key1', 'value1') 20 | v = await store.get_key_value(lead, 'key1') 21 | assert v == 'value1' 22 | 23 | @pytest.mark.asyncio 24 | async def test_get_store(store: InMemoryStateProvider, lead): 25 | await store.set_key_value(lead, 'key0', 'value0') 26 | await store.set_key_value(lead, 'key1', 'value1') 27 | s = await store.get_store(lead) 28 | assert s == {'key0': 'value0', 'key1': 'value1'} 29 | 30 | @pytest.mark.asyncio 31 | async def test_clear_store(store: InMemoryStateProvider, lead): 32 | await store.set_key_value(lead, 'key0', 'value0') 33 | await store.set_key_value(lead, 'key1', 'value1') 34 | s = await store.get_store(lead) 35 | assert s == {'key0': 'value0', 'key1': 'value1'} 36 | await store.clear_store(lead) 37 | s = await store.get_store(lead) 38 | assert s == None 39 | 40 | @pytest.mark.asyncio 41 | async def test_clear_all_stores(store: 
InMemoryStateProvider): 42 | sessionId1 = ConversationLead() 43 | sessionId2 = ConversationLead() 44 | await store.set_key_value(sessionId1, 'key0', 'value0') 45 | await store.set_key_value(sessionId1, 'key1', 'value1') 46 | await store.set_key_value(sessionId2, 'key0', 'value0') 47 | await store.set_key_value(sessionId2, 'key1', 'value1') 48 | 49 | s = await store.get_store(sessionId1) 50 | assert s == {'key0': 'value0', 'key1': 'value1'} 51 | 52 | s = await store.get_store(sessionId2) 53 | assert s == {'key0': 'value0', 'key1': 'value1'} 54 | 55 | await store.clear_all_stores() 56 | s = await store.get_store(sessionId1) 57 | assert s == None 58 | 59 | s = await store.get_store(sessionId2) 60 | assert s == None 61 | -------------------------------------------------------------------------------- /tests/stores/state/state_redis_provider_test.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import random 3 | import pytest 4 | import fakeredis.aioredis 5 | import pytest_asyncio 6 | import redis 7 | from cel.gateway.model.conversation_lead import ConversationLead 8 | from cel.stores.state.state_redis_provider import RedisChatStateProvider 9 | 10 | @pytest_asyncio.fixture() 11 | def lead() -> str: 12 | lead = ConversationLead() 13 | return lead.get_session_id() 14 | 15 | @pytest_asyncio.fixture() 16 | def redis_client(): 17 | redis_client = fakeredis.FakeRedis() 18 | return redis_client 19 | 20 | @pytest.fixture 21 | def store(redis_client): 22 | return RedisChatStateProvider(redis_client, 's') 23 | 24 | @pytest.mark.asyncio 25 | async def test_set_key_value(store: RedisChatStateProvider, lead): 26 | await store.set_key_value(lead, 'key1', 'value1') 27 | v = await store.get_key_value(lead, 'key1') 28 | assert v == 'value1' 29 | 30 | @pytest.mark.asyncio 31 | async def test_get_store(store: RedisChatStateProvider, lead): 32 | await store.set_key_value(lead, 'key0', 'value0') 33 | await store.set_key_value(lead, 
# NOTE(review): ollama_test.py gates skipping on 'OPENAI_API_KEY' although it exercises
# CachedOllamaEmbedding — likely copy-paste from the OpenAI tests; presumably it should
# check an Ollama-related condition (or a generic CI flag) — TODO confirm. Also note
# test_store_get_vector and test_get_similar rely on data upserted by test_store, i.e.
# inter-test ordering coupling on a shared Chroma collection.
'key1', 'value1') 34 | s = await store.get_store(lead) 35 | assert s == {'key0': 'value0', 'key1': 'value1'} 36 | 37 | @pytest.mark.asyncio 38 | async def test_clear_store(store: RedisChatStateProvider, lead): 39 | await store.set_key_value(lead, 'key0', 'value0') 40 | await store.set_key_value(lead, 'key1', 'value1') 41 | s = await store.get_store(lead) 42 | assert s == {'key0': 'value0', 'key1': 'value1'} 43 | await store.clear_store(lead) 44 | s = await store.get_store(lead) 45 | assert s == None 46 | 47 | @pytest.mark.asyncio 48 | async def test_clear_all_stores(store: RedisChatStateProvider): 49 | sessionId1 = ConversationLead() 50 | sessionId2 = ConversationLead() 51 | await store.set_key_value(sessionId1, 'key0', 'value0') 52 | await store.set_key_value(sessionId1, 'key1', 'value1') 53 | await store.set_key_value(sessionId2, 'key0', 'value0') 54 | await store.set_key_value(sessionId2, 'key1', 'value1') 55 | 56 | s = await store.get_store(sessionId1) 57 | assert s == {'key0': 'value0', 'key1': 'value1'} 58 | 59 | s = await store.get_store(sessionId2) 60 | assert s == {'key0': 'value0', 'key1': 'value1'} 61 | 62 | await store.clear_all_stores() 63 | s = await store.get_store(sessionId1) 64 | assert s == None 65 | 66 | s = await store.get_store(sessionId2) 67 | assert s == None 68 | -------------------------------------------------------------------------------- /tests/text2vec/ollama_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from cel.rag.stores.chroma.chroma_store import ChromaStore 3 | from cel.rag.text2vec.cached_ollama import CachedOllamaEmbedding 4 | import os 5 | 6 | is_github_actions = 'OPENAI_API_KEY' not in os.environ 7 | 8 | 9 | texts=[ 10 | "This is a document about pineapple", 11 | "This is a document about oranges", 12 | "This is a document about lemons", 13 | "This is a document about dogs", 14 | "This is a document about parrots", 15 | ] 16 | 17 | @pytest.fixture 18 | def 
client(): 19 | text2vec = CachedOllamaEmbedding() 20 | return ChromaStore(text2vec, collection_name='test_ollama_collection') 21 | 22 | 23 | @pytest.mark.skipif(is_github_actions, reason="Disable in Github Actions") 24 | def test_store(client): 25 | for t in texts: 26 | index = texts.index(t) 27 | client.upsert_text(f"{index}", t, {'metadata': 'metadata'}) 28 | 29 | res = client.search('parrots', top_k=3) 30 | 31 | # check length 32 | assert len(res) == 3 33 | # check nearest 34 | assert res[0].id == '4' 35 | # check order 36 | assert res[0].distance < res[1].distance < res[2].distance 37 | # check metadata 38 | assert res[0].metadata == {'metadata': 'metadata'} 39 | 40 | 41 | @pytest.mark.skipif(is_github_actions, reason="Disable in Github Actions") 42 | def test_store_get_vector(client): 43 | 44 | res = client.get_vector('4') 45 | 46 | assert res.id == '4' 47 | assert res.text == 'This is a document about parrots' 48 | assert res.metadata == {'metadata': 'metadata'} 49 | 50 | @pytest.mark.skipif(is_github_actions, reason="Disable in Github Actions") 51 | def test_get_similar(client): 52 | res = client.get_vector('1') 53 | 54 | similar = client.get_similar(res.vector, top_k=1) 55 | 56 | assert len(similar) == 1 57 | assert similar[0].id == '1' 58 | 59 | @pytest.mark.skipif(is_github_actions, reason="Disable in Github Actions") 60 | def test_delete(client): 61 | for t in texts: 62 | index = texts.index(t) 63 | client.upsert_text(f"{index}", t, {'metadata': 'metadata'}) 64 | 65 | client.delete('4') 66 | 67 | res = client.search('parrots', top_k=1) 68 | 69 | assert len(res) == 1 70 | # the nearest should not be parrots 71 | assert res[0].id != '4' -------------------------------------------------------------------------------- /tests/text2vec/openai_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import os 3 | from cel.rag.stores.chroma.chroma_store import ChromaStore 4 | from 
# NOTE(review): openai_test_redis.py defines a redis_client fixture that no test uses —
# the `client` fixture builds its own FakeRedis; drop one or wire them together.
# SECURITY: deepgram_tests.py embeds what looks like a live Telegram bot token inside
# the hard-coded file URL — a leaked credential committed to the repo. Revoke/rotate
# the token via BotFather and load test media from a local fixture or an env var
# instead. The deepgram test is also marked @pytest.mark.asyncio yet calls dg.STT(...)
# synchronously — presumably the marker is unnecessary or STT should be awaited; TODO confirm.
cel.rag.text2vec.cached_openai import CachedOpenAIEmbedding 5 | import dotenv 6 | dotenv.load_dotenv() 7 | 8 | is_github_actions = 'OPENAI_API_KEY' not in os.environ 9 | 10 | 11 | texts=[ 12 | "This is a document about pineapple", 13 | "This is a document about oranges", 14 | "This is a document about lemons", 15 | "This is a document about dogs", 16 | "This is a document about parrots", 17 | ] 18 | 19 | @pytest.fixture 20 | def client(): 21 | text2vec = CachedOpenAIEmbedding() 22 | return ChromaStore(text2vec, collection_name='test_openai_collection') 23 | 24 | 25 | @pytest.mark.skipif(is_github_actions, reason="Disable in Github Actions") 26 | def test_store(client): 27 | for t in texts: 28 | index = texts.index(t) 29 | client.upsert_text(f"{index}", t, {'metadata': 'metadata'}) 30 | 31 | res = client.search('parrots', top_k=3) 32 | 33 | # check length 34 | assert len(res) == 3 35 | # check nearest 36 | assert res[0].id == '4' 37 | # check order 38 | assert res[0].distance < res[1].distance < res[2].distance 39 | # check metadata 40 | assert res[0].metadata == {'metadata': 'metadata'} 41 | 42 | 43 | @pytest.mark.skipif(is_github_actions, reason="Disable in Github Actions") 44 | def test_store_get_vector(client): 45 | 46 | res = client.get_vector('4') 47 | 48 | assert res.id == '4' 49 | assert res.text == 'This is a document about parrots' 50 | assert res.metadata == {'metadata': 'metadata'} 51 | 52 | @pytest.mark.skipif(is_github_actions, reason="Disable in Github Actions") 53 | def test_get_similar(client): 54 | 55 | res = client.get_vector('1') 56 | 57 | similar = client.get_similar(res.vector, top_k=1) 58 | 59 | assert len(similar) == 1 60 | assert similar[0].id == '1' 61 | 62 | 63 | @pytest.mark.skipif(is_github_actions, reason="Disable in Github Actions") 64 | def test_delete(client): 65 | for t in texts: 66 | index = texts.index(t) 67 | client.upsert_text(f"{index}", t, {'metadata': 'metadata'}) 68 | 69 | client.delete('4') 70 | 71 | res = 
client.search('parrots', top_k=1) 72 | 73 | assert len(res) == 1 74 | # the nearest should not be parrots 75 | assert res[0].id != '4' -------------------------------------------------------------------------------- /tests/text2vec/openai_test_redis.py: -------------------------------------------------------------------------------- 1 | import fakeredis 2 | import pytest 3 | from cel.rag.text2vec.cache.redis_cache import RedisCache 4 | from cel.rag.stores.chroma.chroma_store import ChromaStore 5 | from cel.rag.text2vec.cached_openai import CachedOpenAIEmbedding 6 | import dotenv 7 | dotenv.load_dotenv() 8 | 9 | 10 | texts=[ 11 | "This is a document about pineapple", 12 | "This is a document about oranges", 13 | "This is a document about lemons", 14 | "This is a document about dogs", 15 | "This is a document about parrots", 16 | ] 17 | @pytest.fixture 18 | def redis_client(): 19 | redis_client = fakeredis.FakeRedis() 20 | return redis_client 21 | @pytest.fixture 22 | def client(): 23 | redis_client = fakeredis.FakeRedis() 24 | text2vec = CachedOpenAIEmbedding(cache_backend=RedisCache(redis=redis_client)) 25 | return ChromaStore(text2vec, collection_name='test_openai_collection') 26 | 27 | 28 | 29 | def test_store(client): 30 | for t in texts: 31 | index = texts.index(t) 32 | client.upsert_text(f"{index}", t, {'metadata': 'metadata'}) 33 | 34 | res = client.search('parrots', top_k=3) 35 | 36 | # check length 37 | assert len(res) == 3 38 | # check nearest 39 | assert res[0].id == '4' 40 | # check order 41 | assert res[0].distance < res[1].distance < res[2].distance 42 | # check metadata 43 | assert res[0].metadata == {'metadata': 'metadata'} 44 | 45 | 46 | def test_store_get_vector(client): 47 | 48 | res = client.get_vector('4') 49 | 50 | assert res.id == '4' 51 | assert res.text == 'This is a document about parrots' 52 | assert res.metadata == {'metadata': 'metadata'} 53 | 54 | def test_get_similar(client): 55 | 56 | res = client.get_vector('1') 57 | 58 | similar = 
client.get_similar(res.vector, top_k=1) 59 | 60 | assert len(similar) == 1 61 | assert similar[0].id == '1' 62 | 63 | 64 | def test_delete(client): 65 | for t in texts: 66 | index = texts.index(t) 67 | client.upsert_text(f"{index}", t, {'metadata': 'metadata'}) 68 | 69 | client.delete('4') 70 | 71 | res = client.search('parrots', top_k=1) 72 | 73 | assert len(res) == 1 74 | # the nearest should not be parrots 75 | assert res[0].id != '4' -------------------------------------------------------------------------------- /tests/voice/deepgram_tests.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from cel.voice.deepgram_adapter import DeepgramAdapter 4 | 5 | 6 | # TESTS 7 | # ------------------------------------------- 8 | @pytest.mark.asyncio 9 | async def test_deepgram_stt(): 10 | 11 | dg = DeepgramAdapter() 12 | text = dg.STT("https://api.telegram.org/file/bot5843053461:AAHjt8DMBEjFrjuep4i3HblRwKTmQZeRy_A/voice/file_1.oga") 13 | assert len(text) > 5 14 | assert 'hola' in text.lower() --------------------------------------------------------------------------------