├── .gitignore ├── LICENSE ├── PIPREADME.md ├── README.md ├── assets ├── flow_example.jpg ├── flow_graph_example.jpg ├── flow_logo-round.png ├── flow_logo.png ├── intelli_concept.png └── samples │ ├── complex_workflow.png │ ├── length_based_routing.png │ ├── multi_assistant_flow.jpg │ └── travel_assistant_flow.png ├── instructions ├── how to docs │ ├── DYNAMIC_TOOL.md │ ├── FLOW_AUTO_SAVE.md │ └── GEMINI_LATEST_FEATURES.md ├── publish.sh ├── run_integration_text.sh └── useful_comands.sh ├── intelli ├── .example.env ├── __init__.py ├── config.py ├── controller │ ├── __init__.py │ ├── remote_embed_model.py │ ├── remote_image_model.py │ ├── remote_recognition_model.py │ ├── remote_speech_model.py │ └── remote_vision_model.py ├── flow │ ├── __init__.py │ ├── agents │ │ ├── __init__.py │ │ ├── agent.py │ │ ├── handlers.py │ │ └── kagent.py │ ├── dynamic_connector.py │ ├── flow.py │ ├── input │ │ ├── __init__.py │ │ ├── agent_input.py │ │ └── task_input.py │ ├── processors │ │ ├── __init__.py │ │ └── basic_processor.py │ ├── sequence_flow.py │ ├── store │ │ ├── __init__.py │ │ ├── basememory.py │ │ ├── dbmemory.py │ │ └── memory.py │ ├── tasks │ │ ├── __init__.py │ │ └── task.py │ ├── template │ │ ├── __init__.py │ │ └── basic_template.py │ ├── tool_connector.py │ ├── types.py │ └── utils │ │ ├── __init__.py │ │ ├── dynamic_utils.py │ │ └── flow_helper.py ├── function │ ├── __init__.py │ └── chatbot.py ├── mcp │ ├── __init__.py │ ├── dataframe_utils.py │ └── utils.py ├── model │ ├── __init__.py │ └── input │ │ ├── __init__.py │ │ ├── chatbot_input.py │ │ ├── embed_input.py │ │ ├── image_input.py │ │ ├── text_recognition_input.py │ │ ├── text_speech_input.py │ │ └── vision_input.py ├── requirements.txt ├── resource │ ├── __init__.py │ └── templates │ │ ├── __init__.py │ │ └── augmented_chatbot.in ├── test │ ├── __init__.py │ ├── integration │ │ ├── __init__.py │ │ ├── data │ │ │ └── sample_data.csv │ │ ├── mcp_dataframe_server.py │ │ ├── mcp_math_server.py │ │ ├── test_anthropic_wrapper.py │ │ ├── test_azure_chatbot.py │ │ ├── test_chatbot.py │ │ ├── test_chatbot_cpp.py │ │ ├── test_chatbot_nvidia.py │ │ ├── test_chatbot_tools.py │ │ ├── test_chatbot_vllm.py │ │ ├── test_chatbot_with_data.py │ │ ├── test_cohereai_wrapper.py │ │ ├── test_dynamic_flow.py │ │ ├── test_elevenlabs_wrapper.py │ │ ├── test_flow_auto_save.py │ │ ├── test_flow_icons.py │ │ ├── test_flow_map.py │ │ ├── test_flow_master.py │ │ ├── test_flow_mcp_tools.py │ │ ├── test_flow_memory.py │ │ ├── test_flow_sequence.py │ │ ├── test_flow_tool_routing.py │ │ ├── test_flow_with_dbmemory.py │ │ ├── test_gemini_latest_features.py │ │ ├── test_geminiai_wrapper.py │ │ ├── test_googleai_wrapper.py │ │ ├── test_intellicloud_wrapper.py │ │ ├── test_keras_agent.py │ │ ├── test_keras_whisper.py │ │ ├── test_llama_cpp_extend_wrapper.py │ │ ├── test_llama_cpp_wrapper.py │ │ ├── test_mcp_agent.py │ │ ├── test_mcp_dataframe_flow.py │ │ ├── test_mcp_openai_flow.py │ │ ├── test_memory.py │ │ ├── test_mistralai_wrapper.py │ │ ├── test_nvidia_wrapper.py │ │ ├── test_nvidia_wrapper_nim.py │ │ ├── test_openai_wrapper.py │ │ ├── test_remote_embed_model.py │ │ ├── test_remote_embed_model_nvidia.py │ │ ├── test_remote_image_model.py │ │ ├── test_remote_recognition_model.py │ │ ├── test_remote_speech_model.py │ │ ├── test_remote_vision_model.py │ │ ├── test_stability_wrapper.py │ │ └── test_vllm_wrapper.py │ └── unit │ │ └── test_chatbot_input.py ├── test_gpt_image_1.py ├── utils │ ├── __init__.py │ ├── cohere_stream_parser.py │ ├── conn_helper.py │ ├── 
dataframe_mcp_utils.py │ ├── logging.py │ ├── proxy_helper.py │ ├── system_helper.py │ └── whisper_helper.py └── wrappers │ ├── __init__.py │ ├── anthropic_wrapper.py │ ├── cohereai_wrapper.py │ ├── elevenlabs_wrapper.py │ ├── geminiai_wrapper.py │ ├── googleai_wrapper.py │ ├── intellicloud_wrapper.py │ ├── keras_wrapper.py │ ├── llama_cpp_wrapper.py │ ├── mcp_config.py │ ├── mcp_wrapper.py │ ├── mistralai_wrapper.py │ ├── nvidia_wrapper.py │ ├── openai_wrapper.py │ ├── stability_wrapper.py │ └── vllm_wrapper.py ├── sample ├── basic_mcp │ ├── README.md │ ├── math_flow_client.py │ └── mcp_math_server.py ├── flow_multimodel.py ├── http_dataframe_mcp │ ├── http_dataframe_flow_client.py │ ├── http_mcp_dataframe_server.py │ └── sample_data.csv └── http_mcp │ ├── README.md │ ├── http_math_flow_client.py │ └── http_mcp_calculator_server.py └── setup.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 160 | .idea/ 161 | 162 | # custom 163 | temp 164 | .DS_Store 165 | **/.DS_Store 166 | **/._.DS_Store 167 | intelli/test/integration/test_prod_data.py 168 | intelli/init_prompt.txt 169 | -------------------------------------------------------------------------------- /PIPREADME.md: -------------------------------------------------------------------------------- 1 | # Intelli 2 |
12 | 13 | A framework for creating chatbots and AI agent workflows. It enables seamless integration with multiple AI models, including OpenAI, LLaMA, deepseek, Stable Diffusion, and Mistral, through a unified access layer. Intelli also supports Model Context Protocol (MCP) for standardized interaction with AI models. 14 | 15 | ## Features 16 | 17 | - Unified API for multiple AI providers. 18 | - Async flow-based agent orchestration. 19 | - Multi-modal support (text, images, speech). 20 | - Model Context Protocol (MCP) integration for standardized model interactions. 21 | 22 | ```bash 23 | pip install intelli[mcp] 24 | ``` 25 | 26 | # Latest changes 27 | - Support MCP capabilities [doc](https://docs.intellinode.ai/docs/python/mcp/get-started). 28 | - Improved multi-model collaboration [doc](https://docs.intellinode.ai/docs/python/use-cases/travel-assistant). 29 | - Support llama.cpp & GGUF models for fast inference [doc](https://docs.intellinode.ai/docs/python/offline-chatbot/llamacpp). 30 | - Add deepseek and Llama3 integration. 31 | - Add offline speech2text Whisper [doc](https://docs.intellinode.ai/docs/python/offline-chatbot/whisper). 32 | - Add Anthropic claude 3.7. 33 | 34 | For detailed instructions, refer to [intelli documentation](https://docs.intellinode.ai/docs/python). 35 | 36 | # Code Examples 37 | 38 | ## Create Chatbot 39 | Switch between multiple chatbot providers without changing your code. 40 | 41 | ```python 42 | from intelli.function.chatbot import Chatbot, ChatProvider 43 | from intelli.model.input.chatbot_input import ChatModelInput 44 | 45 | def call_chatbot(provider, model=None, api_key=None, options=None): 46 | # prepare common input 47 | input = ChatModelInput("You are a helpful assistant.", model) 48 | input.add_user_message("What is the capital of France?") 49 | 50 | # creating chatbot instance 51 | chatbot = Chatbot(api_key, provider, options=options) 52 | response = chatbot.chat(input) 53 | 54 | return response 55 | 56 | # call chatGPT 57 | call_chatbot(ChatProvider.OPENAI, "gpt-4o") 58 | 59 | # call claude3 60 | call_chatbot(ChatProvider.ANTHROPIC, "claude-3-7-sonnet-20250219") 61 | 62 | # call google gemini 63 | call_chatbot(ChatProvider.GEMINI) 64 | 65 | # Call NVIDIA Deepseek 66 | call_chatbot(ChatProvider.NVIDIA, "deepseek-ai/deepseek-r1") 67 | 68 | # Call vLLM (self-hosted) 69 | call_chatbot(ChatProvider.VLLM, "meta-llama/Llama-3.1-8B-Instruct", options={"baseUrl": "http://localhost:8000"}) 70 | ``` 71 | 72 | ## Chat With Docs 73 | Chat with your docs using multiple LLMs. To connect your data, visit the [IntelliNode App](https://app.intellinode.ai/), start a project using the Document option, upload your documents or images, and copy the generated One Key. This key will be used to connect the chatbot to your uploaded data. 
74 | 75 | ```python 76 | # creating chatbot with the intellinode one key 77 | bot = Chatbot(YOUR_OPENAI_API_KEY, "openai", {"one_key": YOUR_ONE_KEY}) 78 | 79 | input = ChatModelInput("You are a helpful assistant.", "gpt-4o") 80 | input.add_user_message("What is the procedure for requesting a refund according to the user manual?") 81 | 82 | response = bot.chat(input) 83 | ``` 84 | 85 | ## Generate Images 86 | Use the image controller to generate art from multiple models with minimal code changes: 87 | ```python 88 | from intelli.controller.remote_image_model import RemoteImageModel 89 | from intelli.model.input.image_input import ImageModelInput 90 | 91 | # model details - change only two words to switch 92 | provider = "openai" 93 | model_name = "dall-e-3" 94 | 95 | # prepare the input details 96 | prompt = "cartoonishly-styled solitary snake logo, looping elegantly to form both the body of the python and an abstract play on data nodes." 97 | image_input = ImageModelInput(prompt=prompt, width=1024, height=1024, model=model_name) 98 | 99 | # call the model openai/stability 100 | wrapper = RemoteImageModel(your_api_key, provider) 101 | results = wrapper.generate_images(image_input) 102 | ``` 103 | 104 | ## Create AI Flows 105 | You can create a flow of tasks executed by different AI models. Here's an example of creating a blog post flow: 106 | - ChatGPT agent to write a post. 107 | - Google Gemini agent to write an image description. 108 | - Stable Diffusion agent to generate images. 109 | 110 | ```python 111 | from intelli.flow.agents.agent import Agent 112 | from intelli.flow.tasks.task import Task 113 | from intelli.flow.sequence_flow import SequenceFlow 114 | from intelli.flow.input.task_input import TextTaskInput 115 | from intelli.flow.processors.basic_processor import TextProcessor 116 | 117 | # define agents 118 | blog_agent = Agent(agent_type='text', provider='openai', mission='write blog posts', model_params={'key': YOUR_OPENAI_API_KEY, 'model': 'gpt-4'}) 119 | copy_agent = Agent(agent_type='text', provider='gemini', mission='generate description', model_params={'key': YOUR_GEMINI_API_KEY, 'model': 'gemini'}) 120 | artist_agent = Agent(agent_type='image', provider='stability', mission='generate image', model_params={'key': YOUR_STABILITY_API_KEY}) 121 | 122 | # define tasks 123 | task1 = Task(TextTaskInput('blog post about electric cars'), blog_agent, log=True) 124 | task2 = Task(TextTaskInput('Generate short image description for image model'), copy_agent, pre_process=TextProcessor.text_head, log=True) 125 | task3 = Task(TextTaskInput('Generate cartoon style image'), artist_agent, log=True) 126 | 127 | # start sequence flow 128 | flow = SequenceFlow([task1, task2, task3], log=True) 129 | final_result = flow.start() 130 | ``` 131 | 132 | To build async AI flows with multiple paths, refer to the [flow tutorial](https://doc.intellinode.ai/docs/python/flows/async-flow) or the sketch below. 133 | 134 | # Pillars 135 | - **The wrapper layer** provides low-level access to the latest AI models. 136 | - **The controller layer** offers a unified input to any AI model by handling the differences. 137 | - **The function layer** provides abstract functionality that extends based on the app's use cases. 138 | - **Flows**: create a flow of AI agents working toward user tasks.
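The sequence example above runs tasks strictly one after another. For the async, multi-path flows mentioned above, the package also re-exports a graph-based `Flow` from `intelli.flow`. The sketch below is a minimal illustration, not the documented API: the `tasks`/`map_paths` constructor arguments, the coroutine `start()` method, and the shape of the returned results are assumptions based on the flow tutorial, and the task names, keys, and model names are placeholders.

```python
import asyncio

from intelli.flow import Agent, Flow, Task, TextTaskInput

# agents follow the same pattern as the sequence example above
blog_agent = Agent(agent_type='text', provider='openai', mission='write blog posts',
                   model_params={'key': YOUR_OPENAI_API_KEY, 'model': 'gpt-4'})
desc_agent = Agent(agent_type='text', provider='gemini', mission='generate description',
                   model_params={'key': YOUR_GEMINI_API_KEY, 'model': 'gemini'})

write_task = Task(TextTaskInput('blog post about electric cars'), blog_agent, log=True)
describe_task = Task(TextTaskInput('Generate a short image description'), desc_agent, log=True)

# tasks are named nodes; map_paths lists which tasks consume each task's output
# (assumed signature - see the async flow tutorial for the authoritative form)
flow = Flow(
    tasks={'write': write_task, 'describe': describe_task},
    map_paths={'write': ['describe'], 'describe': []},
    log=True,
)

# start() is assumed to be a coroutine returning the outputs of non-excluded tasks
results = asyncio.run(flow.start())
print(results)
```

Because the flow is described as a graph rather than a list, independent branches can be scheduled concurrently once their inputs are ready, which is the main reason to prefer `Flow` over `SequenceFlow` for multi-path workflows.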
139 | -------------------------------------------------------------------------------- /assets/flow_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligentnode/Intelli/dd1b55ab120b6e6e53a55446dcd974b840ae004d/assets/flow_example.jpg -------------------------------------------------------------------------------- /assets/flow_graph_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligentnode/Intelli/dd1b55ab120b6e6e53a55446dcd974b840ae004d/assets/flow_graph_example.jpg -------------------------------------------------------------------------------- /assets/flow_logo-round.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligentnode/Intelli/dd1b55ab120b6e6e53a55446dcd974b840ae004d/assets/flow_logo-round.png -------------------------------------------------------------------------------- /assets/flow_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligentnode/Intelli/dd1b55ab120b6e6e53a55446dcd974b840ae004d/assets/flow_logo.png -------------------------------------------------------------------------------- /assets/intelli_concept.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligentnode/Intelli/dd1b55ab120b6e6e53a55446dcd974b840ae004d/assets/intelli_concept.png -------------------------------------------------------------------------------- /assets/samples/complex_workflow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligentnode/Intelli/dd1b55ab120b6e6e53a55446dcd974b840ae004d/assets/samples/complex_workflow.png -------------------------------------------------------------------------------- /assets/samples/length_based_routing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligentnode/Intelli/dd1b55ab120b6e6e53a55446dcd974b840ae004d/assets/samples/length_based_routing.png -------------------------------------------------------------------------------- /assets/samples/multi_assistant_flow.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligentnode/Intelli/dd1b55ab120b6e6e53a55446dcd974b840ae004d/assets/samples/multi_assistant_flow.jpg -------------------------------------------------------------------------------- /assets/samples/travel_assistant_flow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligentnode/Intelli/dd1b55ab120b6e6e53a55446dcd974b840ae004d/assets/samples/travel_assistant_flow.png -------------------------------------------------------------------------------- /instructions/publish.sh: -------------------------------------------------------------------------------- 1 | python3 setup.py sdist bdist_wheel 2 | 3 | twine upload dist/* 4 | -------------------------------------------------------------------------------- /instructions/run_integration_text.sh: -------------------------------------------------------------------------------- 1 | ## wrapper tests 2 | # mistral 3 | python -m unittest intelli.test.integration.test_mistralai_wrapper 4 | 5 | # gemini 6 | python -m unittest 
intelli.test.integration.test_geminiai_wrapper 7 | 8 | # openai 9 | python -m unittest intelli.test.integration.test_openai_wrapper 10 | 11 | # intellicloud 12 | python -m unittest intelli.test.integration.test_intellicloud_wrapper 13 | 14 | # stability testing 15 | python -m unittest intelli.test.integration.test_stability_wrapper 16 | 17 | # google 18 | python -m unittest intelli.test.integration.test_googleai_wrapper 19 | 20 | # anthropic 21 | python -m unittest intelli.test.integration.test_anthropic_wrapper 22 | 23 | # wrapper with llama.cpp 24 | pytest -s intelli/test/integration/test_llama_cpp_wrapper.py 25 | 26 | ## controllers 27 | # embedding 28 | python -m unittest intelli.test.integration.test_remote_embed_model 29 | 30 | # images 31 | python -m unittest intelli.test.integration.test_remote_image_model 32 | 33 | # vision 34 | python -m unittest intelli.test.integration.test_remote_vision_model 35 | 36 | # speech 37 | python -m unittest intelli.test.integration.test_remote_speech_model 38 | 39 | ## functions 40 | # chatbot 41 | python -m unittest intelli.test.integration.test_chatbot 42 | 43 | # chatbot azure 44 | python -m unittest intelli.test.integration.test_azure_chatbot 45 | 46 | # chatbot with data 47 | python -m unittest intelli.test.integration.test_chatbot_with_data 48 | 49 | # chatbot with llama.cpp 50 | pytest -s intelli/test/integration/test_chatbot_cpp.py 51 | 52 | ## flows 53 | # basic flow 54 | python -m unittest intelli.test.integration.test_flow_sequence 55 | # map flow 56 | python -m unittest intelli.test.integration.test_flow_map 57 | # keras nlp 58 | python -m unittest intelli.test.integration.test_keras_agent 59 | # memory 60 | python -m unittest intelli.test.integration.test_flow_memory 61 | python -m unittest intelli.test.integration.test_flow_with_dbmemory 62 | 63 | # mcp 64 | python -m unittest intelli.test.integration.test_mcp_openai_flow 65 | python -m unittest intelli.test.integration.test_mcp_dataframe_flow 66 | 67 | 68 | # mcp tools routing 69 | python -m unittest intelli.test.integration.test_flow_mcp_tools 70 | python -m unittest intelli.test.integration.test_chatbot_tools 71 | python -m unittest intelli.test.integration.test_flow_tool_routing 72 | 73 | 74 | -------------------------------------------------------------------------------- /instructions/useful_comands.sh: -------------------------------------------------------------------------------- 1 | # print the project tree 2 | tree -I '__pycache__|test|Instructions|assets|intelli.egg-info|build|dist|instructions' 3 | 4 | # print python files 5 | find . 
-name "*.py" -type f | while read file; do echo -e "\n===== $file ====="; cat "$file"; done -------------------------------------------------------------------------------- /intelli/.example.env: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY= 2 | COHERE_API_KEY= 3 | STABILITY_API_KEY= 4 | HUGGING_API_KEY= 5 | AZURE_OPENAI_API_KEY= 6 | REPLICATE_API_KEY= 7 | MISTRAL_API_KEY= 8 | INTELLI_ONE_KEY= 9 | GEMINI_API_KEY= 10 | ANTHROPIC_API_KEY= 11 | KAGGLE_USERNAME= 12 | KAGGLE_API_KEY= -------------------------------------------------------------------------------- /intelli/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligentnode/Intelli/dd1b55ab120b6e6e53a55446dcd974b840ae004d/intelli/__init__.py -------------------------------------------------------------------------------- /intelli/controller/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligentnode/Intelli/dd1b55ab120b6e6e53a55446dcd974b840ae004d/intelli/controller/__init__.py -------------------------------------------------------------------------------- /intelli/controller/remote_embed_model.py: -------------------------------------------------------------------------------- 1 | from intelli.model.input.embed_input import EmbedInput 2 | from intelli.wrappers.geminiai_wrapper import GeminiAIWrapper 3 | from intelli.wrappers.mistralai_wrapper import MistralAIWrapper 4 | from intelli.wrappers.openai_wrapper import OpenAIWrapper 5 | from intelli.wrappers.nvidia_wrapper import NvidiaWrapper 6 | from intelli.wrappers.vllm_wrapper import VLLMWrapper 7 | 8 | 9 | class RemoteEmbedModel: 10 | def __init__(self, api_key, provider_name, options=None): 11 | self.provider_name = provider_name.lower() 12 | self.options = options or {} 13 | providers = { 14 | 'openai': OpenAIWrapper, 15 | 'mistral': MistralAIWrapper, 16 | 'gemini': GeminiAIWrapper, 17 | 'nvidia': NvidiaWrapper, 18 | 'vllm': VLLMWrapper 19 | } 20 | 21 | if self.provider_name == 'vllm': 22 | base_url = self.options.get("baseUrl") 23 | if not base_url: 24 | raise ValueError("VLLM provider requires baseUrl in options") 25 | self.provider = providers[self.provider_name](base_url, api_key) 26 | elif self.provider_name in providers: 27 | self.provider = providers[self.provider_name](api_key) 28 | else: 29 | if api_key in providers: 30 | raise Exception(f"Send the provider name as second parameter (api_key, provider_name).") 31 | else: 32 | raise Exception(f"Provider {provider_name} not supported.") 33 | 34 | def get_embeddings(self, embed_input): 35 | if not isinstance(embed_input, EmbedInput): 36 | raise Exception("embed_input must be an instance of EmbedInput.") 37 | 38 | if self.provider_name == 'openai': 39 | params = embed_input.get_openai_inputs() 40 | elif self.provider_name == 'mistral': 41 | params = embed_input.get_mistral_inputs() 42 | elif self.provider_name == 'gemini': 43 | params = embed_input.get_gemini_inputs() 44 | elif self.provider_name == 'nvidia': 45 | params = embed_input.get_nvidia_inputs() 46 | elif self.provider_name == 'vllm': 47 | params = embed_input.get_vllm_inputs() 48 | else: 49 | raise Exception("Invalid provider name.") 50 | 51 | return self.provider.get_embeddings(params) 52 | -------------------------------------------------------------------------------- /intelli/controller/remote_image_model.py: 
-------------------------------------------------------------------------------- 1 | from intelli.model.input.image_input import ImageModelInput 2 | from intelli.wrappers.openai_wrapper import OpenAIWrapper 3 | from intelli.wrappers.stability_wrapper import StabilityAIWrapper 4 | from intelli.wrappers.geminiai_wrapper import GeminiAIWrapper 5 | 6 | 7 | class RemoteImageModel: 8 | supported_image_models = { 9 | "openai": OpenAIWrapper, 10 | "stability": StabilityAIWrapper, 11 | "gemini": GeminiAIWrapper, 12 | } 13 | 14 | def __init__(self, api_key, provider="openai"): 15 | if provider in self.supported_image_models: 16 | self.provider_name = provider 17 | self.provider = self.supported_image_models[provider](api_key) 18 | else: 19 | supported_models = ", ".join(self.supported_image_models.keys()) 20 | raise ValueError(f"The received provider {provider} not supported. Supported providers: {supported_models}") 21 | 22 | def generate_images(self, image_input): 23 | if isinstance(image_input, dict): 24 | inputs = image_input 25 | elif isinstance(image_input, ImageModelInput): 26 | if self.provider_name == "gemini": 27 | inputs = image_input.get_gemini_inputs() 28 | elif self.provider_name == "openai": 29 | inputs = image_input.get_openai_inputs() 30 | else: # stability 31 | inputs = image_input.get_stability_inputs() 32 | else: 33 | raise ValueError("image_input must be an instance of ImageModelInput or a dictionary.") 34 | 35 | if self.provider_name == "gemini": 36 | # Extract model override if provided 37 | model_override = inputs.get("model") 38 | results = self.provider.generate_image( 39 | inputs.get("prompt", ""), 40 | inputs.get("config_params"), 41 | model_override=model_override 42 | ) 43 | # Extract image data from Gemini response 44 | images = [] 45 | if 'candidates' in results: 46 | for candidate in results['candidates']: 47 | if 'content' in candidate and 'parts' in candidate['content']: 48 | for part in candidate['content']['parts']: 49 | if 'inline_data' in part and part['inline_data'].get('mime_type', '').startswith('image/'): 50 | images.append(part['inline_data']['data']) 51 | return images 52 | elif self.provider_name == "openai": 53 | results = self.provider.generate_images(inputs) 54 | return [data['url'] if 'url' in data else data['b64_json'] for data in results['data']] 55 | else: # stability 56 | results = self.provider.generate_images(inputs) 57 | return [image_obj['base64'] for image_obj in results['artifacts']] 58 | -------------------------------------------------------------------------------- /intelli/controller/remote_speech_model.py: -------------------------------------------------------------------------------- 1 | from intelli.model.input.text_speech_input import Text2SpeechInput 2 | from intelli.wrappers.googleai_wrapper import GoogleAIWrapper 3 | from intelli.wrappers.openai_wrapper import OpenAIWrapper 4 | from intelli.wrappers.elevenlabs_wrapper import ElevenLabsWrapper 5 | from intelli.wrappers.geminiai_wrapper import GeminiAIWrapper 6 | 7 | SupportedSpeechModels = { 8 | 'GOOGLE': 'google', 9 | 'OPENAI': 'openai', 10 | 'ELEVENLABS': 'elevenlabs', 11 | 'GEMINI': 'gemini', 12 | } 13 | 14 | 15 | class RemoteSpeechModel: 16 | 17 | def __init__(self, key_value, provider=None): 18 | if not provider: 19 | provider = SupportedSpeechModels['GOOGLE'] 20 | 21 | supported_models = self.get_supported_models() 22 | 23 | if provider in supported_models: 24 | self.initiate(key_value, provider) 25 | else: 26 | models = " - ".join(supported_models) 27 | raise 
ValueError(f"The received key value is not supported. Send any model from: {models}") 28 | 29 | def initiate(self, key_value, key_type): 30 | self.key_type = key_type 31 | if key_type == SupportedSpeechModels['GOOGLE']: 32 | self.google_wrapper = GoogleAIWrapper(key_value) 33 | elif key_type == SupportedSpeechModels['OPENAI']: 34 | self.openai_wrapper = OpenAIWrapper(key_value) 35 | elif key_type == SupportedSpeechModels['ELEVENLABS']: 36 | self.elevenlabs_wrapper = ElevenLabsWrapper(key_value) 37 | elif key_type == SupportedSpeechModels['GEMINI']: 38 | self.gemini_wrapper = GeminiAIWrapper(key_value) 39 | else: 40 | raise ValueError('Invalid provider name') 41 | 42 | def get_supported_models(self): 43 | return list(SupportedSpeechModels.values()) 44 | 45 | def generate_speech(self, input_params): 46 | if not isinstance(input_params, Text2SpeechInput): 47 | raise ValueError('Invalid input: Must be an instance of Text2SpeechInput') 48 | 49 | if self.key_type == SupportedSpeechModels['GOOGLE']: 50 | params = input_params.get_google_input() 51 | response = self.google_wrapper.generate_speech(params) 52 | return response 53 | 54 | elif self.key_type == SupportedSpeechModels['OPENAI']: 55 | params = input_params.get_openai_input() 56 | response = self.openai_wrapper.text_to_speech(params) 57 | return response 58 | 59 | elif self.key_type == SupportedSpeechModels['ELEVENLABS']: 60 | params = input_params.get_elevenlabs_input() 61 | response = self.elevenlabs_wrapper.text_to_speech( 62 | text=params['text'], 63 | voice_id=params['voice_id'], 64 | model_id=params.get('model_id'), 65 | output_format=params.get('output_format', 'mp3_44100_128') 66 | ) 67 | return response 68 | 69 | elif self.key_type == SupportedSpeechModels['GEMINI']: 70 | params = input_params.get_gemini_input() 71 | response = self.gemini_wrapper.generate_speech(params['text'], params.get('voice_config')) 72 | # Extract audio data from Gemini response 73 | if 'candidates' in response: 74 | for candidate in response['candidates']: 75 | if 'content' in candidate and 'parts' in candidate['content']: 76 | for part in candidate['content']['parts']: 77 | if 'inline_data' in part and part['inline_data'].get('mime_type', '').startswith('audio/'): 78 | return part['inline_data']['data'] 79 | return response 80 | else: 81 | raise ValueError('The keyType is not supported') 82 | 83 | def list_voices(self): 84 | """Get available voices for the current provider""" 85 | if self.key_type == SupportedSpeechModels['ELEVENLABS']: 86 | return self.elevenlabs_wrapper.list_voices() 87 | else: 88 | raise ValueError(f"Voice listing not supported for provider: {self.key_type}") 89 | 90 | def stream_speech(self, input_params): 91 | """Stream speech for providers that support it""" 92 | if not isinstance(input_params, Text2SpeechInput): 93 | raise ValueError('Invalid input: Must be an instance of Text2SpeechInput') 94 | 95 | if self.key_type == SupportedSpeechModels['ELEVENLABS']: 96 | params = input_params.get_elevenlabs_input() 97 | response = self.elevenlabs_wrapper.stream_text_to_speech( 98 | text=params['text'], 99 | voice_id=params['voice_id'], 100 | model_id=params.get('model_id'), 101 | output_format=params.get('output_format', 'mp3_44100_128') 102 | ) 103 | return response 104 | else: 105 | raise ValueError(f"Streaming not supported for provider: {self.key_type}") 106 | -------------------------------------------------------------------------------- /intelli/controller/remote_vision_model.py: 
-------------------------------------------------------------------------------- 1 | from intelli.model.input.vision_input import VisionModelInput 2 | from intelli.wrappers.geminiai_wrapper import GeminiAIWrapper 3 | from intelli.wrappers.openai_wrapper import OpenAIWrapper 4 | from intelli.wrappers.googleai_wrapper import GoogleAIWrapper 5 | 6 | 7 | class RemoteVisionModel: 8 | supported_vision_models = { 9 | "openai": OpenAIWrapper, 10 | "gemini": GeminiAIWrapper, 11 | "google": GoogleAIWrapper, 12 | } 13 | 14 | def __init__(self, api_key, provider="openai"): 15 | self.api_key = api_key 16 | 17 | if provider in self.supported_vision_models: 18 | self.provider = provider 19 | self.provider_wrapper = self.supported_vision_models[provider](api_key) 20 | else: 21 | supported_models = ", ".join(self.supported_vision_models.keys()) 22 | raise ValueError( 23 | f"The provided provider {provider} not supported. Supported providers: {supported_models}" 24 | ) 25 | 26 | def image_to_text(self, vision_input): 27 | if isinstance(vision_input, dict): 28 | inputs = vision_input 29 | elif isinstance(vision_input, VisionModelInput): 30 | inputs = vision_input.get_provider_inputs(self.provider) 31 | else: 32 | raise ValueError( 33 | "vision_input must be an instance of VisionModelInput or a dictionary." 34 | ) 35 | 36 | if self.provider == "openai": 37 | return self.call_openai_vision(inputs) 38 | elif self.provider == "gemini": 39 | return self.call_gemini_vision(inputs) 40 | elif self.provider == "google": 41 | return self.call_google_vision(inputs) 42 | 43 | def call_openai_vision(self, inputs): 44 | data = self.provider_wrapper.image_to_text(inputs) 45 | return " ".join(choice["message"]["content"] for choice in data["choices"]) 46 | 47 | def call_gemini_vision(self, inputs): 48 | data = self.provider_wrapper.image_to_text_params(inputs) 49 | return " ".join( 50 | part["text"] for part in data["candidates"][0]["content"]["parts"] 51 | ) 52 | 53 | def call_google_vision(self, inputs): 54 | # Read the image file 55 | if "file_path" in inputs: 56 | with open(inputs["file_path"], "rb") as image_file: 57 | image_content = image_file.read() 58 | elif "image_content" in inputs: 59 | image_content = inputs["image_content"] 60 | else: 61 | raise ValueError( 62 | "Google Vision requires either 'file_path' or 'image_content'" 63 | ) 64 | 65 | result = self.provider_wrapper.describe_image(image_content) 66 | 67 | return result["summary"] 68 | -------------------------------------------------------------------------------- /intelli/flow/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Public API for the intelli.flow package. 3 | This file aggregates key classes and functions from submodules for easier access. 
4 | """ 5 | 6 | # Agents 7 | from intelli.flow.agents.agent import Agent 8 | from intelli.flow.agents.kagent import KerasAgent 9 | from intelli.flow.agents.handlers import get_agent_handler 10 | 11 | # Input types for tasks and agents 12 | from intelli.flow.input.task_input import TaskInput, TextTaskInput, ImageTaskInput 13 | from intelli.flow.input.agent_input import AgentInput, TextAgentInput, ImageAgentInput 14 | 15 | # Processors and templates 16 | from intelli.flow.processors.basic_processor import TextProcessor 17 | from intelli.flow.template.basic_template import TextInputTemplate 18 | 19 | # Core flow 20 | from intelli.flow.sequence_flow import SequenceFlow 21 | from intelli.flow.tasks.task import Task 22 | from intelli.flow.flow import Flow 23 | 24 | # Dynamic routing 25 | from intelli.flow.dynamic_connector import DynamicConnector, ConnectorMode 26 | from intelli.flow.tool_connector import ToolDynamicConnector 27 | 28 | # Types 29 | from intelli.flow.types import AgentTypes, InputTypes 30 | 31 | # Additional utilities 32 | from intelli.flow.utils.flow_helper import FlowHelper 33 | from intelli.flow.store.memory import Memory 34 | from intelli.flow.store.dbmemory import DBMemory 35 | 36 | try: 37 | from intelli.mcp import ( 38 | MCPServerBuilder, 39 | MCPJSONExtractor, 40 | create_mcp_preprocessor 41 | ) 42 | except ImportError: 43 | # MCP utilities may not be available if MCP isn't installed 44 | pass -------------------------------------------------------------------------------- /intelli/flow/agents/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligentnode/Intelli/dd1b55ab120b6e6e53a55446dcd974b840ae004d/intelli/flow/agents/__init__.py -------------------------------------------------------------------------------- /intelli/flow/dynamic_connector.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from typing import Callable, Dict, List, Any, Union, Optional 3 | 4 | 5 | class ConnectorMode(Enum): 6 | """Enum for different dynamic connector modes.""" 7 | 8 | CONTENT_BASED = "content" 9 | LENGTH_BASED = "length" 10 | ERROR_BASED = "error" 11 | TYPE_BASED = "type" 12 | CUSTOM = "custom" 13 | 14 | 15 | class DynamicConnector: 16 | """ 17 | A class to handle dynamic routing in a flow based on the output of a task. 18 | Allows for up to 4 possible paths based on the decision function. 19 | """ 20 | 21 | def __init__( 22 | self, 23 | decision_fn: Callable[[Any, str], str], 24 | destinations: Dict[str, str], 25 | name: str = "dynamic_connector", 26 | description: str = "Routes based on previous output", 27 | mode: ConnectorMode = ConnectorMode.CUSTOM, 28 | ): 29 | """ 30 | Initialize a dynamic connector. 
31 | 32 | Args: 33 | decision_fn: A function that takes (output, output_type) and returns a destination key 34 | destinations: A dictionary mapping destination keys to task names (max 4) 35 | name: Name of the connector (for visualization) 36 | description: Description of the connector logic 37 | mode: The connector mode (for visualization) 38 | """ 39 | if destinations and len(destinations) > 4: 40 | raise ValueError("Dynamic connector can have at most 4 destinations") 41 | 42 | self.decision_fn = decision_fn 43 | self.destinations = destinations 44 | self.name = name 45 | self.description = description 46 | self.mode = mode 47 | 48 | def get_next_task(self, output: Any, output_type: str) -> Optional[str]: 49 | """ 50 | Determine the next task based on the output and its type. 51 | 52 | Args: 53 | output: The output from the previous task 54 | output_type: The type of the output (text, image, audio, etc.) 55 | 56 | Returns: 57 | The name of the next task to execute, or None if no matching destination 58 | """ 59 | try: 60 | destination_key = self.decision_fn(output, output_type) 61 | return self.destinations.get(destination_key) 62 | except Exception as e: 63 | print(f"Error in dynamic connector {self.name}: {e}") 64 | return None 65 | -------------------------------------------------------------------------------- /intelli/flow/input/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligentnode/Intelli/dd1b55ab120b6e6e53a55446dcd974b840ae004d/intelli/flow/input/__init__.py -------------------------------------------------------------------------------- /intelli/flow/input/agent_input.py: -------------------------------------------------------------------------------- 1 | class AgentInput: 2 | def __init__(self, desc=None, img=None, audio=None): 3 | self.desc = desc 4 | self.img = img 5 | self.audio = audio 6 | 7 | 8 | class TextAgentInput(AgentInput): 9 | def __init__(self, desc): 10 | super().__init__(desc=desc) 11 | 12 | 13 | class ImageAgentInput(AgentInput): 14 | def __init__(self, desc, img): 15 | super().__init__(desc=desc, img=img) 16 | -------------------------------------------------------------------------------- /intelli/flow/input/task_input.py: -------------------------------------------------------------------------------- 1 | class TaskInput: 2 | def __init__(self, desc=None, img=None, audio=None): 3 | self.desc = desc 4 | self.img = img 5 | self.audio = audio 6 | 7 | 8 | class TextTaskInput(TaskInput): 9 | def __init__(self, desc): 10 | super().__init__(desc=desc) 11 | 12 | 13 | class ImageTaskInput(TaskInput): 14 | def __init__(self, desc, img): 15 | super().__init__(desc=desc, img=img) 16 | -------------------------------------------------------------------------------- /intelli/flow/processors/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligentnode/Intelli/dd1b55ab120b6e6e53a55446dcd974b840ae004d/intelli/flow/processors/__init__.py -------------------------------------------------------------------------------- /intelli/flow/processors/basic_processor.py: -------------------------------------------------------------------------------- 1 | class TextProcessor: 2 | 3 | @staticmethod 4 | def text_head(text, size=800): 5 | return text[:size] 6 | -------------------------------------------------------------------------------- /intelli/flow/sequence_flow.py: 
-------------------------------------------------------------------------------- 1 | from intelli.utils.logging import Logger 2 | from intelli.flow.types import Matcher, InputTypes 3 | 4 | 5 | class SequenceFlow: 6 | """ 7 | A simple sequential flow that executes tasks in the given order, 8 | passing the output of each task as input to the next task. 9 | 10 | This version handles compatibility between different input/output types 11 | for all agent types supported in the intelli library. 12 | """ 13 | 14 | def __init__(self, order, log=False, memory=None, output_memory_map=None): 15 | """Initialize the sequential flow with ordered tasks and optional memory.""" 16 | self.order = order 17 | self.log = log 18 | self.logger = Logger(log) 19 | 20 | # Memory handling 21 | if memory is not None: 22 | self.memory = memory 23 | else: 24 | # Import here to avoid circular imports 25 | from intelli.flow.store.memory import Memory 26 | 27 | self.memory = Memory() 28 | 29 | self.output_memory_map = output_memory_map or {} 30 | 31 | def start(self): 32 | """ 33 | Execute tasks in sequence, passing outputs as inputs. 34 | 35 | Returns: 36 | dict: Dictionary mapping task indices to task outputs 37 | """ 38 | result = {} 39 | flow_input = None 40 | flow_input_type = None 41 | 42 | for index, task in enumerate(self.order, start=1): 43 | # Log task execution 44 | self.logger.log_head(f"- Executing task {index}: {task.desc}") 45 | self.logger.log( 46 | f" Agent type: {task.agent.type}, Provider: {task.agent.provider}" 47 | ) 48 | 49 | # Check input compatibility 50 | expected_input_type = Matcher.input.get(task.agent.type) 51 | if flow_input is not None: 52 | if flow_input_type != expected_input_type: 53 | self.logger.log( 54 | f" Note: Previous output type ({flow_input_type}) differs from expected input type ({expected_input_type})" 55 | ) 56 | # For text output being fed to non-text input, we might need special handling 57 | if ( 58 | flow_input_type == InputTypes.TEXT.value 59 | and expected_input_type != InputTypes.TEXT.value 60 | ): 61 | self.logger.log( 62 | f" Warning: Passing text to {expected_input_type} agent may not work as expected" 63 | ) 64 | 65 | # Execute the task with previous output and memory 66 | task.execute(flow_input, input_type=flow_input_type, memory=self.memory) 67 | 68 | # Store output in memory if specified in output_memory_map 69 | task_index_str = f"task{index}" 70 | if task_index_str in self.output_memory_map: 71 | memory_key = self.output_memory_map[task_index_str] 72 | self.memory.store(memory_key, task.output) 73 | self.logger.log( 74 | f"Stored output of task {index} in memory with key '{memory_key}'" 75 | ) 76 | 77 | # Store the result if not excluded 78 | if not task.exclude: 79 | result[task_index_str] = task.output 80 | 81 | # Log output information 82 | self.logger.log(f" Output type: {task.output_type}") 83 | 84 | # Update flow input for next task 85 | flow_input = task.output 86 | flow_input_type = task.output_type 87 | 88 | return result 89 | -------------------------------------------------------------------------------- /intelli/flow/store/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligentnode/Intelli/dd1b55ab120b6e6e53a55446dcd974b840ae004d/intelli/flow/store/__init__.py -------------------------------------------------------------------------------- /intelli/flow/store/basememory.py: -------------------------------------------------------------------------------- 1 | from abc 
import ABC, abstractmethod 2 | 3 | 4 | class BaseMemory(ABC): 5 | """Abstract base class for memory stores in Intelli flow.""" 6 | 7 | @abstractmethod 8 | def store(self, key, value): 9 | """ 10 | Store a value in memory with the given key. 11 | 12 | Args: 13 | key (str): The key to use for storing the value 14 | value: The value to store 15 | 16 | Returns: 17 | Self for method chaining 18 | """ 19 | pass 20 | 21 | @abstractmethod 22 | def retrieve(self, key, default=None): 23 | """ 24 | Retrieve a value from memory using the given key. 25 | 26 | Args: 27 | key (str): The key for the value to retrieve 28 | default: Value to return if key doesn't exist 29 | 30 | Returns: 31 | The stored value, or default if the key doesn't exist 32 | """ 33 | pass 34 | 35 | @abstractmethod 36 | def has_key(self, key): 37 | """ 38 | Check if a key exists in memory. 39 | 40 | Args: 41 | key (str): The key to check 42 | 43 | Returns: 44 | bool: True if the key exists, False otherwise 45 | """ 46 | pass 47 | 48 | @abstractmethod 49 | def all(self): 50 | """ 51 | Get all stored data. 52 | 53 | Returns: 54 | dict: A copy of all stored data 55 | """ 56 | pass 57 | 58 | @abstractmethod 59 | def clear(self): 60 | """ 61 | Clear all stored data. 62 | 63 | Returns: 64 | Self for method chaining 65 | """ 66 | pass 67 | 68 | @abstractmethod 69 | def keys(self): 70 | """ 71 | Get all keys in memory. 72 | 73 | Returns: 74 | list: All keys in memory 75 | """ 76 | pass 77 | 78 | def __contains__(self, key): 79 | """Support for 'in' operator.""" 80 | return self.has_key(key) 81 | -------------------------------------------------------------------------------- /intelli/flow/store/memory.py: -------------------------------------------------------------------------------- 1 | from intelli.flow.store.basememory import BaseMemory 2 | 3 | class Memory(BaseMemory): 4 | """In-memory store for sharing data between tasks in a flow.""" 5 | 6 | def __init__(self): 7 | """Initialize an empty memory store.""" 8 | self._data = {} 9 | 10 | def store(self, key, value): 11 | """ 12 | Store a value in memory with the given key. 13 | 14 | Args: 15 | key (str): The key to use for storing the value 16 | value: The value to store 17 | 18 | Returns: 19 | Memory: Self for method chaining 20 | """ 21 | self._data[key] = value 22 | return self # Allow method chaining 23 | 24 | def retrieve(self, key, default=None): 25 | """ 26 | Retrieve a value from memory using the given key. 27 | 28 | Args: 29 | key (str): The key for the value to retrieve 30 | default: Value to return if key doesn't exist 31 | 32 | Returns: 33 | The stored value, or default if the key doesn't exist 34 | """ 35 | return self._data.get(key, default) 36 | 37 | def has_key(self, key): 38 | """ 39 | Check if a key exists in memory. 40 | 41 | Args: 42 | key (str): The key to check 43 | 44 | Returns: 45 | bool: True if the key exists, False otherwise 46 | """ 47 | return key in self._data 48 | 49 | def all(self): 50 | """ 51 | Get all stored data. 52 | 53 | Returns: 54 | dict: A copy of all stored data 55 | """ 56 | return dict(self._data) 57 | 58 | def clear(self): 59 | """ 60 | Clear all stored data. 61 | 62 | Returns: 63 | Memory: Self for method chaining 64 | """ 65 | self._data.clear() 66 | return self 67 | 68 | def keys(self): 69 | """ 70 | Get all keys in memory. 
71 | 72 | Returns: 73 | list: All keys in memory 74 | """ 75 | return list(self._data.keys()) 76 | -------------------------------------------------------------------------------- /intelli/flow/tasks/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligentnode/Intelli/dd1b55ab120b6e6e53a55446dcd974b840ae004d/intelli/flow/tasks/__init__.py -------------------------------------------------------------------------------- /intelli/flow/template/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligentnode/Intelli/dd1b55ab120b6e6e53a55446dcd974b840ae004d/intelli/flow/template/__init__.py -------------------------------------------------------------------------------- /intelli/flow/template/basic_template.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | import re 3 | import json 4 | 5 | 6 | class Template(ABC): 7 | @abstractmethod 8 | def apply_input(self, data): 9 | pass 10 | 11 | @abstractmethod 12 | def apply_output(self, data): 13 | pass 14 | 15 | 16 | class TextInputTemplate(Template): 17 | """ 18 | A template for text input with enhanced structure preservation 19 | and robust JSON handling. 20 | """ 21 | 22 | def __init__(self, template_text: str, previous_input_tag='PREVIOUS_ANALYSIS', user_request_tag='CURRENT_TASK'): 23 | if '{0}' not in template_text: 24 | context = previous_input_tag + ': {0}\n' 25 | request = user_request_tag + ': ' + template_text 26 | template_text = context + request 27 | 28 | self.template_text = template_text.strip() 29 | self.previous_input_tag = previous_input_tag 30 | self.user_request_tag = user_request_tag 31 | 32 | def apply_input(self, data): 33 | """ 34 | Apply the template to input data with improved structure preservation 35 | and robust JSON handling. 
36 | """ 37 | # Keep original handling for None data 38 | if data is None: 39 | return self.template_text 40 | 41 | # Handle dictionary data 42 | if isinstance(data, dict): 43 | try: 44 | # Convert to JSON string 45 | formatted_json = json.dumps(data, indent=2) 46 | return f"{self.template_text}\n\n```json\n{formatted_json}\n```" 47 | except Exception as e: 48 | # If serialization fails, fallback to string representation 49 | return f"{self.template_text}\n\n{str(data)}" 50 | 51 | # Handle string data that might contain JSON 52 | if isinstance(data, str): 53 | # For JSON-like strings, first try to parse and reformat 54 | if ('{' in data and '}' in data) or ('[' in data and ']' in data): 55 | try: 56 | json_data = json.loads(data) 57 | # Format 58 | formatted_json = json.dumps(json_data, indent=2) 59 | return f"{self.template_text}\n\n```json\n{formatted_json}\n```" 60 | except json.JSONDecodeError: 61 | # Not valid JSON or has already escaped braces 62 | pass 63 | 64 | # Preserve section headers with newlines 65 | enhanced_data = data 66 | header_pattern = r'(^|\n)(#+\s+[A-Z\s]+:?|[A-Z\s]+(ASSESSMENT|ANALYSIS|PREDICTION|STATUS):?)' 67 | enhanced_data = re.sub(header_pattern, r'\1\n\2\n', enhanced_data) 68 | 69 | return f"{self.template_text}\n\n{enhanced_data}" 70 | 71 | # Handle other data types 72 | return f"{self.template_text}\n\n{str(data)}" 73 | 74 | def apply_output(self, data): 75 | """Apply template to output data (not implemented).""" 76 | pass -------------------------------------------------------------------------------- /intelli/flow/tool_connector.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tool-aware dynamic connector for routing based on LLM tool/function calls. 3 | 4 | This connector examines the output from LLM agents and routes to different 5 | tasks based on whether tools were invoked or not. 6 | """ 7 | 8 | from intelli.flow.dynamic_connector import DynamicConnector, ConnectorMode 9 | from typing import Any, Optional, Dict, Callable 10 | 11 | 12 | class ToolDynamicConnector(DynamicConnector): 13 | """ 14 | Dynamic connector that routes based on tool/function calls in LLM output. 15 | 16 | This connector enables flows to dynamically decide whether to execute 17 | MCP or other tool-based tasks based on LLM decisions. 18 | """ 19 | 20 | def __init__( 21 | self, 22 | decision_fn: Optional[Callable[[Any, str], str]] = None, 23 | destinations: Dict[str, str] = None, 24 | name: str = "tool_aware_connector", 25 | description: str = "Routes based on tool usage in LLM output", 26 | mode: ConnectorMode = ConnectorMode.CUSTOM, 27 | ): 28 | """ 29 | Initialize the tool-aware connector. 
30 | 31 | Args: 32 | decision_fn: Optional custom decision function (defaults to tool detection) 33 | destinations: Must include "tool_called" and "no_tool" keys 34 | name: Connector name 35 | description: Connector description 36 | mode: Connector mode 37 | """ 38 | # Use custom decision function or default tool detection 39 | if decision_fn is None: 40 | decision_fn = self._default_tool_decision 41 | 42 | super().__init__(decision_fn, destinations, name, description, mode) 43 | 44 | # Validate required destinations 45 | if not destinations or "tool_called" not in destinations or "no_tool" not in destinations: 46 | raise ValueError( 47 | "ToolDynamicConnector requires destinations with 'tool_called' and 'no_tool' keys" 48 | ) 49 | 50 | def _default_tool_decision(self, output: Any, output_type: str) -> str: 51 | """ 52 | Default decision function that detects tool usage. 53 | 54 | Returns: 55 | "tool_called" if tools were invoked 56 | "no_tool" if direct response 57 | None if cannot determine 58 | """ 59 | # Check for tool response structure 60 | if isinstance(output, dict): 61 | # Check for standard tool response format 62 | if output.get("type") in ["tool_response", "function_response"]: 63 | if output.get("tool_calls") or output.get("function_call"): 64 | return "tool_called" 65 | 66 | # Check for text content (direct response) 67 | if isinstance(output, str) and output.strip(): 68 | return "no_tool" 69 | elif isinstance(output, dict) and output.get("content") and not output.get("tool_calls"): 70 | return "no_tool" 71 | 72 | # Cannot determine 73 | return None 74 | 75 | def get_tool_info(self, output: Any) -> Optional[Dict[str, Any]]: 76 | """ 77 | Extract tool information from the output. 78 | 79 | Returns: 80 | Dict with tool name and arguments, or None if no tools 81 | """ 82 | if not isinstance(output, dict): 83 | return None 84 | 85 | # Handle new format (tool_calls) 86 | if output.get("type") == "tool_response" and output.get("tool_calls"): 87 | first_tool = output["tool_calls"][0] 88 | return { 89 | "name": first_tool["function"]["name"], 90 | "arguments": first_tool["function"].get("arguments", "{}"), 91 | "id": first_tool.get("id") 92 | } 93 | # Handle legacy format (function_call) 94 | elif output.get("type") == "function_response" and output.get("function_call"): 95 | return { 96 | "name": output["function_call"]["name"], 97 | "arguments": output["function_call"].get("arguments", "{}"), 98 | "id": None 99 | } 100 | 101 | return None -------------------------------------------------------------------------------- /intelli/flow/types.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | 4 | class AgentTypes(Enum): 5 | TEXT = 'text' 6 | IMAGE = 'image' 7 | VISION = 'vision' 8 | SPEECH = 'speech' 9 | RECOGNITION = 'recognition' 10 | EMBED = 'embed' 11 | SEARCH = 'search' 12 | MCP = 'mcp' 13 | 14 | 15 | class InputTypes(Enum): 16 | TEXT = 'text' 17 | IMAGE = 'image' 18 | VISION = 'vision' 19 | SPEECH = 'speech' 20 | AUDIO = 'audio' 21 | EMBED = 'embed' 22 | 23 | 24 | class Matcher(): 25 | """ 26 | Maps the expected input type for each agent type and 27 | the expected output type for each agent type. 28 | This ensures compatibility between connected tasks. 
29 | """ 30 | 31 | # What each agent type expects as input 32 | input = { 33 | 'text': 'text', 34 | 'image': 'text', 35 | 'vision': 'image', 36 | 'speech': 'text', 37 | 'recognition': 'audio', 38 | 'embed': 'text', 39 | 'search': 'text', 40 | 'mcp': 'text' 41 | } 42 | 43 | # What each agent type produces as output 44 | output = { 45 | 'text': 'text', 46 | 'image': 'image', 47 | 'vision': 'text', 48 | 'speech': 'audio', 49 | 'recognition': 'text', 50 | 'embed': 'embed', 51 | 'search': 'text', 52 | 'mcp': 'text' 53 | } 54 | -------------------------------------------------------------------------------- /intelli/flow/utils/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Intelli Flow utilities 3 | 4 | This package provides helper utilities for working with Intelli Flows. 5 | """ 6 | 7 | # Import utility modules and expose main components 8 | try: 9 | from intelli.mcp import ( 10 | MCPServerBuilder, 11 | MCPJSONExtractor, 12 | create_mcp_preprocessor 13 | ) 14 | except ImportError: 15 | # MCP utilities may not be available if MCP isn't installed 16 | pass 17 | -------------------------------------------------------------------------------- /intelli/function/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligentnode/Intelli/dd1b55ab120b6e6e53a55446dcd974b840ae004d/intelli/function/__init__.py -------------------------------------------------------------------------------- /intelli/mcp/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | MCP (Model Context Protocol) Package for Intelli 3 | 4 | This package provides utilities for working with MCP (Model Context Protocol), 5 | making it easier to create MCP servers and integrate them with Intelli. 
6 | """ 7 | 8 | from mcp.utils import MCPServerBuilder, MCPJSONExtractor, create_mcp_preprocessor 9 | 10 | # Import DataFrame utilities with graceful fallback 11 | try: 12 | from mcp.dataframe_utils import ( 13 | BaseDataFrameMCPServerBuilder, 14 | PandasMCPServerBuilder, 15 | PolarsMCPServerBuilder, 16 | PANDAS_AVAILABLE, 17 | POLARS_AVAILABLE 18 | ) 19 | _DATAFRAME_UTILS_AVAILABLE = True 20 | except ImportError: 21 | _DATAFRAME_UTILS_AVAILABLE = False 22 | 23 | __all__ = [ 24 | 'MCPServerBuilder', 25 | 'MCPJSONExtractor', 26 | 'create_mcp_preprocessor' 27 | ] 28 | 29 | # Add DataFrame utilities to __all__ if available 30 | if _DATAFRAME_UTILS_AVAILABLE: 31 | __all__.extend([ 32 | 'BaseDataFrameMCPServerBuilder', 33 | 'PandasMCPServerBuilder', 34 | 'PolarsMCPServerBuilder', 35 | 'PANDAS_AVAILABLE', 36 | 'POLARS_AVAILABLE' 37 | ]) -------------------------------------------------------------------------------- /intelli/model/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligentnode/Intelli/dd1b55ab120b6e6e53a55446dcd974b840ae004d/intelli/model/__init__.py -------------------------------------------------------------------------------- /intelli/model/input/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligentnode/Intelli/dd1b55ab120b6e6e53a55446dcd974b840ae004d/intelli/model/input/__init__.py -------------------------------------------------------------------------------- /intelli/model/input/embed_input.py: -------------------------------------------------------------------------------- 1 | class EmbedInput: 2 | def __init__(self, texts, model=None): 3 | self.texts = texts 4 | self.model = model 5 | 6 | def get_openai_inputs(self): 7 | inputs = {"input": self.texts} 8 | if self.model: 9 | inputs["model"] = self.model 10 | return inputs 11 | 12 | def get_mistral_inputs(self): 13 | return self.get_openai_inputs() 14 | 15 | def get_gemini_inputs(self): 16 | return { 17 | "model": self.model, 18 | "content": {"parts": [{"text": text} for text in self.texts]}, 19 | } 20 | 21 | def get_nvidia_inputs(self): 22 | inputs = { 23 | "input": self.texts, 24 | "model": self.model, 25 | "input_type": "query", 26 | "encoding_format": "float", 27 | "truncate": "NONE", 28 | } 29 | return inputs 30 | 31 | def get_vllm_inputs(self): 32 | """ 33 | Returns: 34 | dict: Parameters for vLLM embedding request. 
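        The returned dict carries the raw input texts and, when one is set, the model name.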
35 | """ 36 | # Unlike other providers, vLLM directly expects texts without being wrapped in a "texts" key 37 | return {"texts": self.texts, **({"model": self.model} if self.model else {})} 38 | 39 | def set_default_values(self, provider): 40 | if provider == "openai": 41 | self.model = self.model or "text-embedding-3-small" 42 | elif provider == "gemini": 43 | self.model = self.model or "models/embedding-001" 44 | elif provider == "mistral": 45 | self.model = self.model or "mistral-embed" 46 | else: 47 | raise ValueError(f"No default value for provider: {provider}") 48 | -------------------------------------------------------------------------------- /intelli/model/input/image_input.py: -------------------------------------------------------------------------------- 1 | class ImageModelInput: 2 | 3 | def __init__(self, prompt, number_images=1, imageSize=None, 4 | response_format=None, width=None, height=None, 5 | diffusion_cfgScale=None, diffusion_style_preset=None, 6 | diffusion_steps=None, engine=None, model=None, 7 | # New OpenAI parameters 8 | background=None, moderation=None, output_compression=None, 9 | output_format=None, quality=None, style=None, user=None): 10 | 11 | self.prompt = prompt 12 | self.number_images = number_images 13 | self.imageSize = imageSize 14 | self.response_format = response_format 15 | self.width = width 16 | self.height = height 17 | self.diffusion_cfgScale = diffusion_cfgScale 18 | self.diffusion_style_preset = diffusion_style_preset 19 | self.diffusion_steps = diffusion_steps 20 | self.engine = engine 21 | self.model = model 22 | 23 | # New OpenAI parameters 24 | self.background = background 25 | self.moderation = moderation 26 | self.output_compression = output_compression 27 | self.output_format = output_format 28 | self.quality = quality 29 | self.style = style 30 | self.user = user 31 | 32 | if imageSize and not width: 33 | sizes_parts = imageSize.split('x') if imageSize else [None, None] 34 | self.width = self.width or sizes_parts[0] 35 | self.height = self.height or sizes_parts[1] 36 | 37 | if not self.imageSize: 38 | self.imageSize = str(self.width) + 'x' + str(self.height) 39 | 40 | def get_openai_inputs(self): 41 | inputs = { 42 | "prompt": self.prompt, 43 | "n": self.number_images, 44 | "model": self.model, 45 | "size": self.imageSize, 46 | "response_format": self.response_format, 47 | "background": self.background, 48 | "moderation": self.moderation, 49 | "output_compression": self.output_compression, 50 | "output_format": self.output_format, 51 | "quality": self.quality, 52 | "style": self.style, 53 | "user": self.user 54 | } 55 | 56 | return {key: value for key, value in inputs.items() if value is not None} 57 | 58 | def get_stability_inputs(self): 59 | 60 | inputs = { 61 | "text_prompts": [{"text": self.prompt}], 62 | "samples": self.number_images, 63 | "height": self.height, 64 | "width": self.width, 65 | "cfg_scale": self.diffusion_cfgScale, 66 | "engine": self.engine, 67 | "style_preset": self.diffusion_style_preset, 68 | "steps": self.diffusion_steps 69 | } 70 | 71 | inputs = {key: value for key, value in inputs.items() if value is not None} 72 | return inputs 73 | 74 | def get_gemini_inputs(self): 75 | """Get input parameters for Gemini image generation""" 76 | config_params = { 77 | "responseModalities": ["TEXT", "IMAGE"] 78 | } 79 | 80 | # Add any additional config parameters if available 81 | if self.quality: 82 | config_params["quality"] = self.quality 83 | 84 | inputs = { 85 | "prompt": self.prompt, 86 | "config_params": 
config_params 87 | } 88 | 89 | # Include model override if specified 90 | if self.model: 91 | inputs["model"] = self.model 92 | 93 | return inputs 94 | 95 | def set_default_values(self, provider): 96 | if provider == "openai": 97 | self.number_images = 1 98 | self.imageSize = '1024x1024' 99 | self.model = self.model or 'gpt-image-1' # Set latest model as default 100 | elif provider == "stability": 101 | self.number_images = 1 102 | self.height = 1024 103 | self.width = 1024 104 | self.engine = 'stable-diffusion-xl-1024-v1-0' 105 | elif provider == "gemini": 106 | self.number_images = 1 107 | self.imageSize = '1024x1024' 108 | # Gemini uses default model from config 109 | else: 110 | raise ValueError(f"Invalid provider name: {provider}") 111 | -------------------------------------------------------------------------------- /intelli/model/input/text_speech_input.py: -------------------------------------------------------------------------------- 1 | class Text2SpeechInput: 2 | Gender = {"FEMALE": "FEMALE", "MALE": "MALE"} 3 | 4 | def __init__( 5 | self, 6 | text, 7 | language="en-gb", 8 | gender="FEMALE", 9 | voice=None, 10 | model="tts-1", 11 | stream=True, 12 | ): 13 | self.text = text 14 | self.language = language.lower() 15 | # gender is in uppercase for consistency with Gender dictionary keys 16 | self.gender = gender.upper() 17 | self.voice = voice 18 | self.model = model 19 | self.stream = stream 20 | 21 | def get_google_input(self): 22 | params = {"text": self.text, "languageCode": self.language} 23 | 24 | language_name_map = { 25 | "en-gb": "en-GB", 26 | "en": "en-GB", 27 | "tr-tr": "tr-TR", 28 | "tr": "tr-TR", 29 | "cmn-cn": "cmn-CN", 30 | "cn": "cmn-CN", 31 | "de-de": "de-DE", 32 | "de": "de-DE", 33 | "ar-xa": "ar-XA", 34 | "ar": "ar-XA", 35 | } 36 | 37 | gender_name_map = {"FEMALE": "A", "MALE": "B"} 38 | 39 | base_language = language_name_map.get(self.language, None) 40 | 41 | if base_language: 42 | params["name"] = ( 43 | f"{base_language}-Standard-{gender_name_map.get(self.gender, 'A')}" 44 | ) 45 | params["ssmlGender"] = self.gender 46 | else: 47 | raise ValueError(f"Unsupported language code: {self.language}") 48 | 49 | return params 50 | 51 | def get_openai_input(self): 52 | return { 53 | "input": self.text, 54 | "voice": self.voice, 55 | "model": self.model, 56 | "stream": self.stream, 57 | } 58 | 59 | def get_elevenlabs_input(self): 60 | """Get input parameters for Eleven Labs text-to-speech""" 61 | params = { 62 | "text": self.text, 63 | "voice_id": self.voice_id if hasattr(self, "voice_id") else None, 64 | } 65 | 66 | if hasattr(self, "model_id") and self.model_id: 67 | params["model_id"] = self.model_id 68 | 69 | if hasattr(self, "output_format") and self.output_format: 70 | params["output_format"] = self.output_format 71 | 72 | return params 73 | 74 | def get_gemini_input(self): 75 | """Get input parameters for Gemini text-to-speech""" 76 | voice_config = { 77 | "prebuilt_voice_config": { 78 | "voice_name": "Kore" # Default voice 79 | } 80 | } 81 | 82 | # Map gender to voice if needed 83 | if self.gender == "MALE": 84 | voice_config["prebuilt_voice_config"]["voice_name"] = "Puck" 85 | 86 | # Use custom voice if specified 87 | if self.voice: 88 | voice_config["prebuilt_voice_config"]["voice_name"] = self.voice 89 | 90 | return { 91 | "text": self.text, 92 | "voice_config": voice_config 93 | } 94 | -------------------------------------------------------------------------------- /intelli/model/input/vision_input.py: 
-------------------------------------------------------------------------------- 1 | import os 2 | import base64 3 | 4 | 5 | class VisionModelInput: 6 | 7 | def __init__( 8 | self, 9 | content="", 10 | image_data=None, 11 | file_path=None, 12 | model=None, 13 | extension="png", 14 | max_tokens=300, 15 | ): 16 | 17 | self.content = content 18 | self.model = model 19 | self.max_tokens = max_tokens 20 | self.extension = extension 21 | self.file_path = file_path 22 | 23 | if file_path: 24 | with open(file_path, "rb") as image_file: 25 | self.image_data = base64.b64encode(image_file.read()).decode("utf-8") 26 | self.extension = os.path.splitext(file_path)[-1].strip(".") 27 | else: 28 | self.image_data = image_data 29 | 30 | def get_openai_inputs(self): 31 | 32 | inputs = { 33 | "model": self.model, 34 | "messages": [ 35 | { 36 | "role": "user", 37 | "content": [ 38 | {"type": "text", "text": self.content}, 39 | { 40 | "type": "image_url", 41 | "image_url": { 42 | "url": f"data:image/{self.extension};base64,{self.image_data}" 43 | }, 44 | }, 45 | ], 46 | } 47 | ], 48 | "max_tokens": self.max_tokens, 49 | } 50 | 51 | return inputs 52 | 53 | def get_gemini_inputs(self): 54 | 55 | inputs = { 56 | "contents": [ 57 | { 58 | "parts": [ 59 | {"text": f"{self.content}"}, 60 | { 61 | "inline_data": { 62 | "mime_type": f"image/{self.extension}", 63 | "data": self.image_data, 64 | } 65 | }, 66 | ] 67 | } 68 | ] 69 | } 70 | 71 | return inputs 72 | 73 | def get_google_inputs(self): 74 | """ 75 | Google Vision API works directly with binary image data. 76 | For convenience, we'll provide the file_path to let the GoogleAIWrapper 77 | read the file directly, or we'll decode the base64 image_data. 78 | """ 79 | # If we have a file path, return it 80 | if self.file_path and os.path.exists(self.file_path): 81 | return { 82 | "file_path": self.file_path, 83 | "content": self.content, # This can be used as a prompt or additional context 84 | } 85 | # Otherwise decode the base64 image data back to binary 86 | elif self.image_data: 87 | return { 88 | "image_content": base64.b64decode(self.image_data), 89 | "content": self.content, 90 | } 91 | else: 92 | raise ValueError( 93 | "No image data or file path provided for Google Vision API" 94 | ) 95 | 96 | def get_provider_inputs(self, provider): 97 | if provider == "openai": 98 | return self.get_openai_inputs() 99 | elif provider == "gemini": 100 | return self.get_gemini_inputs() 101 | elif provider == "google": 102 | return self.get_google_inputs() 103 | else: 104 | raise ValueError(f"Invalid provider name: {provider}") 105 | -------------------------------------------------------------------------------- /intelli/requirements.txt: -------------------------------------------------------------------------------- 1 | # Core dependencies 2 | python-dotenv~=1.0.1 3 | numpy<2.0 4 | mcp~=1.9.0 5 | matplotlib>=3.6.0 6 | networkx>=3.2.1 7 | huggingface_hub>=0.28.1 8 | 9 | # ML dependencies 10 | keras-nlp 11 | keras>=3 12 | librosa 13 | keras-hub 14 | 15 | # For testing 16 | pytest>=7.0.0 17 | 18 | # Other dependencies 19 | asyncio 20 | json5 -------------------------------------------------------------------------------- /intelli/resource/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligentnode/Intelli/dd1b55ab120b6e6e53a55446dcd974b840ae004d/intelli/resource/__init__.py -------------------------------------------------------------------------------- /intelli/resource/templates/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligentnode/Intelli/dd1b55ab120b6e6e53a55446dcd974b840ae004d/intelli/resource/templates/__init__.py -------------------------------------------------------------------------------- /intelli/resource/templates/augmented_chatbot.in: -------------------------------------------------------------------------------- 1 | Using the provided context, craft a cohesive response that directly addresses the user's query. If the context lacks relevance or is absent, focus on generating a knowledgeable and accurate answer based on the user's question alone. Aim for clarity and conciseness in your reply. 2 | Context: 3 | ${semantic_search} 4 | --------------------------------- 5 | User's Question: 6 | ${user_query} -------------------------------------------------------------------------------- /intelli/test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligentnode/Intelli/dd1b55ab120b6e6e53a55446dcd974b840ae004d/intelli/test/__init__.py -------------------------------------------------------------------------------- /intelli/test/integration/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligentnode/Intelli/dd1b55ab120b6e6e53a55446dcd974b840ae004d/intelli/test/integration/__init__.py -------------------------------------------------------------------------------- /intelli/test/integration/data/sample_data.csv: -------------------------------------------------------------------------------- 1 | ID,Name,Age,City,Salary 2 | 1,Alice,30,New York,70000 3 | 2,Bob,24,Los Angeles,60000 4 | 3,Charlie,35,Chicago,80000 5 | 4,David,28,New York,75000 6 | 5,Eve,40,Chicago,90000 7 | 6,Frank,22,Los Angeles,55000 8 | 7,Grace,33,New York,82000 9 | 8,Hank,45,Chicago,95000 10 | 9,Ivy,29,Los Angeles,62000 11 | 10,Jack,31,New York,78000 12 | 11,Karen,38,Chicago,88000 13 | 12,Leo,26,Los Angeles,59000 14 | 13,Mona,32,New York,81000 15 | 14,Nick,42,Chicago,92000 16 | 15,Olivia,27,Los Angeles,61000 -------------------------------------------------------------------------------- /intelli/test/integration/mcp_dataframe_server.py: -------------------------------------------------------------------------------- 1 | """ 2 | MCP DataFrame Server for Integration Testing. 3 | 4 | This script runs an MCP server using either PandasMCPServerBuilder or PolarsMCPServerBuilder 5 | to serve a sample CSV file via stdio transport. It checks for the availability of 6 | PandaS or Polars and uses the first one found. 7 | 8 | It expects the sample CSV to be located at ./data/sample_data.csv relative to this script. 
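Run it directly (python mcp_dataframe_server.py); because it talks MCP over stdio, an
MCP client or an Intelli flow can launch it as a subprocess and exchange messages with it.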
9 | """ 10 | import os 11 | import sys 12 | 13 | # Adjust path to import from the parent directory (Intelli root) 14 | current_dir = os.path.dirname(os.path.abspath(__file__)) 15 | parent_dir = os.path.dirname(os.path.dirname(current_dir)) 16 | # Add 'Intelli' to sys.path 17 | intelli_root_path = parent_dir 18 | sys.path.insert(0, intelli_root_path) 19 | 20 | # Import from new location with fallback 21 | from intelli.mcp.dataframe_utils import PandasMCPServerBuilder, PolarsMCPServerBuilder, PANDAS_AVAILABLE, POLARS_AVAILABLE 22 | 23 | if __name__ == "__main__": 24 | # Determine the path to the sample CSV file 25 | # Assuming this server script is in test/integration/ 26 | # and the data is in test/integration/data/ 27 | csv_file_name = "sample_data.csv" 28 | csv_file_path = os.path.join(current_dir, "data", csv_file_name) 29 | 30 | if not os.path.exists(csv_file_path): 31 | print(f"Error: Sample CSV file not found at {csv_file_path}") 32 | print("Please ensure the 'sample_data.csv' is in the 'test/integration/data' directory.") 33 | sys.exit(1) 34 | 35 | server_builder = None 36 | server_type = "" 37 | 38 | if PANDAS_AVAILABLE: 39 | print("Pandas is available. Attempting to start Pandas DataFrame MCP Server.") 40 | try: 41 | server_builder = PandasMCPServerBuilder( 42 | server_name="PandasDataFrameTestServer", 43 | csv_file_path=csv_file_path 44 | ) 45 | server_type = "Pandas" 46 | except Exception as e: 47 | print(f"Failed to initialize PandasMCPServerBuilder: {e}") 48 | server_builder = None # Ensure it's None if init fails 49 | 50 | if server_builder is None and POLARS_AVAILABLE: 51 | print("Pandas server failed or not available. Polars is available. Attempting to start Polars DataFrame MCP Server.") 52 | try: 53 | server_builder = PolarsMCPServerBuilder( 54 | server_name="PolarsDataFrameTestServer", 55 | csv_file_path=csv_file_path 56 | ) 57 | server_type = "Polars" 58 | except Exception as e: 59 | print(f"Failed to initialize PolarsMCPServerBuilder: {e}") 60 | server_builder = None 61 | 62 | if server_builder and server_builder.df is not None: 63 | print(f"Successfully initialized {server_type} DataFrame MCP Server.") 64 | # Run the server with stdio transport for integration testing 65 | server_builder.run(transport="stdio", print_info=True) 66 | elif server_builder and server_builder.df is None: 67 | print(f"Initialized {server_type} DataFrame MCP Server, but DataFrame failed to load from {csv_file_path}.") 68 | print("Server will not run effectively. 
Please check CSV file and library installations.") 69 | sys.exit(1) 70 | else: 71 | print("Error: Neither Pandas nor Polars is available or server initialization failed.") 72 | print("Please install pandas or polars and ensure MCP is installed: pip install intelli[mcp] pandas polars") 73 | sys.exit(1) -------------------------------------------------------------------------------- /intelli/test/integration/mcp_math_server.py: -------------------------------------------------------------------------------- 1 | from mcp.server.fastmcp import FastMCP 2 | 3 | # Create an MCP server 4 | mcp = FastMCP("MathTools") 5 | 6 | # Add an addition tool 7 | @mcp.tool() 8 | def add(a: int, b: int) -> int: 9 | """Add two numbers""" 10 | return a + b 11 | 12 | # Add a subtraction tool 13 | @mcp.tool() 14 | def subtract(a: int, b: int) -> int: 15 | """Subtract two numbers""" 16 | return a - b 17 | 18 | # Add a multiplication tool 19 | @mcp.tool() 20 | def multiply(a: int, b: int) -> int: 21 | """Multiply two numbers""" 22 | return a * b 23 | 24 | if __name__ == "__main__": 25 | mcp.run(transport="stdio") -------------------------------------------------------------------------------- /intelli/test/integration/test_anthropic_wrapper.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | from intelli.wrappers.anthropic_wrapper import AnthropicWrapper 4 | from dotenv import load_dotenv 5 | 6 | load_dotenv() 7 | 8 | 9 | class TestAnthropicWrapperIntegration(unittest.TestCase): 10 | def setUp(self): 11 | """Set up for the test case.""" 12 | self.api_key = os.getenv("ANTHROPIC_API_KEY") 13 | self.assertIsNotNone(self.api_key, "ANTHROPIC_API_KEY must not be None.") 14 | self.anthropic = AnthropicWrapper(self.api_key) 15 | 16 | def test_generate_text_integration(self): 17 | """Integration test for generate_text method.""" 18 | params = { 19 | "model": "claude-3-7-sonnet-20250219", 20 | "messages": [ 21 | { 22 | "role": "user", 23 | "content": "Who is the most renowned French painter? Provide a single direct short answer." 24 | } 25 | ], 26 | "max_tokens": 256 27 | } 28 | 29 | # Call the model 30 | result = self.anthropic.generate_text(params) 31 | print(f"generate text result: {result['content'][0]['text']}") 32 | self.assertTrue('content' in result and isinstance(result['content'], list) and len(result['content']) > 0, 33 | "The API response should include 'content' and it should be a non-empty list.") 34 | self.assertIn('text', result['content'][0], "The API response content should have a 'text' field.") 35 | 36 | def test_stream_text_integration(self): 37 | """Integration test for stream_text method.""" 38 | params = { 39 | "model": "claude-3-7-sonnet-20250219", 40 | "messages": [ 41 | { 42 | "role": "user", 43 | "content": "Who is the American mathematician know as the father of \"information theory\"? " 44 | "Provide a single direct short answer." 
45 | } 46 | ], 47 | "max_tokens": 256 48 | } 49 | 50 | event_count = 0 51 | try: 52 | for line in self.anthropic.stream_text(params): 53 | print(f"Received line: {line}") 54 | event_count += 1 55 | if event_count > 10: # Break after receiving a few events to avoid infinite loop 56 | break 57 | self.assertGreater(event_count, 0, "Should have received at least one streaming event.") 58 | except Exception as error: 59 | self.fail(f"Streaming failed with exception: {str(error)}") 60 | 61 | 62 | if __name__ == "__main__": 63 | unittest.main() 64 | -------------------------------------------------------------------------------- /intelli/test/integration/test_azure_chatbot.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | import asyncio 4 | from intelli.function.chatbot import Chatbot 5 | from intelli.utils.proxy_helper import ProxyHelper 6 | from intelli.model.input.chatbot_input import ChatModelInput 7 | from dotenv import load_dotenv 8 | load_dotenv() 9 | 10 | class TestChatbot(unittest.TestCase): 11 | def setUp(self): 12 | # Get azure keys 13 | azure_api_key = os.getenv("AZURE_OPENAI_API_KEY") 14 | azure_resource = os.getenv("AZURE_RESOURCE") 15 | # Initiate the proxy 16 | proxy_helper = ProxyHelper() 17 | proxy_helper.set_azure_openai(azure_resource) 18 | # Wrapp the proxy as parameter 19 | options = { 20 | 'proxy_helper': proxy_helper 21 | } 22 | 23 | # Creating Chatbot instances 24 | self.openai_bot = Chatbot(azure_api_key, "openai", options=options) 25 | 26 | def test_openai_chat(self): 27 | print('---- start openai ----') 28 | input = ChatModelInput("You are a helpful assistant.", "gpt_basic") 29 | input.add_user_message("What is the capital of France?") 30 | 31 | response = self.openai_bot.chat(input) 32 | 33 | print('openai response: ', response) 34 | 35 | self.assertTrue(len(response) > 0, "OpenAI chat response should not be empty") 36 | 37 | if __name__ == '__main__': 38 | unittest.main() -------------------------------------------------------------------------------- /intelli/test/integration/test_chatbot.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | import asyncio 4 | from intelli.function.chatbot import Chatbot, ChatProvider 5 | from intelli.model.input.chatbot_input import ChatModelInput 6 | from dotenv import load_dotenv 7 | 8 | load_dotenv() 9 | 10 | 11 | class TestChatbot(unittest.TestCase): 12 | def setUp(self): 13 | self.openai_api_key = os.getenv("OPENAI_API_KEY") 14 | self.gemini_api_key = os.getenv("GEMINI_API_KEY") 15 | self.mistral_api_key = os.getenv("MISTRAL_API_KEY") 16 | self.anthropic_api_key = os.getenv("ANTHROPIC_API_KEY") 17 | 18 | # Creating Chatbot instances for each AI model 19 | self.openai_bot = Chatbot(self.openai_api_key, ChatProvider.OPENAI) 20 | self.gemini_bot = Chatbot(self.gemini_api_key, ChatProvider.GEMINI) 21 | self.mistral_bot = Chatbot(self.mistral_api_key, ChatProvider.MISTRAL) 22 | self.anthropic_bot = Chatbot(self.anthropic_api_key, ChatProvider.ANTHROPIC) 23 | 24 | def test_openai_chat(self): 25 | print('---- start openai ----') 26 | input = ChatModelInput("You are a helpful assistant.", "gpt-3.5-turbo") 27 | input.add_user_message("What is the capital of France?") 28 | 29 | response = self.openai_bot.chat(input) 30 | 31 | print('openai response: ', response) 32 | 33 | self.assertTrue(len(response) > 0, "OpenAI chat response should not be empty") 34 | 35 | def test_gemini_chat(self): 36 | 
print('---- start gemini ----') 37 | input = ChatModelInput("You are a helpful assistant.", "gemini-model") 38 | input.add_user_message("Describe a starry night.") 39 | 40 | response = self.gemini_bot.chat(input) 41 | 42 | print('gemini response: ', response) 43 | 44 | self.assertTrue(len(response) > 0, "Gemini chat response should not be empty") 45 | 46 | def test_mistral_chat(self): 47 | print('---- start mistral ----') 48 | input = ChatModelInput("You are a helpful assistant.", "mistral-tiny") 49 | input.add_user_message("Who is Leonardo da Vinci?") 50 | 51 | response = self.mistral_bot.chat(input) 52 | 53 | print('mistral response: ', response) 54 | 55 | self.assertTrue(len(response) > 0, "Mistral chat response should not be empty") 56 | 57 | def test_anthropic_chat(self): 58 | print('---- start anthropic ----') 59 | input = ChatModelInput("You are a helpful assistant.", "claude-3-7-sonnet-20250219") 60 | input.add_user_message("What is the capital of France?") 61 | 62 | response = self.anthropic_bot.chat(input) 63 | 64 | print('- anthropic response: ', response[0]) 65 | 66 | self.assertTrue(len(response) > 0, "Anthropic chat response should not be empty") 67 | 68 | def test_openai_stream(self): 69 | print('---- start openai stream ----') 70 | input = ChatModelInput("You are a helpful assistant.", "gpt-4o") 71 | input.add_user_message("Tell me a story about a lion in the savanna.") 72 | 73 | # Use asyncio.run() to get the result of the coroutine 74 | full_text = asyncio.run(self._get_openai_stream(input)) 75 | 76 | print('openai stream response: ', full_text) 77 | 78 | self.assertTrue(len(full_text) > 0, "OpenAI stream response should not be empty") 79 | 80 | async def _get_openai_stream(self, chat_input): 81 | full_text = '' 82 | 83 | for content in self.openai_bot.stream(chat_input): 84 | full_text += content 85 | print('content item: ', content) 86 | 87 | return full_text 88 | 89 | def test_anthropic_stream(self): 90 | print('---- start anthropic stream ----') 91 | input = ChatModelInput("You are a helpful assistant.", "claude-3-7-sonnet-20250219") 92 | input.add_user_message("Give me a detailed explanation of quantum computing.") 93 | 94 | # use asyncio.run() to get the result of the coroutine 95 | full_text = asyncio.run(self._get_anthropic_stream(input)) 96 | 97 | print('anthropic stream response: ', full_text) 98 | 99 | self.assertTrue(len(full_text) > 0, "Anthropic stream response should not be empty") 100 | 101 | async def _get_anthropic_stream(self, chat_input): 102 | full_text = '' 103 | 104 | for content in self.anthropic_bot.stream(chat_input): 105 | full_text += content 106 | print('content item: ', content) 107 | 108 | return full_text 109 | 110 | if __name__ == '__main__': 111 | print('test') 112 | unittest.main() 113 | -------------------------------------------------------------------------------- /intelli/test/integration/test_chatbot_nvidia.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | import asyncio 4 | from dotenv import load_dotenv 5 | from intelli.function.chatbot import Chatbot, ChatProvider 6 | from intelli.model.input.chatbot_input import ChatModelInput 7 | 8 | load_dotenv() 9 | 10 | class TestChatbotNvidiaChatAndStream(unittest.TestCase): 11 | def setUp(self): 12 | self.nvidia_api_key = os.getenv("NVIDIA_API_KEY") 13 | assert self.nvidia_api_key, "NVIDIA_API_KEY is not set." 
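# Note: this test passes the provider as a string (ChatProvider.NVIDIA.value); other tests in this suite pass the ChatProvider enum directly.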
14 | self.chatbot = Chatbot(self.nvidia_api_key, ChatProvider.NVIDIA.value) 15 | 16 | def test_nvidia_chat_and_stream(self): 17 | 18 | # Test normal chat 19 | print("Testing Nvidia chat") 20 | normal_input = ChatModelInput("You are a helpful assistant.", model="deepseek-ai/deepseek-r1", max_tokens=1024, temperature=0.6) 21 | normal_input.add_user_message("What is the capital city of france?") 22 | response = self.chatbot.chat(normal_input) 23 | if isinstance(response, dict) and "result" in response: 24 | normal_output = response["result"] 25 | else: 26 | normal_output = response 27 | self.assertTrue(len(normal_output) > 0, "Nvidia normal chat response should not be empty") 28 | print("Nvidia normal chat output:", normal_output) 29 | 30 | # Test streaming chat 31 | print("Testing Nvidia stream") 32 | stream_input = ChatModelInput("You are a helpful assistant.", model="deepseek-ai/deepseek-r1", max_tokens=1024, temperature=0.6) 33 | stream_input.add_user_message("What is the capital city of france?") 34 | stream_output = asyncio.run(self.get_stream_output(stream_input)) 35 | self.assertTrue(len(stream_output) > 0, "Nvidia stream response should not be empty") 36 | print("Nvidia stream output:", stream_output) 37 | 38 | async def get_stream_output(self, chat_input): 39 | output = "" 40 | for chunk in self.chatbot.stream(chat_input): 41 | output += chunk 42 | return output 43 | 44 | if __name__ == "__main__": 45 | unittest.main() 46 | -------------------------------------------------------------------------------- /intelli/test/integration/test_chatbot_vllm.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | import sys 4 | from dotenv import load_dotenv 5 | from intelli.function.chatbot import Chatbot, ChatProvider 6 | from intelli.model.input.chatbot_input import ChatModelInput 7 | 8 | load_dotenv() 9 | 10 | 11 | class TestChatbotVLLM(unittest.TestCase): 12 | """Tests for the VLLM provider in Chatbot.""" 13 | 14 | def setUp(self): 15 | """Set up the test environment.""" 16 | 17 | self.deepseek_url = os.getenv("DEEPSEEK_VLLM_URL") 18 | 19 | if not self.deepseek_url: 20 | self.skipTest("DEEPSEEK_VLLM_URL environment variable not set") 21 | 22 | # Initialize the chatbot with VLLM provider 23 | self.chatbot = Chatbot( 24 | api_key=None, 25 | provider=ChatProvider.VLLM, 26 | options={ 27 | "baseUrl": self.deepseek_url, 28 | "debug": True 29 | } 30 | ) 31 | 32 | def test_vllm_chat(self): 33 | print("\nTesting VLLM regular chat completion:") 34 | 35 | # Create chat input 36 | chat_input = ChatModelInput( 37 | system="You are a helpful assistant.", 38 | model="deepseek-ai/DeepSeek-R1-Distill-Llama-8B", 39 | max_tokens=150, 40 | temperature=0.7 41 | ) 42 | 43 | chat_input.add_user_message("What is machine learning?") 44 | response = self.chatbot.chat(chat_input) 45 | 46 | # Handle response format (could be dict with "result" or directly the text) 47 | if isinstance(response, dict) and "result" in response: 48 | chat_output = response["result"] 49 | else: 50 | chat_output = response 51 | 52 | # Print and verify output 53 | print(f"VLLM chat output: {chat_output}") 54 | self.assertTrue(len(chat_output) > 0, "VLLM chat response should not be empty") 55 | self.assertTrue(isinstance(chat_output, list), "VLLM chat response should be a list") 56 | self.assertTrue(len(chat_output[0]) > 0, "VLLM chat response content should not be empty") 57 | 58 | def test_vllm_text_chat(self): 59 | 60 | # Create chat input without system message 61 | 
chat_input = ChatModelInput( 62 | system="", # Empty system message to force text completion 63 | model="deepseek-ai/DeepSeek-R1-Distill-Llama-8B", 64 | max_tokens=150, 65 | temperature=0.7 66 | ) 67 | chat_input.add_user_message("What is machine learning?") 68 | response = self.chatbot.chat(chat_input) 69 | 70 | # Handle response format 71 | if isinstance(response, dict) and "result" in response: 72 | chat_output = response["result"] 73 | else: 74 | chat_output = response 75 | 76 | # Print and verify output 77 | print(f"VLLM text completion output: {chat_output}") 78 | self.assertTrue(len(chat_output) > 0, "VLLM text completion response should not be empty") 79 | self.assertTrue(isinstance(chat_output, list), "VLLM text completion response should be a list") 80 | self.assertTrue(len(chat_output[0]) > 0, "VLLM text completion content should not be empty") 81 | 82 | def test_vllm_stream(self): 83 | """Test streaming with VLLM.""" 84 | print("\nTesting VLLM streaming:") 85 | 86 | # Create chat input 87 | stream_input = ChatModelInput( 88 | system="You are a helpful assistant.", 89 | model="deepseek-ai/DeepSeek-R1-Distill-Llama-8B", 90 | max_tokens=150, 91 | temperature=0.7 92 | ) 93 | stream_input.add_user_message("What is machine learning?") 94 | 95 | # Collect streaming output 96 | stream_output = "" 97 | chunks_received = 0 98 | 99 | for chunk in self.chatbot.stream(stream_input): 100 | chunks_received += 1 101 | if chunks_received <= 5: # Print first 5 chunks for debugging 102 | print(f"Stream chunk {chunks_received}: {chunk}") 103 | stream_output += chunk 104 | 105 | # Print summary and verify output 106 | print(f"Received {chunks_received} chunks total") 107 | print(f"VLLM streaming first 100 chars: {stream_output[:100]}...") 108 | print(f"Total streaming output length: {len(stream_output)}") 109 | 110 | self.assertTrue(chunks_received > 0, "Should receive at least one chunk") 111 | self.assertTrue(len(stream_output) > 0, "VLLM streaming response should not be empty") 112 | 113 | 114 | if __name__ == "__main__": 115 | unittest.main() -------------------------------------------------------------------------------- /intelli/test/integration/test_chatbot_with_data.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | from intelli.function.chatbot import Chatbot 4 | from intelli.model.input.chatbot_input import ChatModelInput 5 | from dotenv import load_dotenv 6 | load_dotenv() 7 | 8 | class TestChatbotWithData(unittest.TestCase): 9 | def setUp(self): 10 | # Loading API keys from environment variables 11 | self.openai_api_key = os.getenv("OPENAI_API_KEY") 12 | self.gemini_api_key = os.getenv("GEMINI_API_KEY") 13 | self.mistral_api_key = os.getenv("MISTRAL_API_KEY") 14 | one_key = os.getenv("INTELLI_ONE_KEY") 15 | api_base = os.getenv("INTELLI_API_BASE") 16 | 17 | # Creating Chatbot instances for each AI model with attach_reference set to True 18 | self.openai_bot = Chatbot(self.openai_api_key, "openai", 19 | {"one_key": one_key, "api_base": api_base}) 20 | self.gemini_bot = Chatbot(self.gemini_api_key, "gemini", 21 | {"one_key": one_key, "api_base": api_base}) 22 | self.mistral_bot = Chatbot(self.mistral_api_key, "mistral", 23 | {"one_key": one_key, "api_base": api_base}) 24 | 25 | def test_openai_chat_with_data(self): 26 | print('---- start openai with data ----') 27 | input = ChatModelInput("You are a helpful assistant.", "gpt-4o") 28 | input.attach_reference = True # Explicitly attaching references 29 | 
input.add_user_message("Why is Mars called the Red Planet?") 30 | 31 | response = self.openai_bot.chat(input) 32 | 33 | print('openai response with data: ', response) 34 | 35 | # Checking the presence of response and references 36 | self.assertTrue('result' in response and len(response['result']) > 0, "OpenAI chat response should not be empty") 37 | self.assertTrue('references' in response, "References should be included in the response") 38 | 39 | def test_gemini_chat_with_data(self): 40 | print('---- start gemini with data ----') 41 | input = ChatModelInput("You are a helpful assistant.", "gemini-model") 42 | input.attach_reference = True 43 | input.add_user_message("Why is Mars called the Red Planet?") 44 | 45 | response = self.gemini_bot.chat(input) 46 | 47 | print('gemini response with data: ', response) 48 | 49 | # Gemini might not support reference attachment like OpenAI, so modify this test accordingly if needed 50 | self.assertTrue(len(response) > 0, "Gemini chat response should not be empty") 51 | 52 | def test_mistral_chat_with_data(self): 53 | print('---- start mistral with data ----') 54 | input = ChatModelInput("You are a helpful assistant.", "mistral-tiny") 55 | input.attach_reference = True 56 | input.add_user_message("Why is Mars called the Red Planet?") 57 | 58 | response = self.mistral_bot.chat(input) 59 | 60 | print('mistral response with data: ', response) 61 | 62 | # Like Gemini, adjust expectations based on Mistral's capabilities 63 | self.assertTrue(len(response) > 0, "Mistral chat response should not be empty") 64 | 65 | if __name__ == '__main__': 66 | unittest.main() -------------------------------------------------------------------------------- /intelli/test/integration/test_cohereai_wrapper.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | import json 4 | from intelli.wrappers.cohereai_wrapper import CohereAIWrapper 5 | from intelli.utils.cohere_stream_parser import CohereStreamParser 6 | from dotenv import load_dotenv 7 | 8 | 9 | load_dotenv() 10 | 11 | class TestCohereAIWrapperIntegration(unittest.TestCase): 12 | def setUp(self): 13 | """Set up for the test case.""" 14 | self.api_key = os.getenv("COHERE_API_KEY") 15 | self.assertIsNotNone(self.api_key, "COHERE_API_KEY must not be None.") 16 | self.cohere = CohereAIWrapper(self.api_key) 17 | 18 | def test_cohere_generate_model(self): 19 | try: 20 | params = { 21 | 'model': 'command', 22 | 'prompt': 'Write a blog outline for a blog titled "The Art of Effective Communication"', 23 | 'temperature': 0.7, 24 | 'max_tokens': 200, 25 | } 26 | 27 | result = self.cohere.generate_text(params) 28 | print('Cohere Language Model Result:', result['generations'][0]['text']) 29 | except Exception as error: 30 | print('Cohere Language Model Error:', error) 31 | 32 | async def test_cohere_web_chat(self): 33 | try: 34 | params = { 35 | 'model': 'command-nightly', 36 | 'message': 'what is the command to install intellinode npm module ?', 37 | 'temperature': 0.3, 38 | 'chat_history': [], 39 | 'prompt_truncation': 'auto', 40 | 'stream': False, 41 | 'citation_quality': 'accurate', 42 | 'connectors': [{'id': 'web-search'}], 43 | } 44 | result = self.cohere.generate_chat_text(params) 45 | 46 | print('Cohere Chat Result:', json.dumps(result, indent=2)) 47 | except Exception as error: 48 | print('Cohere Chat Error:', error) 49 | 50 | def test_cohere_embeddings(self): 51 | try: 52 | params = { 53 | 'texts': ['Hello from Cohere!', 'Hallo von Cohere!', '您好,来自 
Cohere!'], 54 | 'model': 'embed-multilingual-v2.0', 55 | 'truncate': 'END', 56 | } 57 | 58 | result = self.cohere.get_embeddings(params) 59 | embeddings = result['embeddings'] 60 | print('Cohere Embeddings Result Sample:', embeddings[0][:50]) 61 | self.assertTrue(len(embeddings) > 0, 'test_cohere_embeddings response length should be greater than 0') 62 | except Exception as error: 63 | print('Cohere Embeddings Error:', error) 64 | 65 | def test_cohere_chat_stream(self): 66 | try: 67 | params = { 68 | 'model': 'command', 69 | 'message': 'how to use intellinode npm module ?', 70 | 'stream': True, 71 | 'chat_history': [], 72 | 'prompt_truncation': 'auto', 73 | 'citation_quality': 'accurate', 74 | 'temperature': 0.3 75 | } 76 | 77 | response_chunks = '' 78 | stream_parser = CohereStreamParser() 79 | 80 | for chunk in self.cohere.generate_chat_text(params): 81 | chunk_text = chunk.decode('utf-8') 82 | for content_text in stream_parser.feed(chunk_text): 83 | print('Result Chunk:', content_text) 84 | response_chunks += content_text 85 | 86 | print('Concatenated Text:', response_chunks) 87 | self.assertTrue(len(response_chunks) > 0, 88 | 'test_cohere_chat_stream response length should be greater than 0') 89 | except Exception as error: 90 | print('Cohere Chat Error:', error) 91 | 92 | if __name__ == "__main__": 93 | unittest.main() 94 | -------------------------------------------------------------------------------- /intelli/test/integration/test_elevenlabs_wrapper.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | import tempfile 4 | from intelli.wrappers.elevenlabs_wrapper import ElevenLabsWrapper 5 | from dotenv import load_dotenv 6 | 7 | load_dotenv() 8 | 9 | 10 | class TestElevenLabsWrapper(unittest.TestCase): 11 | @classmethod 12 | def setUpClass(cls): 13 | cls.api_key = os.getenv("ELEVENLABS_API_KEY") 14 | if not cls.api_key: 15 | raise unittest.SkipTest("ELEVENLABS_API_KEY environment variable not set") 16 | 17 | cls.wrapper = ElevenLabsWrapper(cls.api_key) 18 | 19 | # For tests that require a voice ID, get the first available voice 20 | voices = cls.wrapper.list_voices() 21 | if not voices or 'voices' not in voices or not voices['voices']: 22 | raise unittest.SkipTest("No voices available for testing") 23 | 24 | cls.voice_id = voices['voices'][0]['voice_id'] 25 | 26 | def test_list_voices(self): 27 | 28 | result = self.wrapper.list_voices() 29 | self.assertIn('voices', result) 30 | self.assertTrue(len(result['voices']) > 0) 31 | print(f"Found {len(result['voices'])} voices") 32 | 33 | # Print first voice details 34 | first_voice = result['voices'][0] 35 | print(f"First voice: {first_voice['name']} ({first_voice['voice_id']})") 36 | 37 | def test_text_to_speech(self): 38 | 39 | text = "Hello, this is a test of the Eleven Labs text to speech API." 40 | 41 | # Get audio data 42 | audio_data = self.wrapper.text_to_speech( 43 | text=text, 44 | voice_id=self.voice_id 45 | ) 46 | 47 | with tempfile.NamedTemporaryFile(suffix='.mp3', delete=False) as temp_file: 48 | temp_file.write(audio_data) 49 | temp_path = temp_file.name 50 | 51 | # Check file exists and has content 52 | self.assertTrue(os.path.exists(temp_path)) 53 | self.assertTrue(os.path.getsize(temp_path) > 0) 54 | 55 | print(f"Generated audio file at {temp_path}") 56 | 57 | # Clean up 58 | os.unlink(temp_path) 59 | 60 | def test_stream_text_to_speech(self): 61 | 62 | text = "This is a test of streaming audio from Eleven Labs."
63 | 64 | # Get streaming response 65 | response = self.wrapper.stream_text_to_speech( 66 | text=text, 67 | voice_id=self.voice_id 68 | ) 69 | 70 | with tempfile.NamedTemporaryFile(suffix='.mp3', delete=False) as temp_file: 71 | for chunk in response.iter_content(chunk_size=1024): 72 | if chunk: 73 | temp_file.write(chunk) 74 | temp_path = temp_file.name 75 | 76 | # Check file exists and has content 77 | self.assertTrue(os.path.exists(temp_path)) 78 | self.assertTrue(os.path.getsize(temp_path) > 0) 79 | 80 | print(f"Generated streaming audio file at {temp_path}") 81 | 82 | # Clean up 83 | os.unlink(temp_path) 84 | 85 | def test_speech_to_text(self): 86 | 87 | text = "This is a test of the speech to text capability." 88 | 89 | # Get audio data and save to temporary file 90 | audio_data = self.wrapper.text_to_speech( 91 | text=text, 92 | voice_id=self.voice_id 93 | ) 94 | 95 | with tempfile.NamedTemporaryFile(suffix='.mp3', delete=False) as temp_file: 96 | temp_file.write(audio_data) 97 | audio_path = temp_file.name 98 | 99 | try: 100 | 101 | result = self.wrapper.speech_to_text(audio_path) 102 | 103 | # Verify response 104 | self.assertIn('text', result) 105 | print(f"Transcribed text: {result['text']}") 106 | 107 | finally: 108 | # Clean up 109 | os.unlink(audio_path) 110 | 111 | def test_speech_to_text_with_bytes(self): 112 | 113 | text = "This is a test of the speech to text capability with bytes input." 114 | 115 | # Get audio data 116 | audio_data = self.wrapper.text_to_speech( 117 | text=text, 118 | voice_id=self.voice_id 119 | ) 120 | 121 | # Use the bytes directly 122 | result = self.wrapper.speech_to_text(audio_data) 123 | 124 | # Verify response 125 | self.assertIn('text', result) 126 | print(f"Transcribed text from bytes: {result['text']}") 127 | 128 | def test_speech_to_speech(self): 129 | 130 | text = "This is a test of the voice transformation capability." 
131 | 132 | # Get audio data and save to temporary file 133 | audio_data = self.wrapper.text_to_speech( 134 | text=text, 135 | voice_id=self.voice_id 136 | ) 137 | 138 | with tempfile.NamedTemporaryFile(suffix='.mp3', delete=False) as temp_file: 139 | temp_file.write(audio_data) 140 | audio_path = temp_file.name 141 | 142 | try: 143 | 144 | transformed_audio = self.wrapper.speech_to_speech( 145 | audio_file=audio_path, 146 | voice_id=self.voice_id # Use same voice for simplicity 147 | ) 148 | 149 | # Save transformed audio to temporary file 150 | with tempfile.NamedTemporaryFile(suffix='.mp3', delete=False) as out_file: 151 | out_file.write(transformed_audio) 152 | out_path = out_file.name 153 | 154 | # Check file exists and has content 155 | self.assertTrue(os.path.exists(out_path)) 156 | self.assertTrue(os.path.getsize(out_path) > 0) 157 | 158 | print(f"Generated transformed audio file at {out_path}") 159 | 160 | # Clean up transformed audio 161 | os.unlink(out_path) 162 | 163 | finally: 164 | # Clean up original audio 165 | os.unlink(audio_path) 166 | 167 | if __name__ == "__main__": 168 | unittest.main() -------------------------------------------------------------------------------- /intelli/test/integration/test_flow_icons.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import os 3 | import unittest 4 | from dotenv import load_dotenv 5 | 6 | from intelli.flow.agents.agent import Agent 7 | from intelli.flow.input.task_input import TextTaskInput, ImageTaskInput 8 | from intelli.flow.processors.basic_processor import TextProcessor 9 | from intelli.flow.sequence_flow import SequenceFlow 10 | from intelli.flow.tasks.task import Task 11 | from intelli.flow.types import * 12 | 13 | load_dotenv() 14 | 15 | 16 | class TestFlows(unittest.TestCase): 17 | def setUp(self): 18 | # Initiate the keys 19 | self.openai_api_key = os.getenv("OPENAI_API_KEY") 20 | self.gemini_key = os.getenv("GEMINI_API_KEY") 21 | self.stability_key = os.getenv("STABILITY_API_KEY") 22 | 23 | def test_icon_generate_flow(self): 24 | print("---- start icons flow ----") 25 | 26 | # Define agents 27 | desc_agent = Agent( 28 | agent_type=AgentTypes.TEXT.value, 29 | provider="openai", 30 | mission="generate image description from the user input to use it for DALL·E icon generation", 31 | model_params={"key": self.openai_api_key, "model": "gpt-3.5-turbo"}, 32 | ) 33 | 34 | image_agent = Agent( 35 | agent_type=AgentTypes.IMAGE.value, 36 | provider="openai", 37 | mission="generate image", 38 | model_params={"key": self.openai_api_key, "model": "dall-e-3", "width": 1024, "height": 1024}, 39 | ) 40 | 41 | # Define tasks 42 | task2 = Task( 43 | TextTaskInput("flat icon about {0}"), image_agent, log=False 44 | ) 45 | 46 | task1_list = [] 47 | topics = ["unified ai models access", "evaluate large language models", "workflows"] 48 | 49 | for topic in topics: 50 | task1 = Task( 51 | TextTaskInput( 52 | "Write simple icon description cartoon style inspired from docusaurus style about: {}".format( 53 | topic)), 54 | desc_agent, 55 | log=False, 56 | ) 57 | task1_list.append(task1) 58 | 59 | # Start SequenceFlow 60 | for index, task1 in enumerate(task1_list): 61 | print(f'---- Execute task {index+1} ----') 62 | flow = SequenceFlow([task1, task2], log=False) 63 | final_result = flow.start() 64 | 65 | print(f"{index + 1}- flow result:", final_result) 66 | 67 | 68 | if __name__ == "__main__": 69 | unittest.main() 70 | 
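# --- Illustrative sketch (not part of the repository) ---
# The Matcher mapping defined in intelli/flow/types.py (shown earlier) records what each
# agent type consumes and produces. The small helper below is a hypothetical utility that
# uses that mapping to sanity-check a chain of agent types before wiring tasks into a
# SequenceFlow like the ones in these tests; check_chain itself is an assumption, not an
# existing Intelli API.
from intelli.flow.types import Matcher

def check_chain(agent_types):
    """Return True when each agent's output type matches the next agent's input type."""
    for current_type, next_type in zip(agent_types, agent_types[1:]):
        if Matcher.output[current_type] != Matcher.input[next_type]:
            return False
    return True

print(check_chain(["text", "image"]))          # True: a text agent's 'text' output feeds an image agent
print(check_chain(["recognition", "vision"]))  # False: 'text' output does not match the required 'image' input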
-------------------------------------------------------------------------------- /intelli/test/integration/test_flow_sequence.py: -------------------------------------------------------------------------------- 1 | import os 2 | import base64 3 | import unittest 4 | from intelli.flow.types import * 5 | from intelli.flow.agents.agent import Agent 6 | from intelli.flow.input.task_input import TextTaskInput, ImageTaskInput 7 | from intelli.flow.processors.basic_processor import TextProcessor 8 | from intelli.flow.sequence_flow import SequenceFlow 9 | from intelli.flow.tasks.task import Task 10 | from dotenv import load_dotenv 11 | 12 | 13 | load_dotenv() 14 | 15 | 16 | class TestFlows(unittest.TestCase): 17 | def setUp(self): 18 | # Initiate the keys 19 | self.openai_api_key = os.getenv("OPENAI_API_KEY") 20 | self.gemini_key = os.getenv("GEMINI_API_KEY") 21 | self.stability_key = os.getenv("STABILITY_API_KEY") 22 | 23 | def test_blog_post_flow(self): 24 | print("---- start blog portal flow ----") 25 | 26 | # Define agents 27 | blog_agent = Agent( 28 | agent_type=AgentTypes.TEXT.value, 29 | provider="openai", 30 | mission="write blog posts", 31 | model_params={"key": self.openai_api_key, "model": "gpt-3.5-turbo"}, 32 | ) 33 | description_agent = Agent( 34 | agent_type=AgentTypes.TEXT.value, 35 | provider="gemini", 36 | mission="generate description only", 37 | model_params={"key": self.gemini_key, "model": "gemini"}, 38 | ) 39 | image_agent = Agent( 40 | agent_type=AgentTypes.IMAGE.value, 41 | provider="stability", 42 | mission="generate image", 43 | model_params={"key": self.stability_key}, 44 | ) 45 | 46 | # Define tasks 47 | task1 = Task( 48 | TextTaskInput("blog post about electric cars"), blog_agent, log=True 49 | ) 50 | task2 = Task( 51 | TextTaskInput("Write short image description for image generation model"), 52 | description_agent, 53 | pre_process=TextProcessor.text_head, 54 | log=True, 55 | ) 56 | task3 = Task( 57 | TextTaskInput("Generate cartoon style image"), image_agent, exclude=True, log=True 58 | ) 59 | 60 | # Start SequenceFlow 61 | flow = SequenceFlow([task1, task2, task3], log=True) 62 | final_result = flow.start() 63 | 64 | print("Final result:", final_result) 65 | 66 | def test_flow_chart_image_flow(self): 67 | print("---- start vision coder flow ----") 68 | 69 | analyst = Agent( 70 | agent_type=AgentTypes.VISION.value, 71 | provider="openai", 72 | mission="describe flow charts from images", 73 | model_params={"key": self.openai_api_key, "extension": "jpg", "model": "gpt-4o"}, 74 | ) 75 | 76 | coder = Agent( 77 | agent_type=AgentTypes.TEXT.value, 78 | provider="openai", 79 | mission="write python code. 
response only with the code without explanation or text or marks.", 80 | model_params={"key": self.openai_api_key, "model": "gpt-3.5-turbo"}, 81 | ) 82 | 83 | # Define tasks 84 | with open('./temp/code_flow_char.jpg', "rb") as image_file: 85 | image_data = base64.b64encode(image_file.read()).decode('utf-8') 86 | 87 | task1 = Task( 88 | ImageTaskInput(desc="describe the steps of the code flow chart for an engineer.", img=image_data), agent=analyst, log=True 89 | ) 90 | 91 | task2 = Task( 92 | TextTaskInput("write python code from the provided context"), agent=coder, log=True 93 | ) 94 | 95 | # Start SequenceFlow 96 | flow = SequenceFlow([task1, task2], log=True) 97 | final_result = flow.start() 98 | 99 | print("Final result:", final_result) 100 | 101 | if __name__ == "__main__": 102 | unittest.main() 103 | -------------------------------------------------------------------------------- /intelli/test/integration/test_gemini_latest_features.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Simple test suite for the latest Gemini API features. 4 | Tests only the most important new capabilities: system instructions, image generation, and TTS. 5 | """ 6 | 7 | import unittest 8 | import os 9 | import base64 10 | from dotenv import load_dotenv 11 | from intelli.wrappers.geminiai_wrapper import GeminiAIWrapper 12 | 13 | load_dotenv() 14 | 15 | 16 | class TestGeminiLatestFeaturesSimple(unittest.TestCase): 17 | """Simple test suite for the latest Gemini API features""" 18 | 19 | @classmethod 20 | def setUpClass(cls): 21 | """Set up test environment""" 22 | cls.api_key = os.getenv("GEMINI_API_KEY") 23 | 24 | if not cls.api_key: 25 | raise unittest.SkipTest("GEMINI_API_KEY not set") 26 | 27 | cls.wrapper = GeminiAIWrapper(cls.api_key) 28 | 29 | def test_system_instructions(self): 30 | """Test system instructions functionality""" 31 | print("\n=== Testing System Instructions ===") 32 | 33 | system_instruction = "You are a helpful AI assistant. Always provide concise answers with examples."
34 | content_parts = [{"text": "What is machine learning?"}] 35 | 36 | try: 37 | result = self.wrapper.generate_content_with_system_instructions( 38 | content_parts, system_instruction 39 | ) 40 | 41 | response_text = result['candidates'][0]['content']['parts'][0]['text'] 42 | print(f"Response with system instruction: {response_text[:200]}...") 43 | 44 | self.assertIsNotNone(response_text) 45 | self.assertGreater(len(response_text), 50) 46 | print("✅ System instructions working correctly") 47 | 48 | except Exception as e: 49 | print(f"❌ System instructions error: {e}") 50 | # Don't fail the test as this might require special access 51 | self.skipTest(f"System instructions not available: {e}") 52 | 53 | def test_image_generation(self): 54 | """Test image generation using Gemini 2.0 Flash""" 55 | print("\n=== Testing Image Generation ===") 56 | 57 | prompt = "A simple cartoon cat sitting on a blue cushion" 58 | 59 | try: 60 | result = self.wrapper.generate_image(prompt) 61 | print(f"Image generation result keys: {list(result.keys())}") 62 | 63 | self.assertIn('candidates', result) 64 | 65 | # Check for image data in response 66 | candidate = result['candidates'][0] 67 | image_found = False 68 | 69 | if 'content' in candidate and 'parts' in candidate['content']: 70 | for part in candidate['content']['parts']: 71 | if 'inline_data' in part and part['inline_data'].get('mime_type', '').startswith('image/'): 72 | image_data = part['inline_data']['data'] 73 | print(f"✅ Image generated successfully - data length: {len(image_data)}") 74 | image_found = True 75 | break 76 | 77 | if not image_found: 78 | print("⚠️ No image data found in response") 79 | # Check if there's text response instead 80 | if 'content' in candidate and 'parts' in candidate['content']: 81 | for part in candidate['content']['parts']: 82 | if 'text' in part: 83 | print(f"Text response: {part['text'][:100]}...") 84 | 85 | except Exception as e: 86 | print(f"❌ Image generation error: {e}") 87 | print("Note: Image generation may require special access or billing setup") 88 | self.skipTest(f"Image generation not available: {e}") 89 | 90 | def test_text_to_speech(self): 91 | """Test text-to-speech generation""" 92 | print("\n=== Testing Text-to-Speech ===") 93 | 94 | text = "Hello! This is a test of Gemini's text-to-speech capabilities." 
95 | 96 | try: 97 | result = self.wrapper.generate_speech(text) 98 | print(f"TTS result keys: {list(result.keys())}") 99 | 100 | self.assertIn('candidates', result) 101 | 102 | # Check for audio data 103 | candidate = result['candidates'][0] 104 | audio_found = False 105 | 106 | if 'content' in candidate and 'parts' in candidate['content']: 107 | for part in candidate['content']['parts']: 108 | if 'inline_data' in part and part['inline_data'].get('mime_type', '').startswith('audio/'): 109 | audio_data = part['inline_data']['data'] 110 | print(f"✅ Audio generated successfully - data length: {len(audio_data)}") 111 | audio_found = True 112 | break 113 | 114 | if not audio_found: 115 | print("⚠️ No audio data found in response") 116 | # Check if there's text response instead 117 | if 'content' in candidate and 'parts' in candidate['content']: 118 | for part in candidate['content']['parts']: 119 | if 'text' in part: 120 | print(f"Text response: {part['text'][:100]}...") 121 | 122 | except Exception as e: 123 | print(f"❌ TTS error: {e}") 124 | print("Note: TTS may require special model access") 125 | self.skipTest(f"TTS not available: {e}") 126 | 127 | 128 | if __name__ == "__main__": 129 | # Create temp directory if it doesn't exist 130 | os.makedirs('./temp', exist_ok=True) 131 | 132 | # Run tests 133 | unittest.main(verbosity=2) -------------------------------------------------------------------------------- /intelli/test/integration/test_intellicloud_wrapper.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | from intelli.wrappers.intellicloud_wrapper import IntellicloudWrapper 4 | from dotenv import load_dotenv 5 | 6 | load_dotenv() 7 | 8 | class TestIntellicloudWrapper(unittest.TestCase): 9 | 10 | def setUp(self): 11 | """Initialize the wrapper with an API key.""" 12 | api_key = os.getenv("INTELLI_ONE_KEY") 13 | # get the dev intelli url for the test case 14 | api_base = os.getenv("INTELLI_API_BASE") 15 | self.assertIsNotNone(api_key, "INTELLI_ONE_KEY must not be None.") 16 | self.intellicloud = IntellicloudWrapper(api_key, api_base) 17 | 18 | def test_semantic_search(self): 19 | query_text = "Why is Mars called the Red Planet?" 20 | k = 2 21 | result = self.intellicloud.semantic_search(query_text, k) 22 | print('Semantic Search Result: ', result) 23 | self.assertTrue(len(result) > 0, "Semantic search should return at least one result") 24 | 25 | def test_semantic_search_with_filter(self): 26 | query_text = "Why is Mars called the Red Planet?" 
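# k presumably caps how many matching passages semantic_search returns; the assertion below only requires at least one.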
27 | k = 2 28 | filters = {'document_name': 'test_mars_article.pdf'} 29 | result = self.intellicloud.semantic_search(query_text, k, filters) 30 | print('Semantic Search Result with Filter: ', result) 31 | self.assertTrue(len(result) > 0, "Semantic search with filter should return at least one result") 32 | 33 | if __name__ == "__main__": 34 | unittest.main() 35 | -------------------------------------------------------------------------------- /intelli/test/integration/test_keras_agent.py: -------------------------------------------------------------------------------- 1 | import os 2 | import unittest 3 | from intelli.flow.types import * 4 | from intelli.flow.agents.agent import Agent 5 | from intelli.flow.agents.kagent import KerasAgent 6 | from intelli.flow.input.task_input import TextTaskInput 7 | from intelli.flow.sequence_flow import SequenceFlow 8 | from intelli.flow.tasks.task import Task 9 | from dotenv import load_dotenv 10 | # set the keras backend 11 | os.environ["KERAS_BACKEND"] = "jax" 12 | # load env 13 | load_dotenv() 14 | 15 | class TestKerasFlows(unittest.TestCase): 16 | def setUp(self): 17 | self.kaggle_username = os.getenv("KAGGLE_USERNAME") 18 | self.kaggle_pass = os.getenv("KAGGLE_API_KEY") 19 | 20 | def test_blog_post_flow(self): 21 | print("---- start simple blog post flow ----") 22 | 23 | # Define agents 24 | gemma_model_params = { 25 | "model": "gemma_instruct_2b_en", 26 | "max_length": 64, 27 | "KAGGLE_USERNAME": self.kaggle_username, 28 | "KAGGLE_KEY": self.kaggle_pass, 29 | } 30 | gemma_agent = KerasAgent(agent_type="text", 31 | mission="write blog posts", 32 | model_params=gemma_model_params) 33 | 34 | # Define tasks 35 | task1 = Task( 36 | TextTaskInput("blog post about electric cars"), gemma_agent, log=True 37 | ) 38 | 39 | # Start SequenceFlow 40 | flow = SequenceFlow([task1], log=True) 41 | final_result = flow.start() 42 | 43 | print("Final result:", final_result) 44 | self.assertIsNotNone(final_result) 45 | 46 | 47 | if __name__ == "__main__": 48 | unittest.main() 49 | -------------------------------------------------------------------------------- /intelli/test/integration/test_keras_whisper.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import pytest 4 | from intelli.wrappers.keras_wrapper import KerasWrapper 5 | 6 | 7 | def test_whisper_real_audio(): 8 | import soundfile as sf 9 | 10 | test_file = "temp/long_audio.ogg" 11 | if not os.path.exists(test_file): 12 | pytest.skip("Test audio file not found.") 13 | audio_data, sample_rate = sf.read(test_file) 14 | 15 | # TODO : add your kaggle username and key 16 | wrapper = KerasWrapper( 17 | model_name="whisper_tiny_en", 18 | model_params={ 19 | "KAGGLE_USERNAME": "", 20 | "KAGGLE_KEY": "", 21 | }, 22 | ) 23 | 24 | result = wrapper.transcript( 25 | audio_data, 26 | sample_rate=sample_rate, 27 | language="<|en|>", 28 | user_prompt="You are a medical expert responsible for transcribing notes from a doctor’s speech.", 29 | condition_on_previous_text=True, 30 | ) 31 | assert result is not None, "Transcription result is None."
32 | print("Transcription output:", result) 33 | 34 | 35 | test_whisper_real_audio() 36 | -------------------------------------------------------------------------------- /intelli/test/integration/test_llama_cpp_wrapper.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | import shutil 4 | 5 | from intelli.wrappers.llama_cpp_wrapper import IntelliLlamaCPPWrapper 6 | 7 | try: 8 | from huggingface_hub import hf_hub_download 9 | except ImportError: 10 | hf_hub_download = None 11 | 12 | 13 | class TestIntelliLlamaCPPWrapper(unittest.TestCase): 14 | 15 | @classmethod 16 | def setUpClass(cls): 17 | """ 18 | Create a temp directory and download a small LLaMA-based GGUF model 19 | for offline tests. 20 | """ 21 | cls.temp_dir = os.path.join("..", "temp", "tinyllama_tests") 22 | os.makedirs(cls.temp_dir, exist_ok=True) 23 | 24 | if hf_hub_download is None: 25 | raise ImportError( 26 | "huggingface_hub is not installed. Use 'pip install intelli[llamacpp]'." 27 | ) 28 | 29 | # Use TheBloke's TinyLlama model for testing 30 | cls.repo_id = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF" 31 | cls.filename = "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf" 32 | 33 | try: 34 | print(f"Downloading {cls.filename} from {cls.repo_id} to {cls.temp_dir}") 35 | cls.model_path = hf_hub_download( 36 | repo_id=cls.repo_id, filename=cls.filename, local_dir=cls.temp_dir 37 | ) 38 | print(f"Model downloaded: {cls.model_path}") 39 | except Exception as e: 40 | raise RuntimeError(f"Failed downloading the TinyLlama model: {e}") 41 | 42 | @classmethod 43 | def tearDownClass(cls): 44 | """ 45 | Remove the temp directory to clean up after tests. 46 | """ 47 | if os.path.exists(cls.temp_dir): 48 | try: 49 | shutil.rmtree(cls.temp_dir) 50 | except OSError as err: 51 | print(f"Error removing temp dir {cls.temp_dir}: {err}") 52 | 53 | def test_offline_model_generation_basic(self): 54 | """ 55 | Test generating text with a local llama.cpp model using IntelliLlamaCPPWrapper. 56 | """ 57 | wrapper = IntelliLlamaCPPWrapper( 58 | model_path=self.model_path, 59 | model_params={"n_threads": 2, "n_ctx": 512, "n_batch": 256}, 60 | ) 61 | params = { 62 | "prompt": "User: Hello Llama, how are you?\nAssistant:", 63 | "max_tokens": 32, 64 | "temperature": 0.8, 65 | "top_p": 0.9, 66 | } 67 | result = wrapper.generate_text(params) 68 | self.assertIn("choices", result, "Result should have 'choices' key.") 69 | self.assertGreater( 70 | len(result["choices"]), 0, "Should have at least one choice." 71 | ) 72 | text_out = result["choices"][0]["text"] 73 | self.assertIsInstance(text_out, str, "The generated text should be a string.") 74 | self.assertGreater(len(text_out), 0, "Output text should not be empty.") 75 | print(f"Offline generation output:\n{text_out}\n") 76 | 77 | def test_offline_model_generation_second(self): 78 | """ 79 | Another chat test to ensure multiple prompts work fine. 80 | """ 81 | wrapper = IntelliLlamaCPPWrapper( 82 | model_path=self.model_path, 83 | model_params={"n_threads": 2, "n_ctx": 512, "n_batch": 256}, 84 | ) 85 | # Update prompt to follow a chat format for consistent output. 
86 | params = { 87 | "prompt": "User: What is 2+2?\nAssistant:", 88 | "max_tokens": 30, 89 | "temperature": 0.6, 90 | "top_p": 0.95, 91 | } 92 | result = wrapper.generate_text(params) 93 | self.assertIn("choices", result) 94 | self.assertTrue(len(result["choices"]) > 0) 95 | text_out = result["choices"][0]["text"] 96 | self.assertTrue(isinstance(text_out, str)) 97 | self.assertTrue(len(text_out) > 0, "Output text should not be empty.") 98 | print(f"Second offline generation output:\n{text_out}\n") 99 | 100 | def test_offline_embeddings(self): 101 | """ 102 | A single test for embedding extraction, ensuring we retrieve 103 | a valid embedding dict from llama-cpp-python for a single input. 104 | """ 105 | wrapper = IntelliLlamaCPPWrapper() 106 | # "embedding": True is required for offline embedding mode. 107 | model_params = {"embedding": True, "n_threads": 2, "n_ctx": 256, "n_batch": 128} 108 | wrapper.load_local_model(self.model_path, model_params) 109 | 110 | text = "Hello from TinyLlama" 111 | emb_result = wrapper.get_embeddings({"input": text}) 112 | 113 | # Expecting a simplified dict with key "embedding" (a flat list of floats). 114 | self.assertIsInstance( 115 | emb_result, dict, "Should be a dict for single input embedding." 116 | ) 117 | self.assertIn("embedding", emb_result, "Result must have 'embedding' key.") 118 | emb = emb_result["embedding"] 119 | self.assertIsInstance(emb, list, "'embedding' should be a list of floats.") 120 | self.assertGreater( 121 | len(emb), 10, "Embedding vector should have more than 10 dimensions." 122 | ) 123 | print(f"Embedding vector sample (first 5 dims): {emb[:5]} ...\n") 124 | 125 | def test_server_mode_generation(self): 126 | """ 127 | Optional test: if a llama.cpp server is running on localhost:8080, 128 | generate text via server mode. 129 | """ 130 | server_url = "http://localhost:8080" 131 | wrapper = IntelliLlamaCPPWrapper(server_url=server_url) 132 | params = { 133 | "prompt": "User: Hello from server mode!\nAssistant:", 134 | "max_tokens": 20, 135 | "temperature": 0.7, 136 | } 137 | try: 138 | result = wrapper.generate_text(params) 139 | self.assertIn("choices", result) 140 | self.assertGreater(len(result["choices"]), 0) 141 | text_out = result["choices"][0]["text"] 142 | self.assertIsInstance(text_out, str) 143 | self.assertGreater(len(text_out), 0) 144 | print(f"Server generation output:\n{text_out}\n") 145 | except Exception as e: 146 | self.skipTest( 147 | f"Skipping server mode test. 
Server not available or failed: {e}" 148 | ) 149 | 150 | 151 | if __name__ == "__main__": 152 | unittest.main() 153 | -------------------------------------------------------------------------------- /intelli/test/integration/test_mistralai_wrapper.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | from intelli.wrappers.mistralai_wrapper import MistralAIWrapper 4 | from dotenv import load_dotenv 5 | load_dotenv() 6 | 7 | class TestMistralAIWrapperIntegration(unittest.TestCase): 8 | def setUp(self): 9 | """Set up for the test case.""" 10 | self.api_key = os.getenv("MISTRAL_API_KEY") 11 | self.assertIsNotNone(self.api_key, "MISTRAL_API_KEY must not be None.") 12 | self.mistral = MistralAIWrapper(self.api_key) 13 | 14 | def test_generate_text_integration(self): 15 | """Integration test for generate_text method.""" 16 | 17 | params = { 18 | "model": "mistral-tiny", 19 | "messages": [{"role": "user", "content": "Who is the most renowned French painter?"}] 20 | } 21 | 22 | # Call the model 23 | result = self.mistral.generate_text(params) 24 | print('generate text result: ', result['choices'][0]['message']['content']) 25 | self.assertIn('message', result['choices'][0], "The API response doesn't match the expected format.") 26 | 27 | def test_get_embeddings_integration(self): 28 | """Integration test for get_embeddings method.""" 29 | 30 | params = { 31 | "model": "mistral-embed", 32 | "input": ["Embed this sentence.", "As well as this one."] 33 | } 34 | 35 | # Call the model 36 | result = self.mistral.get_embeddings(params) 37 | print('embedding sample result: ', result['data'][0]['embedding'][:3]) 38 | self.assertTrue('data' in result and len(result['data']) > 0, "The API response should contain embeddings data.") 39 | 40 | if __name__ == "__main__": 41 | unittest.main() 42 | -------------------------------------------------------------------------------- /intelli/test/integration/test_nvidia_wrapper.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | from dotenv import load_dotenv 4 | from intelli.wrappers.nvidia_wrapper import NvidiaWrapper 5 | 6 | load_dotenv() 7 | 8 | 9 | class TestNvidiaWrapper(unittest.TestCase): 10 | @classmethod 11 | def setUpClass(cls): 12 | cls.api_key = os.getenv("NVIDIA_API_KEY") 13 | assert cls.api_key, "NVIDIA_API_KEY is not set." 
14 | cls.wrapper = NvidiaWrapper(cls.api_key) 15 | 16 | def test_generate_text_llama(self): 17 | params = { 18 | "model": "meta/llama-3.3-70b-instruct", 19 | "messages": [ 20 | {"role": "user", "content": "Write a limerick about GPU computing."} 21 | ], 22 | "max_tokens": 1024, 23 | "temperature": 0.2, 24 | "top_p": 0.7, 25 | "stream": False, 26 | } 27 | response = self.wrapper.generate_text(params) 28 | self.assertIn("choices", response) 29 | self.assertGreater(len(response["choices"]), 0) 30 | message = response["choices"][0]["message"]["content"] 31 | self.assertTrue(isinstance(message, str) and len(message) > 0) 32 | 33 | def test_generate_text_deepseek(self): 34 | params = { 35 | "model": "deepseek-ai/deepseek-r1", 36 | "messages": [ 37 | {"role": "user", "content": "Which number is larger, 9.11 or 9.8?"} 38 | ], 39 | "max_tokens": 4096, 40 | "temperature": 0.6, 41 | "top_p": 0.7, 42 | "stream": False, 43 | } 44 | response = self.wrapper.generate_text(params) 45 | self.assertIn("choices", response) 46 | self.assertGreater(len(response["choices"]), 0) 47 | message = response["choices"][0]["message"]["content"] 48 | self.assertTrue(isinstance(message, str) and len(message) > 0) 49 | 50 | def test_get_embeddings(self): 51 | params = { 52 | "input": ["What is the capital of France?"], 53 | "model": "nvidia/llama-3.2-nv-embedqa-1b-v2", 54 | "input_type": "query", 55 | "encoding_format": "float", 56 | "truncate": "NONE", 57 | } 58 | response = self.wrapper.get_embeddings(params) 59 | self.assertIn("data", response) 60 | self.assertGreater(len(response["data"]), 0) 61 | self.assertIn("embedding", response["data"][0]) 62 | embedding = response["data"][0]["embedding"] 63 | self.assertIsInstance(embedding, list) 64 | self.assertGreater(len(embedding), 0) 65 | 66 | 67 | if __name__ == "__main__": 68 | unittest.main() 69 | -------------------------------------------------------------------------------- /intelli/test/integration/test_nvidia_wrapper_nim.py: -------------------------------------------------------------------------------- 1 | # test_nvidia_wrapper_nim.py 2 | 3 | import unittest 4 | import os 5 | from dotenv import load_dotenv 6 | from intelli.wrappers.nvidia_wrapper import NvidiaWrapper 7 | 8 | # Load environment variables from .env file (if available) 9 | load_dotenv() 10 | 11 | class TestNvidiaWrapperNim(unittest.TestCase): 12 | @classmethod 13 | def setUpClass(cls): 14 | # get the API key and the local NIM URL 15 | cls.api_key = os.getenv("NVIDIA_API_KEY") 16 | cls.nim_base_url = os.getenv("NVIDIA_NIM_BASE_URL", "http://localhost:8000") 17 | if not cls.api_key: 18 | raise ValueError("NVIDIA_API_KEY must be set in your environment.") 19 | # create the wrapper using the local base URL. 20 | cls.wrapper = NvidiaWrapper(cls.api_key, base_url=cls.nim_base_url) 21 | 22 | def test_chat_completion(self): 23 | """Test chat completion using NVIDIA NIM.""" 24 | params = { 25 | "model": "google/gemma-2-9b-it", 26 | "messages": [ 27 | {"role": "user", "content": "Write a limerick about GPU computing."} 28 | ], 29 | "max_tokens": 64, 30 | "temperature": 0.5, 31 | "top_p": 1, 32 | "stream": False, 33 | } 34 | response = self.wrapper.generate_text(params) 35 | self.assertIn("choices", response, "Response should contain 'choices'.") 36 | self.assertGreater(len(response["choices"]), 0, "There should be at least one choice.") 37 | # verify non-empty string. 
38 | message = response["choices"][0]["message"]["content"] 39 | self.assertIsInstance(message, str, "Message content should be a string.") 40 | self.assertGreater(len(message), 0, "Message content should not be empty.") 41 | 42 | if __name__ == "__main__": 43 | unittest.main() 44 | -------------------------------------------------------------------------------- /intelli/test/integration/test_remote_embed_model.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from intelli.controller.remote_embed_model import RemoteEmbedModel 3 | from intelli.model.input.embed_input import EmbedInput 4 | import os 5 | from dotenv import load_dotenv 6 | 7 | load_dotenv() 8 | 9 | 10 | class TestRemoteEmbedModel(unittest.TestCase): 11 | 12 | # Set up API keys for different providers 13 | OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") 14 | MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY") 15 | GEMINI_API_KEY = os.getenv("GEMINI_API_KEY") 16 | 17 | def test_openai_embeddings(self): 18 | """Test retrieving embeddings from OpenAI.""" 19 | if self.OPENAI_API_KEY is None: 20 | self.skipTest("OPENAI_API_KEY environment variable is not set.") 21 | 22 | provider = "openai" 23 | model = RemoteEmbedModel(self.OPENAI_API_KEY, provider) 24 | embed_input = EmbedInput(["This is a test sentence for embeddings."]) 25 | embed_input.set_default_values(provider) 26 | 27 | result = model.get_embeddings(embed_input) 28 | self.assertIn("data", result, "OpenAI response should contain 'data' field") 29 | 30 | def test_mistral_embeddings(self): 31 | """Test retrieving embeddings from Mistral.""" 32 | if self.MISTRAL_API_KEY is None: 33 | self.skipTest("MISTRAL_API_KEY environment variable is not set.") 34 | 35 | provider = "mistral" 36 | model = RemoteEmbedModel(self.MISTRAL_API_KEY, provider) 37 | embed_input = EmbedInput(["Mistral provides interesting insights."]) 38 | embed_input.set_default_values(provider) 39 | 40 | result = model.get_embeddings(embed_input) 41 | # Assuming a similar response format for simplicity; adjust according to actual API 42 | self.assertIn("data", result, "Mistral response should contain 'data' field") 43 | 44 | def test_gemini_embeddings(self): 45 | """Test retrieving embeddings from Gemini.""" 46 | if self.GEMINI_API_KEY is None: 47 | self.skipTest("GEMINI_API_KEY environment variable is not set.") 48 | 49 | provider = "gemini" 50 | model = RemoteEmbedModel(self.GEMINI_API_KEY, provider) 51 | embed_input = EmbedInput( 52 | ["Explore Gemini's API for embeddings."], "models/embedding-001" 53 | ) 54 | 55 | result = model.get_embeddings(embed_input) 56 | self.assertIsInstance( 57 | result["values"], list, "Gemini response should be a list of embeddings" 58 | ) 59 | 60 | def test_vllm_embeddings(self): 61 | """Test retrieving embeddings from vLLM.""" 62 | vllm_embed_url = os.getenv("VLLM_EMBED_URL") 63 | if not vllm_embed_url: 64 | self.skipTest("VLLM_EMBED_URL environment variable is not set.") 65 | 66 | provider = "vllm" 67 | model = RemoteEmbedModel( 68 | api_key=None, provider_name=provider, options={"baseUrl": vllm_embed_url} 69 | ) 70 | 71 | # Add debug prints to understand what's being passed 72 | test_sentence = "This is a test sentence for vLLM embeddings." 
73 | embed_input = EmbedInput( 74 | [test_sentence], 75 | model="BAAI/bge-small-en-v1.5", 76 | ) 77 | 78 | print(f"Testing vLLM embeddings with URL: {vllm_embed_url}") 79 | print(f"Using model: {embed_input.model}") 80 | print(f"Input text: {embed_input.texts}") 81 | 82 | # Print the actual request that will be sent 83 | vllm_request = embed_input.get_vllm_inputs() 84 | print(f"vLLM request params: {vllm_request}") 85 | 86 | # Get embeddings 87 | result = model.get_embeddings(embed_input) 88 | print(f"Embedding result structure: {list(result.keys())}") 89 | 90 | # Print sample of embeddings 91 | if "embeddings" in result and len(result["embeddings"]) > 0: 92 | print(f"First few dimensions of embedding: {result['embeddings'][0][:5]}") 93 | print(f"Embedding dimensions: {len(result['embeddings'][0])}") 94 | 95 | self.assertIn( 96 | "embeddings", result, "vLLM response should contain 'embeddings' field" 97 | ) 98 | self.assertTrue( 99 | len(result["embeddings"]) > 0, "Should return at least one embedding" 100 | ) 101 | self.assertTrue( 102 | len(result["embeddings"][0]) > 0, "Embedding should have dimensions" 103 | ) 104 | 105 | 106 | if __name__ == "__main__": 107 | unittest.main() 108 | -------------------------------------------------------------------------------- /intelli/test/integration/test_remote_embed_model_nvidia.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | from dotenv import load_dotenv 4 | from intelli.model.input.embed_input import EmbedInput 5 | from intelli.controller.remote_embed_model import RemoteEmbedModel 6 | 7 | load_dotenv() 8 | 9 | class TestRemoteEmbedModelNvidia(unittest.TestCase): 10 | @classmethod 11 | def setUpClass(cls): 12 | cls.api_key = os.getenv("NVIDIA_API_KEY") 13 | assert cls.api_key, "NVIDIA_API_KEY is not set." 14 | cls.embed_model = RemoteEmbedModel(cls.api_key, "nvidia") 15 | 16 | def test_get_embeddings(self): 17 | text = "What is the capital of France?" 18 | embed_input = EmbedInput([text], model="nvidia/llama-3.2-nv-embedqa-1b-v2") 19 | result = self.embed_model.get_embeddings(embed_input) 20 | self.assertIn("data", result) 21 | self.assertGreater(len(result["data"]), 0) 22 | self.assertIn("embedding", result["data"][0]) 23 | embedding = result["data"][0]["embedding"] 24 | self.assertIsInstance(embedding, list) 25 | self.assertGreater(len(embedding), 0) 26 | print("Nvidia embedding sample:", embedding[:5]) 27 | 28 | if __name__ == "__main__": 29 | unittest.main() 30 | -------------------------------------------------------------------------------- /intelli/test/integration/test_remote_recognition_model.py: -------------------------------------------------------------------------------- 1 | import os 2 | import unittest 3 | from dotenv import load_dotenv 4 | from intelli.controller.remote_recognition_model import RemoteRecognitionModel, SupportedRecognitionModels 5 | from intelli.model.input.text_recognition_input import SpeechRecognitionInput 6 | 7 | load_dotenv() 8 | 9 | 10 | class TestRemoteRecognitionModel(unittest.TestCase): 11 | """ 12 | Integration tests for the RemoteRecognitionModel with both 13 | OpenAI and Keras (offline) providers. 
14 | """ 15 | 16 | def setUp(self): 17 | """Set up for the test case.""" 18 | self.api_key_openai = os.getenv('OPENAI_API_KEY') 19 | self.api_key_elevenlabs = os.getenv('ELEVENLABS_API_KEY') 20 | self.temp_dir = './temp' 21 | 22 | # Define path to test audio file (harvard.wav) 23 | self.test_audio_path = os.path.join(self.temp_dir, 'test.wav') 24 | 25 | # Skip tests if the file doesn't exist 26 | if not os.path.exists(self.test_audio_path): 27 | print(f"Warning: Test audio file not found at {self.test_audio_path}") 28 | 29 | # Initialize the recognition models 30 | if self.api_key_openai: 31 | self.openai_recognition = RemoteRecognitionModel( 32 | self.api_key_openai, 33 | SupportedRecognitionModels['OPENAI'] 34 | ) 35 | 36 | if self.api_key_elevenlabs: 37 | self.elevenlabs_recognition = RemoteRecognitionModel( 38 | self.api_key_elevenlabs, 39 | SupportedRecognitionModels['ELEVENLABS'] 40 | ) 41 | 42 | # Only set up Keras if we're going to test it 43 | self.keras_available = False 44 | try: 45 | import keras_nlp 46 | self.keras_available = True 47 | self.keras_recognition = RemoteRecognitionModel( 48 | provider=SupportedRecognitionModels['KERAS'], 49 | model_name="whisper_tiny_en" 50 | ) 51 | except ImportError: 52 | print("Keras NLP not available, skipping Keras tests") 53 | 54 | def test_openai_recognition(self): 55 | """Test speech recognition with OpenAI""" 56 | if not self.api_key_openai: 57 | self.skipTest("OpenAI API key not provided") 58 | 59 | if not os.path.exists(self.test_audio_path): 60 | self.skipTest(f"Test audio file not found: {self.test_audio_path}") 61 | 62 | # Create input parameters 63 | recognition_input = SpeechRecognitionInput( 64 | audio_file_path=self.test_audio_path, 65 | model="whisper-1" 66 | ) 67 | 68 | try: 69 | # Get transcription 70 | result = self.openai_recognition.recognize_speech(recognition_input) 71 | print(f"OpenAI Recognition Result: {result}") 72 | 73 | self.assertIsInstance(result, str) 74 | self.assertTrue(len(result) > 0, "Transcription should not be empty") 75 | except Exception as e: 76 | self.fail(f"OpenAI recognition failed with error: {e}") 77 | 78 | def test_keras_recognition(self): 79 | """Test speech recognition with Keras offline model""" 80 | if not self.keras_available: 81 | self.skipTest("Keras NLP not available") 82 | 83 | if not os.path.exists(self.test_audio_path): 84 | self.skipTest(f"Test audio file not found: {self.test_audio_path}") 85 | 86 | # Create input parameters 87 | recognition_input = SpeechRecognitionInput( 88 | audio_file_path=self.test_audio_path, 89 | language="<|en|>" # Whisper format for language 90 | ) 91 | 92 | try: 93 | # Get transcription 94 | result = self.keras_recognition.recognize_speech(recognition_input) 95 | print(f"Keras Recognition Result: {result}") 96 | 97 | self.assertIsInstance(result, str) 98 | self.assertTrue(len(result) > 0, "Transcription should not be empty") 99 | except Exception as e: 100 | print(f"Warning: Keras recognition test failed with: {str(e)}") 101 | # Don't fail the test as Keras might have issues on some configurations 102 | self.skipTest(f"Keras recognition failed: {str(e)}") 103 | 104 | def test_elevenlabs_recognition(self): 105 | """Test speech recognition with Eleven Labs""" 106 | if not self.api_key_elevenlabs: 107 | self.skipTest("Eleven Labs API key not provided") 108 | 109 | if not os.path.exists(self.test_audio_path): 110 | self.skipTest(f"Test audio file not found: {self.test_audio_path}") 111 | 112 | recognition_input = SpeechRecognitionInput( 113 | 
audio_file_path=self.test_audio_path, 114 | language="en" # Standard parameter 115 | ) 116 | 117 | recognition_input.model_id = "scribe_v1" 118 | 119 | try: 120 | # Get transcription 121 | result = self.elevenlabs_recognition.recognize_speech(recognition_input) 122 | print(f"Eleven Labs Recognition Result: {result}") 123 | 124 | self.assertIsInstance(result, str) 125 | self.assertTrue(len(result) > 0, "Transcription should not be empty") 126 | 127 | except Exception as e: 128 | error_str = str(e).lower() 129 | if 'subscription' in error_str or '402' in error_str or 'payment' in error_str: 130 | self.skipTest(f"Eleven Labs speech recognition requires higher subscription tier: {e}") 131 | else: 132 | self.fail(f"Eleven Labs recognition failed with error: {e}") 133 | 134 | if __name__ == "__main__": 135 | unittest.main() -------------------------------------------------------------------------------- /intelli/test/integration/test_remote_speech_model.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import os 3 | import unittest 4 | from dotenv import load_dotenv 5 | from intelli.controller.remote_speech_model import ( 6 | RemoteSpeechModel, 7 | SupportedSpeechModels, 8 | ) 9 | from intelli.model.input.text_speech_input import Text2SpeechInput 10 | 11 | load_dotenv() 12 | 13 | 14 | class TestRemoteSpeechModel(unittest.TestCase): 15 | 16 | def setUp(self): 17 | self.api_key_google = os.getenv("GOOGLE_API_KEY") 18 | self.api_key_openai = os.getenv("OPENAI_API_KEY") 19 | self.api_key_elevenlabs = os.getenv("ELEVENLABS_API_KEY") 20 | self.remote_speech_model_google = RemoteSpeechModel( 21 | self.api_key_google, SupportedSpeechModels["GOOGLE"] 22 | ) 23 | self.remote_speech_model_openai = RemoteSpeechModel( 24 | self.api_key_openai, SupportedSpeechModels["OPENAI"] 25 | ) 26 | if self.api_key_elevenlabs: 27 | self.remote_speech_model_elevenlabs = RemoteSpeechModel( 28 | self.api_key_elevenlabs, SupportedSpeechModels["ELEVENLABS"] 29 | ) 30 | self.temp_dir = "../temp" 31 | 32 | if not os.path.exists(self.temp_dir): 33 | os.makedirs(self.temp_dir) 34 | 35 | def test_generate_speech_google(self): 36 | input_params = Text2SpeechInput("Welcome to Intellinode.", "en-gb") 37 | audio_content = self.remote_speech_model_google.generate_speech(input_params) 38 | self.assertTrue(audio_content, "audio_content should not be None") 39 | 40 | # decode the base64 41 | audio_data = base64.b64decode(audio_content) 42 | google_file_path = os.path.join(self.temp_dir, "google_speech.mp3") 43 | 44 | # save the audio file 45 | with open(google_file_path, "wb") as audio_file: 46 | audio_file.write(audio_data) 47 | 48 | self.assertTrue( 49 | os.path.exists(google_file_path), "Google TTS MP3 file should be saved" 50 | ) 51 | 52 | def test_generate_speech_openai(self): 53 | input_params = Text2SpeechInput( 54 | "Welcome to Intellinode.", "en-US", "MALE", "alloy", "tts-1", True 55 | ) 56 | result = self.remote_speech_model_openai.generate_speech(input_params) 57 | self.assertTrue(result, "result should not be None") 58 | 59 | # write the streaming audio 60 | openai_file_path = os.path.join(self.temp_dir, "openai_speech.mp3") 61 | with open(openai_file_path, "wb") as audio_file: 62 | for chunk in result: 63 | audio_file.write(chunk) 64 | 65 | self.assertTrue( 66 | os.path.exists(openai_file_path), "OpenAI TTS MP3 file should be saved" 67 | ) 68 | 69 | def test_generate_speech_elevenlabs(self): 70 | """Test text-to-speech functionality with Eleven Labs""" 71 | if not
self.api_key_elevenlabs: 72 | self.skipTest("Eleven Labs API key not provided") 73 | 74 | try: 75 | voices_result = self.remote_speech_model_elevenlabs.list_voices() 76 | self.assertIn("voices", voices_result, "Should return a list of voices") 77 | self.assertTrue( 78 | len(voices_result["voices"]) > 0, "Should have at least one voice" 79 | ) 80 | 81 | # Get the first voice ID 82 | voice_id = voices_result["voices"][0]["voice_id"] 83 | print( 84 | f"Using Eleven Labs voice: {voices_result['voices'][0]['name']} ({voice_id})" 85 | ) 86 | 87 | input_params = Text2SpeechInput( 88 | "Welcome to Intellinode with Eleven Labs.", language="en" 89 | ) 90 | # Add the voice_id attribute dynamically 91 | input_params.voice_id = voice_id 92 | input_params.model_id = ( 93 | "eleven_multilingual_v2" 94 | ) 95 | 96 | # Generate speech 97 | audio_content = self.remote_speech_model_elevenlabs.generate_speech( 98 | input_params 99 | ) 100 | self.assertTrue(audio_content, "audio_content should not be None") 101 | 102 | # Save the audio file 103 | elevenlabs_file_path = os.path.join(self.temp_dir, "elevenlabs_speech.mp3") 104 | with open(elevenlabs_file_path, "wb") as audio_file: 105 | audio_file.write(audio_content) 106 | 107 | self.assertTrue( 108 | os.path.exists(elevenlabs_file_path), 109 | "Eleven Labs TTS MP3 file should be saved", 110 | ) 111 | print(f"Eleven Labs audio saved to: {elevenlabs_file_path}") 112 | 113 | # Test streaming 114 | input_params.text = "This is a streaming test with Eleven Labs." 115 | streaming_response = self.remote_speech_model_elevenlabs.stream_speech( 116 | input_params 117 | ) 118 | 119 | # Save the streaming audio 120 | elevenlabs_stream_path = os.path.join( 121 | self.temp_dir, "elevenlabs_stream.mp3" 122 | ) 123 | with open(elevenlabs_stream_path, "wb") as audio_file: 124 | for chunk in streaming_response.iter_content(chunk_size=1024): 125 | if chunk: 126 | audio_file.write(chunk) 127 | 128 | self.assertTrue( 129 | os.path.exists(elevenlabs_stream_path), 130 | "Eleven Labs streaming MP3 file should be saved", 131 | ) 132 | print(f"Eleven Labs streaming audio saved to: {elevenlabs_stream_path}") 133 | 134 | except Exception as e: 135 | self.fail(f"Eleven Labs test failed with error: {str(e)}") 136 | 137 | 138 | if __name__ == "__main__": 139 | unittest.main() 140 | -------------------------------------------------------------------------------- /intelli/test/integration/test_remote_vision_model.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | from intelli.controller.remote_vision_model import RemoteVisionModel 4 | from intelli.model.input.vision_input import VisionModelInput 5 | from dotenv import load_dotenv 6 | 7 | load_dotenv() 8 | 9 | 10 | class TestRemoteVisionModel(unittest.TestCase): 11 | 12 | def setUp(self): 13 | self.openai_api_key = os.getenv('OPENAI_API_KEY') 14 | self.gemini_api_key = os.getenv('GEMINI_API_KEY') 15 | self.google_api_key = os.getenv('GOOGLE_API_KEY') 16 | 17 | self.image_path = './temp/test_image_desc.png' 18 | 19 | # Check if required keys are available 20 | missing_keys = [] 21 | if not self.openai_api_key: 22 | missing_keys.append("OpenAI") 23 | if not self.gemini_api_key: 24 | missing_keys.append("Gemini") 25 | 26 | if missing_keys: 27 | raise unittest.SkipTest(f"Missing API keys: {', '.join(missing_keys)}") 28 | 29 | def test_openai_image_descriptor(self): 30 | print('--- call openai vision ---') 31 | provider = "openai" 32 | controller = 
RemoteVisionModel(self.openai_api_key, provider) 33 | 34 | vision_input = VisionModelInput(content="Describe the image", file_path=self.image_path, 35 | model="gpt-4o") 36 | result = controller.image_to_text(vision_input) 37 | 38 | print(result) 39 | 40 | def test_gemini_image_descriptor(self): 41 | print('--- call gemini vision ---') 42 | provider = "gemini" 43 | controller = RemoteVisionModel(self.gemini_api_key, provider) 44 | 45 | vision_input = VisionModelInput(content="Describe this image in detail", file_path=self.image_path, 46 | extension='png') 47 | 48 | try: 49 | result = controller.image_to_text(vision_input) 50 | print(result) 51 | self.assertTrue(len(result) > 0, "Gemini vision should return a non-empty result") 52 | except Exception as e: 53 | print(f"ERROR: {str(e)}") 54 | self.fail(f"Gemini vision test failed: {str(e)}") 55 | 56 | def test_google_image_descriptor(self): 57 | if not self.google_api_key: 58 | self.skipTest("Google API key is missing") 59 | 60 | print('--- call google vision ---') 61 | provider = "google" 62 | controller = RemoteVisionModel(self.google_api_key, provider) 63 | 64 | vision_input = VisionModelInput(content="", file_path=self.image_path) 65 | result = controller.image_to_text(vision_input) 66 | 67 | print(result) 68 | self.assertTrue(len(result) > 0, "Google vision should return a non-empty result") 69 | 70 | 71 | if __name__ == '__main__': 72 | unittest.main() -------------------------------------------------------------------------------- /intelli/test/integration/test_stability_wrapper.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | from intelli.wrappers.stability_wrapper import StabilityAIWrapper 4 | import base64 5 | from dotenv import load_dotenv 6 | load_dotenv() 7 | 8 | class TestStabilityAIWrapper(unittest.TestCase): 9 | 10 | def setUp(self): 11 | # Ensure the STABILITY_API_KEY environment variable is set 12 | self.api_key = os.getenv('STABILITY_API_KEY') 13 | if self.api_key is None: 14 | raise unittest.SkipTest("STABILITY_API_KEY environment variable is not set") 15 | 16 | self.wrapper = StabilityAIWrapper(self.api_key) 17 | 18 | def test_generate_text_to_image(self): 19 | print('start generating image') 20 | # Define the parameters for the text to image generation 21 | params = { 22 | "text_prompts": [ 23 | { 24 | "text": "A quaint cottage in a forest clearing, under a starry night sky" 25 | } 26 | ], 27 | "cfg_scale": 7, 28 | "height": 1024, 29 | "width": 1024, 30 | "samples": 1, 31 | "steps": 20 32 | } 33 | 34 | result = self.wrapper.generate_images(params) 35 | 36 | # Verify the response contains expected keys 37 | self.assertIn('artifacts', result) 38 | self.assertTrue(isinstance(result['artifacts'], list), "Artifacts should be a list") 39 | self.assertTrue(len(result['artifacts']) > 0, "Artifacts list should not be empty") 40 | artifact = result['artifacts'][0] 41 | self.assertIn('base64', artifact, "Artifact should contain a base64 key") 42 | 43 | # Decode base64 and save the image 44 | image_data = base64.b64decode(artifact['base64']) 45 | 46 | output_dir = '../temp' 47 | os.makedirs(output_dir, exist_ok=True) 48 | output_path = os.path.join(output_dir, 'stability_generate.png') 49 | 50 | with open(output_path, 'wb') as img_file: 51 | img_file.write(image_data) 52 | 53 | print(f"Saved generated image to {output_path}") 54 | 55 | if __name__ == '__main__': 56 | unittest.main() --------------------------------------------------------------------------------
/intelli/test/integration/test_vllm_wrapper.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | import json 4 | from dotenv import load_dotenv 5 | from intelli.wrappers.vllm_wrapper import VLLMWrapper 6 | 7 | load_dotenv() 8 | 9 | 10 | class TestVLLMWrapperIntegration(unittest.TestCase): 11 | """Integration tests for VLLMWrapper.""" 12 | 13 | def setUp(self): 14 | """Set up test environment.""" 15 | self.vllm_embed_url = os.getenv("VLLM_EMBED_URL") 16 | self.deepseek_url = os.getenv("DEEPSEEK_VLLM_URL") 17 | self.llama_url = os.getenv("LLAMA_VLLM_URL") 18 | 19 | def test_vllm_embedding(self): 20 | """Test embedding functionality.""" 21 | if not self.vllm_embed_url: 22 | self.skipTest("VLLM_EMBED_URL environment variable not set") 23 | 24 | wrapper = VLLMWrapper(self.vllm_embed_url) 25 | 26 | # Fix: Pass a dictionary with a "texts" key instead of a direct list 27 | response = wrapper.get_embeddings({"texts": ["hello world"]}) 28 | print("VLLM Embeddings sample:", response["embeddings"][0][:3]) 29 | 30 | self.assertIn("embeddings", response) 31 | self.assertTrue(len(response["embeddings"]) > 0) 32 | self.assertTrue(len(response["embeddings"][0]) > 0) 33 | 34 | def test_deepseek_completion(self): 35 | """Test completion with DeepSeek model.""" 36 | if not self.deepseek_url: 37 | self.skipTest("DEEPSEEK_VLLM_URL environment variable not set") 38 | 39 | wrapper = VLLMWrapper(self.deepseek_url) 40 | 41 | params = { 42 | "model": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B", 43 | "prompt": "What is machine learning?", 44 | "max_tokens": 100, 45 | "temperature": 0.7 46 | } 47 | 48 | response = wrapper.generate_text(params) 49 | print("Deepseek Completion:", response["choices"][0]["text"]) 50 | 51 | self.assertIn("choices", response) 52 | self.assertTrue(len(response["choices"]) > 0) 53 | self.assertIn("text", response["choices"][0]) 54 | self.assertTrue(len(response["choices"][0]["text"]) > 0) 55 | 56 | def test_deepseek_streaming(self): 57 | """Test the streaming functionality with DeepSeek model.""" 58 | if not self.deepseek_url: 59 | self.skipTest("DEEPSEEK_VLLM_URL environment variable not set") 60 | 61 | try: 62 | # Create wrapper with debugging enabled 63 | wrapper = VLLMWrapper(self.deepseek_url) 64 | wrapper.is_log = True 65 | 66 | # Set up test parameters 67 | params = { 68 | "model": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B", 69 | "prompt": "Hello there, how are you?", 70 | "max_tokens": 50, 71 | "temperature": 0.2, 72 | "stream": True 73 | } 74 | 75 | print("\n\nTesting DeepSeek Streaming:") 76 | 77 | # First test direct API to verify it works 78 | import requests 79 | headers = {"Content-Type": "application/json"} 80 | direct_response = requests.post( 81 | f"{self.deepseek_url}/v1/completions", 82 | json=params, 83 | headers=headers, 84 | stream=True 85 | ) 86 | 87 | print("Direct API response status:", direct_response.status_code) 88 | 89 | # Now test through our wrapper 90 | print("Testing through wrapper:") 91 | full_text = "" 92 | 93 | # Collect output from stream 94 | for chunk in wrapper.generate_text_stream(params): 95 | print(f"Received chunk: '{chunk}'") 96 | full_text += chunk 97 | 98 | print(f"Complete text: '{full_text}'") 99 | self.assertTrue(len(full_text) > 0, "DeepSeek streaming response should not be empty") 100 | 101 | except Exception as e: 102 | import traceback 103 | print("\nDetailed error trace:") 104 | traceback.print_exc() 105 | self.fail(f"DeepSeek streaming test failed: {str(e)}") 106 | 107 
| def test_llama_completion(self): 108 | """Test completion with Llama model.""" 109 | if not self.llama_url: 110 | self.skipTest("LLAMA_VLLM_URL environment variable not set") 111 | 112 | wrapper = VLLMWrapper(self.llama_url) 113 | 114 | params = { 115 | "model": "meta-llama/Llama-3.1-8B-Instruct", 116 | "prompt": "What is machine learning?", 117 | "max_tokens": 100, 118 | "temperature": 0.7 119 | } 120 | 121 | response = wrapper.generate_text(params) 122 | print("Llama Completion:", response["choices"][0]["text"]) 123 | 124 | self.assertIn("choices", response) 125 | self.assertTrue(len(response["choices"]) > 0) 126 | self.assertIn("text", response["choices"][0]) 127 | self.assertTrue(len(response["choices"][0]["text"]) > 0) 128 | 129 | 130 | if __name__ == "__main__": 131 | unittest.main() -------------------------------------------------------------------------------- /intelli/test/unit/test_chatbot_input.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from intelli.model.input.chatbot_input import ChatModelInput 3 | 4 | class TestChatModelInput(unittest.TestCase): 5 | def setUp(self): 6 | self.system_message = "Let's start a conversation." 7 | 8 | 9 | def test_add_and_delete_messages(self): 10 | chat_model_input = ChatModelInput(system=self.system_message, model="test-model") 11 | chat_model_input.add_user_message("Hello, World!") 12 | chat_model_input.add_assistant_message("Hi, Universe!") 13 | self.assertEqual(len(chat_model_input.messages), 2) 14 | 15 | chat_model_input.delete_last_message(chat_model_input.messages[0]) 16 | self.assertEqual(len(chat_model_input.messages), 1) 17 | 18 | chat_model_input.clean_messages() 19 | self.assertEqual(len(chat_model_input.messages), 0) 20 | 21 | def test_get_openai_input(self): 22 | chat_model_input = ChatModelInput(system=self.system_message, model="test-model") 23 | chat_model_input.add_system_message("System message for OpenAI example") 24 | params = chat_model_input.get_openai_input() 25 | self.assertIn('model', params) 26 | self.assertEqual(params['model'], "test-model") 27 | self.assertTrue('messages' in params) 28 | 29 | def test_get_mistral_input(self): 30 | chat_model_input = ChatModelInput(system=self.system_message, model="test-model") 31 | chat_model_input.add_user_message("User message for Mistral example") 32 | params = chat_model_input.get_mistral_input() 33 | self.assertIn('model', params) 34 | self.assertEqual(params['model'], "test-model") 35 | self.assertTrue('messages' in params) 36 | 37 | def test_get_gemini_input(self): 38 | chat_model_input = ChatModelInput(system=self.system_message, model="test-model") 39 | chat_model_input.add_assistant_message("Assistant message for Gemini example") 40 | params = chat_model_input.get_gemini_input() 41 | self.assertTrue('contents' in params) 42 | self.assertTrue('generationConfig' in params) 43 | 44 | if __name__ == '__main__': 45 | unittest.main() -------------------------------------------------------------------------------- /intelli/test_gpt_image_1.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Test script for the new gpt-image-1 model and parameters 4 | """ 5 | import os 6 | from dotenv import load_dotenv 7 | from intelli.model.input.image_input import ImageModelInput 8 | from intelli.controller.remote_image_model import RemoteImageModel 9 | 10 | # Load environment variables 11 | load_dotenv() 12 | 13 | def test_gpt_image_1_default(): 14 | 
"""Test that gpt-image-1 is set as default""" 15 | print("=== Testing gpt-image-1 as default model ===") 16 | 17 | # Create image input without specifying model 18 | image_input = ImageModelInput(prompt="A cute baby sea otter") 19 | 20 | # Set default values for OpenAI 21 | image_input.set_default_values("openai") 22 | 23 | print(f"Default model: {image_input.model}") 24 | print(f"Default size: {image_input.imageSize}") 25 | 26 | assert image_input.model == "gpt-image-1", f"Expected gpt-image-1, got {image_input.model}" 27 | print("✅ Default model test passed!") 28 | 29 | def test_new_parameters(): 30 | """Test that new parameters are included in OpenAI inputs""" 31 | print("\n=== Testing new parameters ===") 32 | 33 | image_input = ImageModelInput( 34 | prompt="A cute baby sea otter", 35 | model="gpt-image-1", 36 | background="transparent", 37 | quality="high", 38 | output_format="png", 39 | output_compression=90, 40 | moderation="auto", 41 | user="test_user" 42 | ) 43 | 44 | openai_inputs = image_input.get_openai_inputs() 45 | 46 | print("OpenAI inputs:") 47 | for key, value in openai_inputs.items(): 48 | print(f" {key}: {value}") 49 | 50 | # Check that new parameters are included 51 | expected_params = ["background", "quality", "output_format", "output_compression", "moderation", "user"] 52 | for param in expected_params: 53 | assert param in openai_inputs, f"Parameter {param} not found in OpenAI inputs" 54 | 55 | print("✅ New parameters test passed!") 56 | 57 | def test_actual_generation(): 58 | """Test actual image generation with gpt-image-1 (requires API key)""" 59 | print("\n=== Testing actual image generation ===") 60 | 61 | api_key = os.getenv('OPENAI_API_KEY') 62 | if not api_key: 63 | print("⚠️ OPENAI_API_KEY not found, skipping actual generation test") 64 | return 65 | 66 | try: 67 | # Create image input with new parameters 68 | image_input = ImageModelInput( 69 | prompt="A simple geometric logo with a python snake", 70 | model="gpt-image-1", 71 | background="transparent", 72 | quality="high", 73 | output_format="png", 74 | output_compression=85 75 | ) 76 | 77 | # Create image model 78 | image_model = RemoteImageModel(api_key, "openai") 79 | 80 | print("Generating image with gpt-image-1...") 81 | results = image_model.generate_images(image_input) 82 | 83 | print(f"✅ Image generation successful! 
Generated {len(results)} image(s)") 84 | print(f"Result type: {type(results[0])}") 85 | 86 | # Save the image if it's base64 data 87 | if isinstance(results[0], str) and len(results[0]) > 100: 88 | import base64 89 | import os 90 | 91 | os.makedirs("temp", exist_ok=True) 92 | image_data = base64.b64decode(results[0]) 93 | 94 | with open("temp/gpt_image_1_test.png", "wb") as f: 95 | f.write(image_data) 96 | 97 | print("💾 Image saved to temp/gpt_image_1_test.png") 98 | 99 | except Exception as e: 100 | print(f"❌ Image generation failed: {e}") 101 | 102 | if __name__ == "__main__": 103 | print("Testing gpt-image-1 model and new parameters...\n") 104 | 105 | test_gpt_image_1_default() 106 | test_new_parameters() 107 | test_actual_generation() 108 | 109 | print("\n🎉 All tests completed!") -------------------------------------------------------------------------------- /intelli/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligentnode/Intelli/dd1b55ab120b6e6e53a55446dcd974b840ae004d/intelli/utils/__init__.py -------------------------------------------------------------------------------- /intelli/utils/cohere_stream_parser.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | 4 | class CohereStreamParser: 5 | def __init__(self, is_log=False): 6 | self.buffer = '' 7 | self.is_log = is_log 8 | 9 | def feed(self, data): 10 | self.buffer += data 11 | 12 | if '\n' in self.buffer: 13 | event_end_index = self.buffer.index('\n') 14 | raw_data = self.buffer[:event_end_index + 1].strip() 15 | 16 | # Convert the raw_data into JSON format 17 | try: 18 | json_data = json.loads(raw_data) 19 | except json.JSONDecodeError as e: 20 | print(f"Error decoding JSON: {e}") 21 | return 22 | 23 | content_text = json_data.get('text') 24 | if content_text: 25 | yield content_text 26 | 27 | self.buffer = self.buffer[event_end_index + 1:] 28 | -------------------------------------------------------------------------------- /intelli/utils/conn_helper.py: -------------------------------------------------------------------------------- 1 | import json 2 | import requests 3 | 4 | 5 | class ConnHelper: 6 | 7 | @staticmethod 8 | def convert_map_to_json(params): 9 | return json.dumps(params) 10 | 11 | @staticmethod 12 | def get_error_message(error): 13 | if isinstance(error, requests.exceptions.RequestException): 14 | if error.response is not None: 15 | try: 16 | return f'Unexpected HTTP response: {error.response.status_code} Error details: {error.response.json()}' 17 | except json.JSONDecodeError: 18 | return f'Unexpected HTTP response: {error.response.status_code} Error details: {error.response.text}' 19 | 20 | return str(error) 21 | -------------------------------------------------------------------------------- /intelli/utils/dataframe_mcp_utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | DataFrame MCP Utilities for Intelli - Backward Compatibility Module 3 | 4 | This module maintains backward compatibility by re-exporting DataFrame utilities 5 | from their new location in the mcp package. 6 | 7 | DEPRECATED: Please import from intelli.mcp.dataframe_utils instead. 8 | """ 9 | 10 | import warnings 11 | 12 | # Issue deprecation warning 13 | warnings.warn( 14 | "Importing from intelli.utils.dataframe_mcp_utils is deprecated. 
" 15 | "Please import from intelli.mcp instead: " 16 | "from intelli.mcp import PandasMCPServerBuilder, PolarsMCPServerBuilder", 17 | DeprecationWarning, 18 | stacklevel=2 19 | ) 20 | 21 | # Re-export everything from the new location for backward compatibility 22 | try: 23 | from intelli.mcp.dataframe_utils import ( 24 | BaseDataFrameMCPServerBuilder, 25 | PandasMCPServerBuilder, 26 | PolarsMCPServerBuilder, 27 | PANDAS_AVAILABLE, 28 | POLARS_AVAILABLE 29 | ) 30 | except ImportError: 31 | # If the new location isn't available, provide error message 32 | def _create_unavailable_class(name): 33 | class UnavailableClass: 34 | def __init__(self, *args, **kwargs): 35 | raise ImportError( 36 | f"{name} is no longer available from this location. " 37 | "Please install the MCP package with 'pip install intelli[mcp]' " 38 | "and import from intelli.mcp instead." 39 | ) 40 | return UnavailableClass 41 | 42 | BaseDataFrameMCPServerBuilder = _create_unavailable_class("BaseDataFrameMCPServerBuilder") 43 | PandasMCPServerBuilder = _create_unavailable_class("PandasMCPServerBuilder") 44 | PolarsMCPServerBuilder = _create_unavailable_class("PolarsMCPServerBuilder") 45 | PANDAS_AVAILABLE = False 46 | POLARS_AVAILABLE = False 47 | 48 | # Maintain the same __all__ for compatibility 49 | __all__ = [ 50 | 'BaseDataFrameMCPServerBuilder', 51 | 'PandasMCPServerBuilder', 52 | 'PolarsMCPServerBuilder', 53 | 'PANDAS_AVAILABLE', 54 | 'POLARS_AVAILABLE' 55 | ] -------------------------------------------------------------------------------- /intelli/utils/logging.py: -------------------------------------------------------------------------------- 1 | class Logger: 2 | def __init__(self, enable_logging=True, head_size=200): 3 | self.enable_logging = enable_logging 4 | self.head_size = head_size 5 | 6 | def log_head(self, message, data=None): 7 | if self.enable_logging: 8 | if data: 9 | print(f"{message}: {data[:self.head_size]}") 10 | else: 11 | print(message) 12 | 13 | def log(self, message, data=None): 14 | if self.enable_logging: 15 | if data: 16 | print(f"{message}: {data}") 17 | else: 18 | print(message) 19 | -------------------------------------------------------------------------------- /intelli/utils/proxy_helper.py: -------------------------------------------------------------------------------- 1 | from copy import deepcopy 2 | 3 | from intelli.config import config as default_config 4 | 5 | 6 | class ProxyHelper: 7 | _instance = None 8 | API_VERSION = '2023-12-01-preview' 9 | 10 | def __init__(self): 11 | self.set_default_openai() 12 | 13 | @classmethod 14 | def get_instance(cls): 15 | if cls._instance is None: 16 | cls._instance = ProxyHelper() 17 | return cls._instance 18 | 19 | def set_default_openai(self): 20 | config = deepcopy(default_config["url"]) 21 | self.apply_openai_config(config['openai']) 22 | 23 | def set_azure_openai(self, resource_name): 24 | if not resource_name: 25 | raise ValueError("Azure resource name must be provided.") 26 | 27 | config = deepcopy(default_config["url"]) 28 | self.resource_name = resource_name 29 | self.openai_url = config['azure_openai']['base'].replace('{resource-name}', resource_name) 30 | self.openai_completion = config['azure_openai']['completions'] 31 | self.openai_chat_gpt = config['azure_openai']['chatgpt'] 32 | self.openai_image = config['azure_openai']['imagegenerate'] 33 | self.openai_embed = config['azure_openai']['embeddings'] 34 | self.openai_audio_transcriptions = config['azure_openai']['audiotranscriptions'] 35 | self.openai_audio_speech = 
config['azure_openai']['audiospeech'] 36 | self.openai_files = config['azure_openai']['files'] 37 | self.openai_finetuning_job = config['azure_openai']['finetuning'] 38 | self.openai_type = 'azure' 39 | 40 | def get_openai_chat_url(self, model=''): 41 | if self.openai_type == 'azure': 42 | return self.openai_chat_gpt.replace('{deployment-id}', model).replace('{api-version}', 43 | ProxyHelper.API_VERSION) 44 | else: 45 | return self.openai_chat_gpt 46 | 47 | def get_openai_image_url(self): 48 | """ 49 | Method to get the OpenAI image generation URL 50 | """ 51 | if self.openai_type == 'azure': 52 | return self.openai_image.replace('{api-version}', '2023-06-01-preview') 53 | else: 54 | return self.openai_image 55 | 56 | def get_openai_embed_url(self, model=''): 57 | """ 58 | Method to get the Embeddings URL 59 | """ 60 | if self.openai_type == 'azure': 61 | return self.openai_embed.replace('{deployment-id}', model).replace('{api-version}', ProxyHelper.API_VERSION) 62 | else: 63 | return self.openai_embed 64 | 65 | def get_openai_audio_transcriptions_url(self, model=''): 66 | """ 67 | Method to get the OpenAI audio transcriptions URL 68 | """ 69 | if self.openai_type == 'azure': 70 | return self.openai_audio_transcriptions.replace('{deployment-id}', model).replace('{api-version}', 71 | ProxyHelper.API_VERSION) 72 | else: 73 | return self.openai_audio_transcriptions 74 | 75 | def get_openai_audio_speech_url(self, model=''): 76 | """ 77 | Method to get the OpenAI audio to speech URL 78 | """ 79 | if self.openai_type == 'azure': 80 | return self.openai_audio_speech.replace('{deployment-id}', model).replace('{api-version}', 81 | ProxyHelper.API_VERSION) 82 | else: 83 | return self.openai_audio_speech 84 | 85 | def get_openai_files_url(self): 86 | """ 87 | Method to get the OpenAI files endpoint URL 88 | """ 89 | if self.openai_type == 'azure': 90 | return self.openai_files.replace('{api-version}', ProxyHelper.API_VERSION) 91 | else: 92 | return self.openai_files 93 | 94 | def get_openai_finetuning_job_url(self): 95 | """ 96 | Method to get the OpenAI fine-tuning job URL 97 | """ 98 | if self.openai_type == 'azure': 99 | return self.openai_finetuning_job.replace('{api-version}', '2023-10-01-preview') 100 | else: 101 | return self.openai_finetuning_job 102 | 103 | def set_openai_proxy_values(self, proxy_settings): 104 | 105 | self.openai_type = 'custom' 106 | 107 | if proxy_settings and (not proxy_settings['base'] and proxy_settings['url']): 108 | proxy_settings['base'] = proxy_settings['url'] 109 | 110 | adjusted_settings = { 111 | 'base': proxy_settings.get('base', proxy_settings.get('url', self.openai_url)), 112 | 'completions': proxy_settings.get('completions', self.openai_completion), 113 | 'chatgpt': proxy_settings.get('chatgpt', self.openai_chat_gpt), 114 | 'imagegenerate': proxy_settings.get('imagegenerate', self.openai_image), 115 | 'embeddings': proxy_settings.get('embeddings', self.openai_embed), 116 | 'audiotranscriptions': proxy_settings.get('audiotranscriptions', self.openai_audio_transcriptions), 117 | 'audiospeech': proxy_settings.get('audiospeech', self.openai_audio_speech), 118 | 'files': proxy_settings.get('files', self.openai_files), 119 | 'finetuning': proxy_settings.get('finetuning', self.openai_finetuning_job), 120 | 'organization': proxy_settings.get('organization', self.organization), 121 | } 122 | 123 | self.apply_openai_config(adjusted_settings) 124 | 125 | def apply_openai_config(self, config): 126 | 127 | self.openai_url = config['base'] 128 | self.openai_completion = 
config['completions'] 129 | self.openai_chat_gpt = config['chatgpt'] 130 | self.openai_image = config['imagegenerate'] 131 | self.openai_embed = config['embeddings'] 132 | self.openai_audio_transcriptions = config['audiotranscriptions'] 133 | self.openai_audio_speech = config['audiospeech'] 134 | self.openai_files = config['files'] 135 | self.openai_finetuning_job = config['finetuning'] 136 | self.openai_type = 'openai' 137 | self.resource_name = '' 138 | self.organization = config.get('organization', None) 139 | 140 | # optional getters - to match the right parameter with the provider 141 | def get_openai_resource_name(self): 142 | return self.resource_name 143 | 144 | def get_openai_organization(self): 145 | return self.organization 146 | 147 | def get_openai_type(self): 148 | return self.openai_type 149 | 150 | def get_openai_url(self): 151 | return self.openai_url 152 | -------------------------------------------------------------------------------- /intelli/utils/system_helper.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | 4 | class SystemHelper: 5 | def __init__(self): 6 | # Setting up the path to the templates directory 7 | self.systems_path = Path(__file__).parent.parent / "resource" / "templates" 8 | 9 | def get_prompt_path(self, file_type): 10 | """Returns the file path for the specified prompt type.""" 11 | file_map = { 12 | "sentiment": "sentiment_prompt.in", 13 | "summary": "summary_prompt.in", 14 | "html_page": "html_page_prompt.in", 15 | "graph_dashboard": "graph_dashboard_prompt.in", 16 | "instruct_update": "instruct_update.in", 17 | "prompt_example": "prompt_example.in", 18 | "augmented_chatbot": "augmented_chatbot.in" 19 | } 20 | 21 | if file_type in file_map: 22 | return self.systems_path / file_map[file_type] 23 | else: 24 | raise ValueError(f"File type '{file_type}' not supported.") 25 | 26 | def load_prompt(self, file_type): 27 | """Loads the prompt template from a file.""" 28 | prompt_path = self.get_prompt_path(file_type) 29 | with open(prompt_path, 'r', encoding='utf-8') as file: 30 | prompt_template = file.read() 31 | 32 | return prompt_template 33 | 34 | def load_static_prompt(self, file_type): 35 | static_prompts = { 36 | "augmented_chatbot": ( 37 | "Using the provided context, craft a cohesive response that addresses the user's query. " 38 | "If the context lacks relevance, focus on generating accurate answer " 39 | "based on the user's question alone. 
Aim for clarity in your reply.\n" 40 | "Context:\n" 41 | "${semantic_search}\n" 42 | "------------------\n" 43 | "User's Question:\n" 44 | "${user_query}" 45 | ), 46 | } 47 | 48 | if file_type in static_prompts: 49 | return static_prompts[file_type] 50 | else: 51 | raise ValueError(f"Static prompt for file type '{file_type}' not defined.") 52 | -------------------------------------------------------------------------------- /intelli/wrappers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intelligentnode/Intelli/dd1b55ab120b6e6e53a55446dcd974b840ae004d/intelli/wrappers/__init__.py -------------------------------------------------------------------------------- /intelli/wrappers/anthropic_wrapper.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | from intelli.config import config 4 | from intelli.utils.conn_helper import ConnHelper 5 | 6 | 7 | class AnthropicWrapper: 8 | def __init__(self, api_key): 9 | self.API_BASE_URL = config['url']['anthropic']['base'] 10 | self.API_VERSION = config['url']['anthropic']['version'] 11 | self.session = requests.Session() 12 | self.session.headers.update({ 13 | 'Content-Type': 'application/json', 14 | 'Accept': 'application/json', 15 | 'x-api-key': api_key, 16 | 'anthropic-version': self.API_VERSION 17 | }) 18 | 19 | def generate_text(self, params): 20 | url = f"{self.API_BASE_URL}{config['url']['anthropic']['messages']}" 21 | response = self.session.post(url, json=params) 22 | try: 23 | response.raise_for_status() 24 | return response.json() 25 | except requests.exceptions.RequestException as error: 26 | raise Exception(ConnHelper.get_error_message(error)) 27 | finally: 28 | response.close() 29 | self.session.close() 30 | 31 | def stream_text(self, params): 32 | """Yields text from streaming API.""" 33 | url = f"{self.API_BASE_URL}{config['url']['anthropic']['messages']}" 34 | headers = self.session.headers.copy() 35 | params['stream'] = True 36 | try: 37 | with requests.post(url, headers=headers, json=params, stream=True) as response: 38 | response.raise_for_status() 39 | for line in response.iter_lines(): 40 | if line: 41 | decoded_line = line.decode('utf-8') 42 | yield decoded_line 43 | except requests.exceptions.RequestException as error: 44 | raise Exception(f"Stream request failed: {error}") 45 | -------------------------------------------------------------------------------- /intelli/wrappers/cohereai_wrapper.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | from intelli.config import config 4 | from intelli.utils.conn_helper import ConnHelper 5 | 6 | 7 | class CohereAIWrapper: 8 | def __init__(self, api_key): 9 | self.API_BASE_URL = config['url']['cohere']['base'] 10 | self.COHERE_VERSION = config['url']['cohere']['version'] 11 | self.API_KEY = api_key 12 | self.headers = { 13 | 'Content-Type': 'application/json', 14 | 'Authorization': f'Bearer {self.API_KEY}', 15 | 'Cohere-Version': self.COHERE_VERSION, 16 | } 17 | 18 | def generate_text(self, params): 19 | url = config['url']['cohere']['completions'] 20 | try: 21 | response = requests.post(f'{self.API_BASE_URL}{url}', json=params, headers=self.headers) 22 | response.raise_for_status() 23 | return response.json() 24 | except requests.exceptions.RequestException as error: 25 | raise Exception(ConnHelper.get_error_message(error)) 26 | 27 | def generate_chat_text(self, params): 28 | url = 
config['url']['cohere']['chat'] 29 | try: 30 | response = requests.post(f'{self.API_BASE_URL}{url}', json=params, headers=self.headers) 31 | response.raise_for_status() 32 | return response.json() 33 | except requests.exceptions.RequestException as error: 34 | raise Exception(ConnHelper.get_error_message(error)) 35 | 36 | def get_embeddings(self, params): 37 | url = config['url']['cohere']['embed'] 38 | try: 39 | response = requests.post(f'{self.API_BASE_URL}{url}', json=params, headers=self.headers) 40 | response.raise_for_status() 41 | return response.json() 42 | except requests.exceptions.RequestException as error: 43 | raise Exception(ConnHelper.get_error_message(error)) 44 | -------------------------------------------------------------------------------- /intelli/wrappers/intellicloud_wrapper.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | from intelli.config import config 4 | from intelli.utils.conn_helper import ConnHelper 5 | 6 | 7 | class IntellicloudWrapper: 8 | def __init__(self, api_key, api_base=None): 9 | self.ONE_KEY = api_key 10 | self.API_BASE_URL = api_base if api_base else config['url']['intellicloud']['base'] 11 | 12 | def semantic_search(self, query_text, k=3, filters=None): 13 | if filters is None: 14 | filters = {} 15 | 16 | url = f"{self.API_BASE_URL}{config['url']['intellicloud']['semantic_search']}" 17 | # set the data 18 | data = {'one_key': self.ONE_KEY, 'query_text': query_text, 'k': k} 19 | if filters and 'document_name' in filters: 20 | data['document_name'] = filters['document_name'] 21 | 22 | # call the document search 23 | try: 24 | response = requests.post(url, data=data) 25 | response.raise_for_status() 26 | return response.json()['data'] 27 | except requests.RequestException as e: 28 | raise Exception(ConnHelper.get_error_message(e)) 29 | -------------------------------------------------------------------------------- /intelli/wrappers/keras_wrapper.py: -------------------------------------------------------------------------------- 1 | from intelli.utils.whisper_helper import WhisperHelper 2 | import os 3 | 4 | 5 | class KerasWrapper: 6 | def __init__(self, model_name=None, model_params=None): 7 | self.model_name = model_name 8 | self.model_params = model_params 9 | self.whisper_helper = None 10 | self.model = None 11 | self._load_model() 12 | 13 | def _load_model(self): 14 | try: 15 | import keras_nlp 16 | import keras 17 | 18 | self.nlp_manager = keras_nlp 19 | self.keras_manager = keras 20 | except ImportError as e: 21 | raise ImportError( 22 | "keras_nlp is not installed or model is not supported." 
23 | ) from e 24 | 25 | if self.model_params and "KAGGLE_USERNAME" in self.model_params: 26 | os.environ["KAGGLE_USERNAME"] = self.model_params["KAGGLE_USERNAME"] 27 | os.environ["KAGGLE_KEY"] = self.model_params["KAGGLE_KEY"] 28 | 29 | if "gemma" in self.model_name: 30 | self.model = self.nlp_manager.models.GemmaCausalLM.from_preset( 31 | self.model_name 32 | ) 33 | elif "mistral" in self.model_name: 34 | self.model = self.nlp_manager.models.MistralCausalLM.from_preset( 35 | self.model_name 36 | ) 37 | elif "llama" in self.model_name: 38 | self.model = self.nlp_manager.models.Llama3CausalLM.from_preset( 39 | self.model_name 40 | ) 41 | elif "whisper" in self.model_name: 42 | try: 43 | backbone = self.nlp_manager.models.WhisperBackbone.from_preset( 44 | self.model_name 45 | ) 46 | self.whisper_helper = WhisperHelper( 47 | model_name=self.model_name, backbone=backbone 48 | ) 49 | except Exception as e: 50 | raise ValueError(f"Error loading Whisper model: {e}") 51 | else: 52 | raise ValueError(f"Unsupported model name: {self.model_name}") 53 | 54 | def update_model_params(self, model_params): 55 | self.model_params = model_params 56 | 57 | def set_model(self, model, model_params): 58 | self.model = model 59 | self.model_params = model_params 60 | 61 | def generate(self, input_text, max_length=180): 62 | if not self.model: 63 | raise ValueError("Model is not set.") 64 | generated_output = self.model.generate(input_text, max_length=max_length) 65 | if isinstance(generated_output, str) and generated_output.startswith( 66 | input_text 67 | ): 68 | generated_output = generated_output.replace(input_text, "", 1).strip() 69 | return generated_output 70 | 71 | def fine_tune( 72 | self, 73 | dataset, 74 | fine_tuning_config, 75 | enable_lora=True, 76 | custom_loss=None, 77 | custom_metrics=None, 78 | ): 79 | if not self.model: 80 | raise ValueError("Model is not set.") 81 | try: 82 | import keras_nlp 83 | import keras 84 | except ImportError as e: 85 | raise ImportError( 86 | "keras_nlp is not installed or model is not supported." 
87 | ) from e 88 | 89 | lora_rank = fine_tuning_config.get("lora_rank", 4) 90 | if enable_lora: 91 | self.model.backbone.enable_lora(rank=lora_rank) 92 | self.model.preprocessor.sequence_length = fine_tuning_config.get( 93 | "sequence_length", 512 94 | ) 95 | 96 | learning_rate = fine_tuning_config.get("learning_rate", 0.001) 97 | weight_decay = fine_tuning_config.get("weight_decay", 0.004) 98 | beta_1 = fine_tuning_config.get("beta_1", 0.9) 99 | beta_2 = fine_tuning_config.get("beta_2", 0.999) 100 | 101 | optimizer = keras.optimizers.AdamW( 102 | learning_rate=learning_rate, 103 | weight_decay=weight_decay, 104 | beta_1=beta_1, 105 | beta_2=beta_2, 106 | ) 107 | optimizer.exclude_from_weight_decay(var_names=["bias", "scale"]) 108 | 109 | custom_loss = ( 110 | keras.losses.SparseCategoricalCrossentropy(from_logits=True) 111 | if not custom_loss 112 | else custom_loss 113 | ) 114 | custom_metrics = ( 115 | [keras.metrics.SparseCategoricalAccuracy()] 116 | if not custom_metrics 117 | else custom_metrics 118 | ) 119 | 120 | self.model.compile( 121 | loss=custom_loss, 122 | optimizer=optimizer, 123 | weighted_metrics=custom_metrics, 124 | ) 125 | 126 | epochs = fine_tuning_config.get("epochs", 3) 127 | batch_size = fine_tuning_config.get("batch_size", 32) 128 | self.model.fit(dataset, epochs=epochs, batch_size=batch_size) 129 | 130 | def transcript( 131 | self, 132 | audio_data, 133 | sample_rate=16000, 134 | language=None, 135 | user_prompt=None, 136 | condition_on_previous_text=False, 137 | max_steps=80, 138 | max_chunk_sec=30, 139 | ): 140 | """ 141 | Convert speech to text using the Whisper model. 142 | """ 143 | if not self.whisper_helper: 144 | raise ValueError( 145 | "Whisper is not initialized. Make sure you used a 'whisper_*' model_name." 146 | ) 147 | 148 | return self.whisper_helper.transcribe( 149 | audio_data=audio_data, 150 | sample_rate=sample_rate, 151 | language=language, 152 | max_steps=max_steps, 153 | max_chunk_sec=max_chunk_sec, 154 | user_prompt=user_prompt, 155 | condition_on_previous_text=condition_on_previous_text, 156 | ) 157 | -------------------------------------------------------------------------------- /intelli/wrappers/mcp_config.py: -------------------------------------------------------------------------------- 1 | """ 2 | MCP configuration helper functions to simplify agent creation. 3 | """ 4 | 5 | def local_server_config(script_path, python_executable=None, env=None): 6 | """ 7 | Create configuration for a local MCP server running from a Python script. 8 | 9 | Args: 10 | script_path: Path to the MCP server script 11 | python_executable: Python executable to use (default: sys.executable) 12 | env: Environment variables for the subprocess 13 | 14 | Returns: 15 | Dict with MCP agent configuration 16 | """ 17 | import sys 18 | 19 | executable = python_executable or sys.executable 20 | 21 | return { 22 | "command": executable, 23 | "args": [script_path], 24 | "env": env 25 | } 26 | 27 | def websocket_server_config(url): 28 | """ 29 | Create configuration for a WebSocket MCP server. 30 | 31 | Args: 32 | url: WebSocket URL (ws:// or wss://) 33 | 34 | Returns: 35 | Dict with MCP agent configuration 36 | """ 37 | return {"url": url} 38 | 39 | def http_server_config(url): 40 | """ 41 | Create configuration for an HTTP MCP server. 
42 | 43 | Args: 44 | url: HTTP URL (http:// or https://) 45 | 46 | Returns: 47 | Dict with MCP agent configuration 48 | """ 49 | return {"url": url} 50 | 51 | def create_mcp_agent(server_config, tool_name, **tool_args): 52 | """ 53 | Create an MCP agent with the given configuration. 54 | 55 | Args: 56 | server_config: Server configuration created by one of the helper functions 57 | tool_name: Name of the tool to execute 58 | **tool_args: Arguments to pass to the tool 59 | 60 | Returns: 61 | Dict with model_params for creating an MCP agent 62 | """ 63 | # Prepare model parameters 64 | model_params = { 65 | "tool": tool_name, 66 | **server_config 67 | } 68 | 69 | # Add tool arguments with arg_ prefix 70 | for arg_name, arg_value in tool_args.items(): 71 | model_params[f"arg_{arg_name}"] = arg_value 72 | 73 | return model_params -------------------------------------------------------------------------------- /intelli/wrappers/mistralai_wrapper.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | from intelli.config import config 4 | from intelli.utils.conn_helper import ConnHelper 5 | 6 | 7 | class MistralAIWrapper: 8 | 9 | def __init__(self, api_key): 10 | self.API_BASE_URL = config['url']['mistral']['base'] 11 | self.session = requests.Session() 12 | self.session.headers.update({ 13 | 'Content-Type': 'application/json', 14 | 'Accept': 'application/json', 15 | 'Authorization': f'Bearer {api_key}' 16 | }) 17 | 18 | def generate_text(self, params): 19 | url = f"{self.API_BASE_URL}{config['url']['mistral']['completions']}" 20 | 21 | try: 22 | response = self.session.post(url, json=params) 23 | response.raise_for_status() 24 | return response.json() 25 | except requests.exceptions.RequestException as e: 26 | raise Exception(ConnHelper.get_error_message(e)) 27 | 28 | def get_embeddings(self, params): 29 | url = f"{self.API_BASE_URL}{config['url']['mistral']['embed']}" 30 | 31 | try: 32 | response = self.session.post(url, json=params) 33 | response.raise_for_status() 34 | return response.json() 35 | except requests.exceptions.RequestException as e: 36 | raise Exception(ConnHelper.get_error_message(e)) 37 | -------------------------------------------------------------------------------- /intelli/wrappers/nvidia_wrapper.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from intelli.config import config 3 | 4 | 5 | class NvidiaWrapper: 6 | def __init__(self, api_key: str, base_url: str = None): 7 | self.api_key = api_key 8 | # support local url or cloud nvidia builder by default 9 | self.base_url = base_url if base_url is not None else config["url"]["nvidia"]["base"] 10 | self.chat_endpoint = config["url"]["nvidia"]["chat"] 11 | self.embeddings_endpoint = config["url"]["nvidia"]["embeddings"] 12 | self.headers = { 13 | "Content-Type": "application/json", 14 | "Accept": "application/json", 15 | "Authorization": f"Bearer {api_key}", 16 | } 17 | 18 | def generate_text(self, params: dict) -> dict: 19 | if "stream" not in params: 20 | params["stream"] = False 21 | url = self.base_url + self.chat_endpoint 22 | response = requests.post(url, json=params, headers=self.headers) 23 | response.raise_for_status() 24 | return response.json() 25 | 26 | def generate_text_stream(self, params: dict): 27 | params["stream"] = True 28 | url = self.base_url + self.chat_endpoint 29 | response = requests.post(url, json=params, headers=self.headers, stream=True) 30 | response.raise_for_status() 31 | for line 
in response.iter_lines(decode_unicode=True): 32 | if line: 33 | yield line 34 | 35 | def get_embeddings(self, params: dict) -> dict: 36 | url = self.base_url + self.embeddings_endpoint 37 | response = requests.post(url, json=params, headers=self.headers) 38 | response.raise_for_status() 39 | return response.json() 40 | -------------------------------------------------------------------------------- /intelli/wrappers/stability_wrapper.py: -------------------------------------------------------------------------------- 1 | import os 2 | import requests 3 | 4 | from intelli.config import config 5 | from intelli.utils.conn_helper import ConnHelper 6 | 7 | 8 | class StabilityAIWrapper: 9 | def __init__(self, api_key): 10 | self.api_base_url = config['url']['stability']['base'] 11 | self.session = requests.Session() 12 | self.session.headers.update({ 13 | 'Authorization': f'Bearer {api_key}', 14 | 'Accept': 'application/json', 15 | }) 16 | 17 | def generate_images(self, params, engine='stable-diffusion-xl-1024-v1-0'): 18 | endpoint = config['url']['stability']['text_to_image'].format(engine) 19 | url = f"{self.api_base_url}{endpoint}" 20 | try: 21 | response = self.session.post(url, json=params) 22 | response.raise_for_status() 23 | return response.json() 24 | except requests.exceptions.RequestException as e: 25 | raise Exception(ConnHelper.get_error_message(e)) 26 | -------------------------------------------------------------------------------- /sample/basic_mcp/README.md: -------------------------------------------------------------------------------- 1 | # MCP Calculator Demo 2 | 3 | ## What's Included 4 | - **Calculator Server**: Simple MCP server with math operations. 5 | - **Flow Client**: Client that combines AI understanding with MCP functions. 6 | 7 | ## Requirements 8 | - Python 3.10+ 9 | - OpenAI API key (set as OPENAI_API_KEY environment variable) 10 | - Packages: `mcp`, `intelli` 11 | 12 | ## Quick Start 13 | 14 | 1. **Start the server** 15 | ```bash 16 | python mcp_math_server.py 17 | ``` 18 | 19 | 2. **Run the client** 20 | ```bash 21 | python math_flow_client.py 22 | ``` 23 | Try changing the query in the script to test different operations! 24 | -------------------------------------------------------------------------------- /sample/basic_mcp/math_flow_client.py: -------------------------------------------------------------------------------- 1 | # math_flow_client.py 2 | import os 3 | import asyncio 4 | import sys 5 | from intelli.flow.agents.agent import Agent 6 | from intelli.flow.input.task_input import TextTaskInput 7 | from intelli.flow.tasks.task import Task 8 | from intelli.flow.flow import Flow 9 | from intelli.flow.types import AgentTypes 10 | from intelli.mcp import create_mcp_preprocessor 11 | from dotenv import load_dotenv 12 | 13 | # Load environment variables for API keys 14 | load_dotenv() 15 | 16 | async def run_math_flow(query="What is 7 plus 8?"): 17 | # Get the path to the MCP server file 18 | current_dir = os.path.dirname(os.path.abspath(__file__)) 19 | server_path = os.path.join(current_dir, "mcp_math_server.py") 20 | 21 | # Create LLM agent with stronger prompt 22 | llm_agent = Agent( 23 | agent_type=AgentTypes.TEXT.value, 24 | provider="openai", 25 | mission="Parse math query", 26 | model_params={ 27 | "key": os.getenv("OPENAI_API_KEY"), 28 | "model": "gpt-3.5-turbo", 29 | "system_message": """ 30 | You are a specialized parser for arithmetic operations. 31 | 32 | Your ONLY job is to extract numbers and operations from user queries. 
33 | 34 | You MUST return ONLY a valid JSON object with this exact format: 35 | {"operation": "add", "a": number, "b": number} 36 | 37 | Supported operations: 38 | - "add" (for addition, plus, sum) 39 | - "subtract" (for subtraction, minus, difference) 40 | - "multiply" (for multiplication, times, product) 41 | 42 | DO NOT include explanations, confirmations or any other text. 43 | Your entire response must be ONLY the JSON object. 44 | """ 45 | } 46 | ) 47 | 48 | # Create a task for the LLM 49 | llm_task = Task( 50 | TextTaskInput(query), 51 | llm_agent, 52 | log=True 53 | ) 54 | 55 | # Create a preprocessor for the subprocess-based MCP server with complete operation mapping 56 | extract_operation = create_mcp_preprocessor( 57 | server_path=server_path, 58 | default_tool="add", 59 | operations_map={ 60 | # Addition operations 61 | "add": "add", 62 | "plus": "add", 63 | "sum": "add", 64 | "+": "add", 65 | 66 | # Subtraction operations 67 | "subtract": "subtract", 68 | "minus": "subtract", 69 | "difference": "subtract", 70 | "-": "subtract", 71 | 72 | # Multiplication operations 73 | "multiply": "multiply", 74 | "times": "multiply", 75 | "product": "multiply", 76 | "*": "multiply", 77 | "x": "multiply" 78 | }, 79 | param_names=["a", "b"] 80 | ) 81 | 82 | # Create proper MCP agent using subprocess approach 83 | mcp_agent = Agent( 84 | agent_type=AgentTypes.MCP.value, 85 | provider="mcp", 86 | mission="Calculate arithmetic result", 87 | model_params={ 88 | "command": sys.executable, 89 | "args": [server_path], 90 | "tool": "add", 91 | "arg_a": 0, 92 | "arg_b": 0 93 | } 94 | ) 95 | 96 | # Create a task for the calculation 97 | mcp_task = Task( 98 | TextTaskInput("Calculate"), 99 | mcp_agent, 100 | log=True, 101 | pre_process=extract_operation 102 | ) 103 | 104 | # Create the flow 105 | tasks = {"parse": llm_task, "calculate": mcp_task} 106 | map_paths = {"parse": ["calculate"], "calculate": []} 107 | 108 | flow = Flow(tasks=tasks, map_paths=map_paths, log=True) 109 | 110 | print("\n=== Flow Created ===") 111 | print(f"Tasks: {list(tasks.keys())}") 112 | print("=================\n") 113 | 114 | # Run the flow 115 | results = await flow.start() 116 | 117 | # Format and print results 118 | print("\n=== Flow Results ===") 119 | print(f"Query: {query}") 120 | print(f"LLM parsing: {results['parse']['output']}") 121 | print(f"Calculation result: {results['calculate']['output']}") 122 | print("====================\n") 123 | 124 | return results 125 | 126 | if __name__ == "__main__": 127 | # Example query to test 128 | user_query = "What is 42 plus 28?" 
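    # A few other phrasings to try (illustrative only); the parser maps them to
    # the supported operations: add, subtract, multiply.
    # user_query = "What is 25 multiplied by 4?"
    # user_query = "Can you subtract 15 from 100?"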
129 | 130 | print(f"Processing query: {user_query}") 131 | print("Using MCP server with subprocess transport") 132 | 133 | # Run the flow 134 | asyncio.run(run_math_flow(user_query)) -------------------------------------------------------------------------------- /sample/basic_mcp/mcp_math_server.py: -------------------------------------------------------------------------------- 1 | # mcp_math_server.py 2 | from intelli.mcp import MCPServerBuilder 3 | 4 | # Create a server using the builder 5 | server = MCPServerBuilder("MathTools") 6 | 7 | # Add tools with decorators 8 | @server.add_tool 9 | def add(a: int, b: int) -> int: 10 | """Add two numbers""" 11 | return a + b 12 | 13 | @server.add_tool 14 | def subtract(a: int, b: int) -> int: 15 | """Subtract two numbers""" 16 | return a - b 17 | 18 | @server.add_tool 19 | def multiply(a: int, b: int) -> int: 20 | """Multiply two numbers""" 21 | return a * b 22 | 23 | if __name__ == "__main__": 24 | # Run the server with stdio transport 25 | server.run(transport="stdio") 26 | -------------------------------------------------------------------------------- /sample/http_dataframe_mcp/http_dataframe_flow_client.py: -------------------------------------------------------------------------------- 1 | """ 2 | HTTP MCP DataFrame Flow Client Example. 3 | 4 | Demonstrates using Intelli Flow to query a DataFrame server via HTTP. 5 | Query operations include schema, shape, head, column selection and filtering. 6 | 7 | Run steps: 8 | 1. Start server: python http_mcp_dataframe_server.py 9 | 2. Check server is running at http://localhost:8000/mcp 10 | 3. Run client: python http_dataframe_flow_client.py 11 | """ 12 | import os 13 | import asyncio 14 | import json 15 | 16 | from intelli.flow.agents.agent import Agent 17 | from intelli.flow.input.task_input import TextTaskInput 18 | from intelli.flow.tasks.task import Task 19 | from intelli.flow.flow import Flow 20 | from intelli.flow.types import AgentTypes 21 | from intelli.mcp import create_mcp_preprocessor 22 | from dotenv import load_dotenv 23 | 24 | # Load environment variables 25 | load_dotenv() 26 | 27 | # Server connection settings 28 | MCP_DATAFRAME_SERVER_URL = "http://localhost:8000/mcp" 29 | OUTPUT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "temp", "mcp_dataframe_client") 30 | 31 | # Generate and save flow visualization 32 | def save_flow_graph(flow: Flow, name: str = "http_dataframe_flow"): 33 | os.makedirs(OUTPUT_DIR, exist_ok=True) 34 | try: 35 | graph_path = flow.generate_graph_img( 36 | name=name, 37 | save_path=OUTPUT_DIR 38 | ) 39 | print(f"\n🎨 Flow visualization saved to: {graph_path}") 40 | except Exception as e: 41 | print(f"⚠️ Could not generate graph: {e}") 42 | 43 | # Main flow definition and execution function 44 | async def run_dataframe_query_flow(): 45 | print(f"Targeting MCP DataFrame Server at: {MCP_DATAFRAME_SERVER_URL}") 46 | 47 | # Task 1: Get DataFrame Schema 48 | schema_agent = Agent( 49 | agent_type=AgentTypes.MCP.value, 50 | provider="mcp", 51 | mission="Get DataFrame schema", 52 | model_params={ 53 | "url": MCP_DATAFRAME_SERVER_URL, 54 | "tool": "get_schema" 55 | } 56 | ) 57 | schema_task = Task( 58 | TextTaskInput("Requesting schema"), 59 | schema_agent, 60 | log=True 61 | ) 62 | 63 | # Task 2: Get DataFrame Shape 64 | shape_agent = Agent( 65 | agent_type=AgentTypes.MCP.value, 66 | provider="mcp", 67 | mission="Get DataFrame shape", 68 | model_params={ 69 | "url": MCP_DATAFRAME_SERVER_URL, 70 | "tool": "get_shape", 71 | } 72 | ) 73 | shape_task = 
Task(TextTaskInput("Requesting shape"), shape_agent, log=True) 74 | 75 | # Task 3: Get DataFrame Head 76 | head_agent = Agent( 77 | agent_type=AgentTypes.MCP.value, 78 | provider="mcp", 79 | mission="Get DataFrame head", 80 | model_params={ 81 | "url": MCP_DATAFRAME_SERVER_URL, 82 | "tool": "get_head", 83 | "arg_n": 3 # Get first 3 rows 84 | } 85 | ) 86 | head_task = Task(TextTaskInput("Requesting head"), head_agent, log=True) 87 | 88 | # Task 4: Select Specific Columns 89 | select_agent = Agent( 90 | agent_type=AgentTypes.MCP.value, 91 | provider="mcp", 92 | mission="Select specific columns", 93 | model_params={ 94 | "url": MCP_DATAFRAME_SERVER_URL, 95 | "tool": "select_columns", 96 | "arg_columns": ["Name", "Salary"] 97 | } 98 | ) 99 | select_task = Task( 100 | TextTaskInput(json.dumps({"columns_to_select": ["Name", "Salary"]})), 101 | select_agent, 102 | log=True 103 | ) 104 | 105 | # Task 5: Filter Rows 106 | filter_agent = Agent( 107 | agent_type=AgentTypes.MCP.value, 108 | provider="mcp", 109 | mission="Filter rows from DataFrame", 110 | model_params={ 111 | "url": MCP_DATAFRAME_SERVER_URL, 112 | "tool": "filter_rows", 113 | "arg_column": "City", 114 | "arg_operator": "==", 115 | "arg_value": "New York" 116 | } 117 | ) 118 | filter_task = Task(TextTaskInput("Requesting filtered data"), filter_agent, log=True) 119 | 120 | # Define flow tasks and connections 121 | tasks_dict = { 122 | "get_schema": schema_task, 123 | "get_shape": shape_task, 124 | "get_head": head_task, 125 | "select_cols": select_task, 126 | "filter_data": filter_task 127 | } 128 | 129 | # Sequential flow: schema -> shape -> head -> select -> filter 130 | map_paths_dict = { 131 | "get_schema": ["get_shape"], 132 | "get_shape": ["get_head"], 133 | "get_head": ["select_cols"], 134 | "select_cols": ["filter_data"], 135 | "filter_data": [] 136 | } 137 | 138 | data_flow = Flow(tasks=tasks_dict, map_paths=map_paths_dict, log=True) 139 | save_flow_graph(data_flow, name="http_dataframe_query_flow") 140 | 141 | print("\n=== DataFrame Query Flow Created ===") 142 | print(f"Tasks: {list(tasks_dict.keys())}") 143 | print("==================================\n") 144 | 145 | # Run the flow 146 | try: 147 | flow_results = await data_flow.start() 148 | except Exception as e: 149 | print(f"🛑 Error running flow: {e}") 150 | print(f"Make sure server is running at {MCP_DATAFRAME_SERVER_URL}") 151 | return 152 | 153 | # Display results 154 | print("\n=== DataFrame Query Flow Results ===") 155 | for task_name, result_data in flow_results.items(): 156 | output = result_data.get('output') 157 | # Format output as pretty JSON when possible 158 | if isinstance(output, str): 159 | try: 160 | output = json.dumps(json.loads(output), indent=2) 161 | except json.JSONDecodeError: 162 | pass 163 | elif isinstance(output, dict) or isinstance(output, list): 164 | output = json.dumps(output, indent=2) 165 | 166 | print(f"\n--- Task: {task_name} ---") 167 | print(f"Output:\n{output}") 168 | print("==================================\n") 169 | 170 | return flow_results 171 | 172 | if __name__ == "__main__": 173 | print("🚀 Starting HTTP MCP DataFrame Flow Client Example...") 174 | print("Ensure the `http_mcp_dataframe_server.py` is running separately.") 175 | 176 | asyncio.run(run_dataframe_query_flow()) 177 | 178 | print("🏁 DataFrame Flow Client Example Finished.") -------------------------------------------------------------------------------- /sample/http_dataframe_mcp/http_mcp_dataframe_server.py: 
-------------------------------------------------------------------------------- 1 | """ 2 | HTTP MCP DataFrame Server 3 | 4 | Serves CSV data using DataFrame operations over HTTP. 5 | Supports Pandas or Polars, depending on what's installed. 6 | """ 7 | import os 8 | import sys 9 | 10 | from intelli.mcp import PandasMCPServerBuilder, PolarsMCPServerBuilder, PANDAS_AVAILABLE, POLARS_AVAILABLE 11 | 12 | if __name__ == "__main__": 13 | # Get path to sample CSV 14 | csv_file_name = "sample_data.csv" 15 | current_script_dir = os.path.dirname(os.path.abspath(__file__)) 16 | csv_file_path = os.path.join(current_script_dir, csv_file_name) 17 | 18 | if not os.path.exists(csv_file_path): 19 | print(f"Error: Sample CSV file not found at {csv_file_path}") 20 | print(f"Please ensure '{csv_file_name}' exists in this directory.") 21 | sys.exit(1) 22 | 23 | server_builder = None 24 | server_type = "" 25 | server_name_prefix = "Http" 26 | 27 | # Try Pandas first 28 | if PANDAS_AVAILABLE: 29 | print("Using Pandas for DataFrame operations") 30 | try: 31 | server_builder = PandasMCPServerBuilder( 32 | server_name=f"{server_name_prefix}PandasDataFrameServer", 33 | csv_file_path=csv_file_path, 34 | stateless_http=True 35 | ) 36 | server_type = "Pandas" 37 | except Exception as e: 38 | print(f"Failed to initialize PandasMCPServerBuilder: {e}") 39 | server_builder = None 40 | 41 | # Fall back to Polars if Pandas failed or isn't available 42 | if server_builder is None and POLARS_AVAILABLE: 43 | print("Using Polars for DataFrame operations") 44 | try: 45 | server_builder = PolarsMCPServerBuilder( 46 | server_name=f"{server_name_prefix}PolarsDataFrameServer", 47 | csv_file_path=csv_file_path, 48 | stateless_http=True 49 | ) 50 | server_type = "Polars" 51 | except Exception as e: 52 | print(f"Failed to initialize PolarsMCPServerBuilder: {e}") 53 | server_builder = None 54 | 55 | if server_builder and server_builder.df is not None: 56 | print(f"Successfully initialized {server_type} DataFrame HTTP MCP Server.") 57 | 58 | # Configure HTTP server parameters 59 | mcp_path = "/mcp" # Use the same path as the calculator example 60 | host = "0.0.0.0" 61 | port = 8000 # Must match client (and default Uvicorn port) 62 | 63 | # Start the server 64 | server_builder.run( 65 | transport="streamable-http", 66 | mount_path=mcp_path, 67 | host=host, 68 | port=port, 69 | print_info=True 70 | ) 71 | elif server_builder and server_builder.df is None: 72 | print(f"Server initialized but DataFrame failed to load from {csv_file_path}.") 73 | print("Check that the CSV file is valid.") 74 | sys.exit(1) 75 | else: 76 | print("Error: Neither Pandas nor Polars is available.") 77 | print("Please install pandas or polars: pip install pandas polars") 78 | sys.exit(1) -------------------------------------------------------------------------------- /sample/http_dataframe_mcp/sample_data.csv: -------------------------------------------------------------------------------- 1 | ID,Name,Age,City,Salary 2 | 1,Alice,30,New York,70000 3 | 2,Bob,24,Los Angeles,60000 4 | 3,Charlie,35,Chicago,80000 5 | 4,David,28,New York,75000 6 | 5,Eve,40,Chicago,90000 7 | 6,Frank,22,Los Angeles,55000 8 | 7,Grace,33,New York,82000 9 | 8,Hank,45,Chicago,95000 10 | 9,Ivy,29,Los Angeles,62000 11 | 10,Jack,31,New York,78000 12 | 11,Karen,38,Chicago,88000 13 | 12,Leo,26,Los Angeles,59000 14 | 13,Mona,32,New York,81000 15 | 14,Nick,42,Chicago,92000 16 | 15,Olivia,27,Los Angeles,61000 -------------------------------------------------------------------------------- 
/sample/http_mcp/README.md: -------------------------------------------------------------------------------- 1 | # MCP Calculator Demo 2 | 3 | ## What's Included 4 | - **Calculator Server**: HTTP-based MCP server with math operations. 5 | - **Flow Client**: Client that combines AI understanding with MCP functions. 6 | 7 | ## Requirements 8 | - Python 3.10+ 9 | - OpenAI API key (set as OPENAI_API_KEY environment variable) 10 | - Packages: `mcp`, `intelli`, `httpx` 11 | ## Quick Start 12 | 13 | 1. **Start the server** 14 | ```bash 15 | python http_mcp_calculator_server.py 16 | ``` 17 | This runs an MCP calculator server at http://localhost:8000/mcp 18 | 19 | 2. **Run the client** 20 | ```bash 21 | python http_math_flow_client.py 22 | ``` 23 | Try changing the query in the script to test different operations! 24 | 25 | ## How It Works 26 | 27 | **Server (http_mcp_calculator_server.py)** 28 | - Creates an MCP server with math tools 29 | - Tools: add, subtract, multiply 30 | - Uses streamable HTTP transport at "/mcp" endpoint 31 | 32 | **Client (http_math_flow_client.py)** 33 | - Creates a two-step flow: 34 | * OpenAI parses natural language into math operations 35 | * MCP agent sends operations to the calculator server 36 | - Shows the progression from text to calculation to result 37 | 38 | ## Try These Examples 39 | 40 | Modify the `user_query` variable in the client to try: 41 | ```python 42 | user_query = "What is 25 multiplied by 4?" 43 | user_query = "Can you subtract 15 from 100?" 44 | user_query = "Add 123 and 456 please" 45 | ``` 46 | 47 | The flow will extract the operation and numbers automatically. 48 | 49 | ## How Components Connect 50 | 51 | ``` 52 | User Query → OpenAI Parser → MCP Client → Calculator Server → Result 53 | ``` 54 | 55 | The client handles all parameter conversion and error handling automatically! 
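## Calling the Server Without the LLM Step

If you already know the operation and operands, you can point an MCP agent straight at the calculator server and skip the OpenAI parsing task. This is a minimal sketch based on the agent configuration used in the other samples in this repo; the `url`, `tool`, and `arg_` parameter names are assumed to match your running server, so adjust them if you changed the port or tool names.

```python
from intelli.flow.agents.agent import Agent
from intelli.flow.types import AgentTypes

# Direct call to the HTTP calculator server (default URL from this README)
calc_agent = Agent(
    agent_type=AgentTypes.MCP.value,
    provider="mcp",
    mission="Add two numbers",
    model_params={
        "url": "http://localhost:8000/mcp",
        "tool": "add",      # one of: add, subtract, multiply
        "arg_a": 7,
        "arg_b": 8,
    },
)
```

Wrap the agent in a `Task` and a single-task `Flow`, the same way `http_math_flow_client.py` does for the full two-step pipeline.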
56 | 57 | -------------------------------------------------------------------------------- /sample/http_mcp/http_mcp_calculator_server.py: -------------------------------------------------------------------------------- 1 | # http_mcp_calculator_server.py 2 | import os 3 | from intelli.mcp import MCPServerBuilder 4 | 5 | # Create a server with HTTP support 6 | server = MCPServerBuilder("Calculator", stateless_http=True) 7 | 8 | # Add tools with decorators 9 | @server.add_tool 10 | def add(a: int, b: int) -> str: 11 | """Add two numbers""" 12 | print(f"Adding {a} + {b}") 13 | result = a + b 14 | print(f"Result: {result}") 15 | return str(result) 16 | 17 | @server.add_tool 18 | def subtract(a: int, b: int) -> str: 19 | """Subtract second number from first number""" 20 | print(f"Subtracting {b} from {a}") 21 | result = a - b 22 | print(f"Result: {result}") 23 | return str(result) 24 | 25 | @server.add_tool 26 | def multiply(a: int, b: int) -> str: 27 | """Multiply two numbers""" 28 | print(f"Multiplying {a} * {b}") 29 | result = a * b 30 | print(f"Result: {result}") 31 | return str(result) 32 | 33 | if __name__ == "__main__": 34 | print("Starting Calculator MCP Server...") 35 | 36 | # Configure HTTP server with streamable-http transport 37 | mcp_path = "/mcp" 38 | host = "0.0.0.0" # Used for info display only 39 | port = 8000 # Used for info display only 40 | 41 | # Run the server with HTTP transport 42 | # Note: FastMCP internally handles host/port configuration 43 | server.run( 44 | transport="streamable-http", 45 | mount_path=mcp_path, 46 | host=host, 47 | port=port 48 | ) 49 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | with open("PIPREADME.md", "r", encoding="utf-8") as fh: 4 | pip_description = fh.read() 5 | 6 | setup( 7 | name="intelli", 8 | version="1.1.5", 9 | author="Intellinode", 10 | author_email="admin@intellinode.ai", 11 | description="Build AI agents and MCPs with Intellinode – simplifying model orchestration.", 12 | long_description=pip_description, 13 | long_description_content_type="text/markdown", 14 | url="https://www.intellinode.ai/", 15 | project_urls={ 16 | "Source Code": "https://github.com/intelligentnode/Intelli", 17 | }, 18 | packages=find_packages(exclude=["test", "test.*"]), 19 | package_data={"": ["*.in"]}, 20 | python_requires=">=3.10", 21 | install_requires=[ 22 | "python-dotenv>=1.0.0", 23 | "networkx>=3.2.0", 24 | ], 25 | extras_require={ 26 | "visual": ["matplotlib>=3.6.0"], 27 | "offline": [ 28 | "keras-nlp", 29 | "keras>=3", 30 | "librosa", 31 | "keras-hub", 32 | "tensorflow-text" 33 | ], 34 | "llamacpp": ["llama-cpp-python>=0.3.7", "huggingface_hub>=0.28.1"], 35 | "mcp": ["mcp[ws,cli]~=1.9.0", "pandas"], 36 | "dataframe": ["pandas", "polars>=0.19.0"], 37 | "all": [ 38 | "matplotlib>=3.6.0", 39 | "numpy<2.0", 40 | "keras-nlp", "keras>=3", "librosa", "keras-hub", "tensorflow-text", 41 | "llama-cpp-python>=0.3.7", "huggingface_hub>=0.28.1", 42 | "mcp[ws,cli]~=1.9.0", 43 | "pandas", "polars" 44 | ], 45 | "dev": ["pytest>=7.0.0"], 46 | }, 47 | ) 48 | --------------------------------------------------------------------------------
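The `extras_require` groups in `setup.py` map to optional feature sets. A likely way to install them, assuming the published package name matches the `name="intelli"` field above:

```bash
pip install intelli               # core flow and agent functionality
pip install "intelli[mcp]"        # adds mcp[ws,cli] and pandas for the MCP samples
pip install "intelli[dataframe]"  # pandas + polars for the dataframe servers
pip install "intelli[all]"        # everything, including the keras and llama.cpp extras
```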