├── .github └── workflows │ └── release.yml ├── .gitignore ├── LICENSE.txt ├── README.md ├── docs ├── src │ ├── agentic-tool-use.md │ ├── agents.md │ ├── custom_tools.md │ ├── faq.md │ ├── home.md │ ├── llms.md │ ├── mcp-integration.md │ ├── multi-agent-teams.md │ ├── observability.md │ ├── orchestration.md │ ├── tasks.md │ └── tools │ │ ├── amadeus_tools.md │ │ ├── audio_tools.md │ │ ├── calculator_tools.md │ │ ├── conversation_tools.md │ │ ├── embeddings_tools.md │ │ ├── faiss_tools.md │ │ ├── file_tools.md │ │ ├── fred_tools.md │ │ ├── github_tools.md │ │ ├── langchain_tools.md │ │ ├── linear_tools.md │ │ ├── pinecone_tools.md │ │ ├── stripe_tools.md │ │ ├── text_splitters.md │ │ ├── web_tools.md │ │ ├── wikipedia_tools.md │ │ └── yahoo_finance_tools.md └── static │ ├── favicon.ico │ ├── orchestra-loops.png │ └── orchestrator.png ├── examples └── python │ ├── browseruse_chat.py │ ├── fallback_example.py │ ├── finance_chat.py │ ├── finance_flow.py │ ├── finance_team.py │ ├── github_issue_linear_tracker_team.py │ ├── github_update_summarizer_team.py │ ├── grapher_flow.py │ ├── linear_chat.py │ ├── mcp │ ├── mcp_fast_calc.py │ ├── mcp_test_fastmcp.py │ ├── mcp_test_fetch.py │ ├── mcp_test_filesystem.py │ ├── mcp_test_playwright.py │ └── mcp_test_slack.py │ ├── receipt_archiver.py │ ├── reverse_image_shopping.py │ └── stripe_chat.py ├── package.json ├── packages └── python │ ├── LICENSE.txt │ ├── README.md │ ├── poetry.lock │ ├── pyproject.toml │ ├── setup.py │ └── src │ └── mainframe_orchestra │ ├── __init__.py │ ├── adapters │ ├── __init__.py │ └── mcp_adapter.py │ ├── agent.py │ ├── config.py │ ├── llm.py │ ├── orchestration.py │ ├── task.py │ ├── tools │ ├── __init__.py │ ├── amadeus_tools.py │ ├── audio_tools.py │ ├── calculator_tools.py │ ├── embedding_tools.py │ ├── faiss_tools.py │ ├── file_tools.py │ ├── fred_tools.py │ ├── github_tools.py │ ├── langchain_tools.py │ ├── linear_tools.py │ ├── matplotlib_tools.py │ ├── pinecone_tools.py │ ├── stripe_tools.py 
│ ├── text_splitters.py │ ├── web_tools.py │ ├── wikipedia_tools.py │ └── yahoo_finance_tools.py │ └── utils │ ├── __init__.py │ ├── braintrust_utils.py │ ├── logging_config.py │ └── parse_json_response.py ├── pnpm-workspace.yaml ├── scalar.config.json └── turbo.json /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Python Package Release 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v*.*.*' 7 | 8 | jobs: 9 | release: 10 | runs-on: ubuntu-latest 11 | 12 | steps: 13 | - name: Checkout code 14 | uses: actions/checkout@v4 15 | 16 | - name: Set up Python 17 | uses: actions/setup-python@v5 18 | with: 19 | python-version: '3.x' 20 | 21 | - name: Install Poetry 22 | run: | 23 | curl -sSL https://install.python-poetry.org | python3 - 24 | echo "$HOME/.local/bin" >> "$GITHUB_PATH" 25 | 26 | - name: Install dependencies 27 | working-directory: ./packages/python 28 | run: poetry install 29 | 30 | - name: Build package 31 | working-directory: ./packages/python 32 | run: poetry build 33 | 34 | - name: Publish to PyPI 35 | working-directory: ./packages/python 36 | env: 37 | TWINE_USERNAME: __token__ 38 | TWINE_PASSWORD: ${{ secrets.PYPI_KEY }} 39 | run: poetry publish --username $TWINE_USERNAME --password $TWINE_PASSWORD 40 | 41 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before 
PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # macOS 86 | .DS_Store 87 | 88 | # pyenv 89 | # For a library or package, you might want to ignore these files since the code is 90 | # intended to run in multiple environments; otherwise, check them in: 91 | # .python-version 92 | 93 | # pipenv 94 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 95 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 96 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 97 | # install all needed dependencies. 98 | #Pipfile.lock 99 | 100 | # poetry 101 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 102 | # This is especially recommended for binary packages to ensure reproducibility, and is more 103 | # commonly ignored for libraries. 104 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 105 | #poetry.lock 106 | 107 | # pdm 108 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
109 | #pdm.lock 110 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 111 | # in version control. 112 | # https://pdm.fming.dev/#use-with-ide 113 | .pdm.toml 114 | 115 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 116 | __pypackages__/ 117 | 118 | # Celery stuff 119 | celerybeat-schedule 120 | celerybeat.pid 121 | 122 | # SageMath parsed files 123 | *.sage.py 124 | 125 | # Environments 126 | .env 127 | .venv 128 | env/ 129 | venv/ 130 | ENV/ 131 | env.bak/ 132 | venv.bak/ 133 | myenv/ 134 | 135 | # Spyder project settings 136 | .spyderproject 137 | .spyproject 138 | 139 | # Rope project settings 140 | .ropeproject 141 | 142 | # mkdocs documentation 143 | /site 144 | 145 | # mypy 146 | .mypy_cache/ 147 | .dmypy.json 148 | dmypy.json 149 | 150 | # Pyre type checker 151 | .pyre/ 152 | 153 | # pytype static type analyzer 154 | .pytype/ 155 | 156 | # Cython debug symbols 157 | cython_debug/ 158 | 159 | # PyCharm 160 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 161 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 162 | # and can be added to the global gitignore or merged into this file. For a more nuclear 163 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
164 | #.idea/ 165 | 166 | # dependencies 167 | node_modules 168 | .pnp 169 | .pnp.js 170 | 171 | # testing 172 | coverage 173 | 174 | # next.js 175 | .next/ 176 | out/ 177 | build 178 | dist 179 | 180 | # misc 181 | *.pem 182 | 183 | # debug 184 | npm-debug.log* 185 | yarn-debug.log* 186 | yarn-error.log* 187 | 188 | # local env files 189 | .env 190 | .env.local 191 | .env.development.local 192 | .env.test.local 193 | .env.production.local 194 | 195 | # turbo 196 | .turbo 197 | 198 | # vercel 199 | .vercel 200 | 201 | # ruff 202 | .ruff_cache/ 203 | -------------------------------------------------------------------------------- /docs/src/agents.md: -------------------------------------------------------------------------------- 1 | # Agents 2 | 3 | Agents in Orchestra are components that encapsulate specific personas to be assigned to tasks. They are designed to perform tasks in a manner consistent with their defined role, goal, and attributes. Agents are typically configured with a specific LLM and, if needed, a set of tools tailored to their role, enabling them to effectively execute their assigned tasks. 4 | 5 | The reusability of agents in Orchestra not only streamlines workflow design but also contributes to the overall consistency of output. By utilizing the same agent across different tasks within its area of expertise, you can expect uniform behavior and response styles, which is particularly valuable in maintaining a coherent user experience or adhering to specific operational standards. 6 | 7 | ### Configuring Agent Intelligence and Capabilities 8 | 9 | In Orchestra, agents can be further customized by setting specific LLMs and tool sets. This configuration allows you to fine-tune the agent's intelligence level, associated costs, and functional capabilities, effectively creating specialized teams of agents. 
10 | 11 | ### Anatomy of an Agent 12 | 13 | An agent in Orchestra is defined by four core components: 14 | 15 | - **role**: Defines the agent's purpose or function within the Orchestra workflow. e.g. "Web Researcher". 16 | - **goal**: Specifies the desired outcome or objective that the agent aims to achieve. e.g. "find relevant Python agent repositories with open issues". 17 | - **attributes** (Optional): Additional characteristics or traits that shape the agent's behavior and personality. e.g. "analytical, detail-oriented, and determined to write thorough reports". 18 | - **llm**: The underlying language model assigned to the agent for generating responses. e.g. `OpenrouterModels.haiku`. 19 | 20 | ### Creating an Agent 21 | 22 | To create an agent in Orchestra, you can use the Agent class provided by the library. Here's an example: 23 | 24 | ```python 25 | from mainframe_orchestra import Agent, OpenrouterModels 26 | 27 | customer_support_agent = Agent( 28 | role="customer support representative", 29 | goal="to resolve customer inquiries accurately and efficiently", 30 | attributes="friendly, empathetic, and knowledgeable about the product", 31 | llm=OpenrouterModels.haiku 32 | ) 33 | ``` 34 | 35 | ### Assigning Agents to Tasks 36 | 37 | Here's an example demonstrating how an agent can be created and then integrated into multiple tasks within an Orchestra workflow: 38 | 39 | ```python 40 | from mainframe_orchestra import Task, Agent, OpenrouterModels 41 | 42 | data_analyst_agent = Agent( 43 | role="data analyst", 44 | goal="to provide insights and recommendations based on data analysis", 45 | attributes="analytical, detail-oriented, and proficient in statistical methods", 46 | llm=OpenrouterModels.haiku 47 | ) 48 | 49 | def analysis_task(sales_data): 50 | return Task.create( 51 | agent=data_analyst_agent, 52 | context=f"The sales data for the past quarter is attached: '{sales_data}'.", 53 | instruction="Analyze the sales data and provide recommendations for 
improving revenue." 54 | ) 55 | ``` 56 | 57 | ### Assigning Tools to Agents 58 | 59 | Agents can be assigned tools to enhance their capabilities and enable them to perform specific actions. Tools are functions that the agent can use to interact with external systems, process data, or perform specialized tasks. 60 | 61 | The agent will have the opportunity to use tools provided to the agent or the task to assist in its completion. The tools are passed to the agent's 'tools' parameter during initialization, and the agent will then be able to see and use the tools before completing their final response. They can call tools once, recursively, or multiple times in parallel. For more on tool use see the [tool use](./tool-use) page. 62 | 63 | Here's an example of assigning tools to an agent: 64 | 65 | ```python 66 | from mainframe_orchestra import Agent, GitHubTools, OpenaiModels 67 | 68 | researcher = Agent( 69 | role="GitHub researcher", 70 | goal="find relevant Python agent repositories with open issues", 71 | attributes="analytical, detail-oriented, able to assess repository relevance and popularity", 72 | llm=OpenaiModels.gpt_4o_mini, 73 | tools={GitHubTools.search_repositories, GitHubTools.get_repo_details} 74 | ) 75 | ``` 76 | 77 | In this example, the researcher agent is assigned two tools from the GitHubTools module. These tools allow the agent to search for repositories and get repository details, which are essential for its role as a GitHub researcher. Tools are passed to the agent's 'tools' parameter during initialization. 78 | 79 | ##### Advanced Agent Parameters 80 | 81 | Agents can be assigned additional parameters to tune their behavior. These additional params control model temperature and max tokens. Default temperature is 0.7 and max tokens is 4000. You can set temperature and max tokens in the agent definition and they will override the defaults set in the llm. 
Here's an example: 82 | 83 | ```python 84 | from mainframe_orchestra import Agent, OpenrouterModels 85 | 86 | assistant_agent = Agent( 87 | role="assistant", 88 | goal="to provide insights and recommendations based on data analysis", 89 | llm=OpenrouterModels.haiku, 90 | max_tokens=500, 91 | temperature=0.5 92 | ) 93 | ``` 94 | 95 | These additional settings are optional and are often not required unless custom or specific temperature and max tokens are required. The default temperature of 0.7 and max tokens of 4000 covers most use cases, but programming or long responses may benefit from custom temperature and max tokens. 96 | 97 | #### LLM Fallbacks via Lists 98 | 99 | You can now specify multiple LLMs for a task, allowing for automatic fallback if the primary LLM fails or times out. 100 | 101 | In this example, if `AnthropicModels.sonnet_3_5` fails (e.g., due to rate limiting), the task automatically falls back to `AnthropicModels.haiku_3_5`. You can specify as many LLMs as you want in the list and they will be tried in order. You can have the models fall back to another of the same provider, or you can have them fall back to a different provider if the provider itself fails. This is useful for handling rate limits or other failures that may occur with certain LLMs, particularly in a production environment. 
102 | 103 | ```python 104 | from mainframe_orchestra import Agent, GitHubTools, AnthropicModels 105 | 106 | researcher = Agent( 107 | role="GitHub researcher", 108 | goal="find relevant Python agent repositories with open issues", 109 | attributes="analytical, detail-oriented, able to assess repository relevance and popularity", 110 | llm=[AnthropicModels.sonnet_3_5, AnthropicModels.haiku_3_5], 111 | tools={GitHubTools.search_repositories, GitHubTools.get_repo_details} 112 | ) 113 | ``` 114 | 115 | ##### Prompting 116 | 117 | Prompting involves crafting effective prompts for agent roles, goals, and attributes to elicit desired behaviors and responses from the language model. Here are some tips for effective prompting: 118 | 119 | - Use clear and concise language that captures the essence of the agent's role and goal. 120 | - Use the optional attributes field to provide additional behavioral cues and suggestions based on feedback from tests. 121 | - Experiment with different prompt variations and evaluate their impact on agent performance. 122 | - Refine the role, goal, and attribute wording iteratively based on observed agent behavior. 123 | 124 | Testing and iterative development is key to creating effective prompts. The feedback from the initial runs will be used to refine the prompts and improve the performance of the agents. It's worth testing and adjusting early in the process as you develop out your multi-agent team or workflows. 125 | 126 | By incorporating these advanced techniques, you can create agents that can handle complex tasks, adapt to user preferences, and provide more personalized and context-aware responses. 127 | -------------------------------------------------------------------------------- /docs/src/faq.md: -------------------------------------------------------------------------------- 1 | # FAQ 2 | 3 | This FAQ page addresses common questions about setting up and using Mainframe-Orchestra. 
If you have additional questions, please feel free to open an issue on [our GitHub repository](https://github.com/mainframecomputer/orchestra). 4 | 5 | ### Setting Up Agents and Tasks 6 | 7 | ##### How do I set up an agent? 8 | 9 | There are two main ways to set up agents in Orchestra: 10 | 11 | One way to set up agents is by creating a class of agents. This approach allows you to group related agents together and provides a structured way to organize your agents. 12 | 13 | ```python 14 | # Option 1: Class of agents 15 | class Agents: 16 | web_researcher = Agent( 17 | role="web researcher", 18 | goal="find relevant information on the web", 19 | attributes="detail-oriented, analytical", 20 | llm=OpenrouterModels.haiku 21 | ) 22 | summarizer = Agent( 23 | role="summarizer", 24 | goal="condense information into concise summaries", 25 | attributes="concise, clear communicator", 26 | llm=OpenrouterModels.haiku 27 | ) 28 | programmer = Agent( 29 | role="programmer", 30 | goal="write and debug code", 31 | attributes="logical, problem-solver", 32 | llm=OpenrouterModels.haiku 33 | ) 34 | ``` 35 | 36 | Another way to set up agents is by creating them directly in your script. This approach is more straightforward and can be useful for simpler setups or when you need to create agents on the fly. 37 | 38 | ```python 39 | # Option 2: Direct setup in script 40 | researcher = Agent( 41 | role="researcher", 42 | goal="conduct thorough research on given topics", 43 | attributes="analytical, detail-oriented", 44 | llm=OpenrouterModels.haiku 45 | ) 46 | ``` 47 | 48 | ##### How do I set up a task? 49 | 50 | To set up a task, you can wrap the Task.create method in a function that imports what it needs to perform the task. 
This makes the task modular and easy to use in sequences: 51 | 52 | ```python 53 | from mainframe_orchestra import Task, OpenrouterModels 54 | 55 | def research_task(topic): 56 | result = Task.create( 57 | agent="web_researcher", 58 | context=f"The user wants information about {topic}", 59 | instruction=f"Explain {topic} and provide a comprehensive summary", 60 | llm=OpenrouterModels.haiku 61 | ) 62 | return result 63 | 64 | # Usage 65 | topic = "artificial intelligence" 66 | research_result = research_task(topic) 67 | ``` 68 | 69 | ### Language Models and Customization 70 | 71 | ##### How do I use a model that's not supported in the LLM function list? 72 | 73 | You can use the custom_model method available for each provider to use models that are not explicitly listed in the LLM function list: 74 | 75 | ```python 76 | from mainframe_orchestra import OpenrouterModels, AnthropicModels, OllamaModels, OpenaiModels 77 | 78 | # OpenRouter custom model 79 | llm = OpenrouterModels.custom_model(model_name="meta-llama/llama-3-70b-instruct") 80 | 81 | # Anthropic custom model 82 | llm = AnthropicModels.custom_model(model_name="claude-3-opus-20240229") 83 | 84 | # Ollama custom model 85 | llm = OllamaModels.custom_model(model_name="llama3.1:405b") 86 | 87 | # OpenAI custom model 88 | llm = OpenaiModels.custom_model(model_name="gpt-4o-mini") 89 | ``` 90 | 91 | ### Privacy and Local Usage 92 | 93 | ##### How do I use Orchestra locally and privately? 94 | 95 | To use Orchestra locally and privately: 96 | 97 | - Download and install Ollama 98 | - Pull the model you want to use 99 | - Host it from the terminal 100 | - Select the model function from llm.py or set a newer model card with the custom_model function 101 | 102 | All requests will stay internal to your device in this configuration, ensuring privacy and local usage. 103 | 104 | ### Future Development 105 | 106 | ##### Are you adding more tools? Are you open to requests? 
107 | 108 | Yes, we are open to adding new features and toolkits as needed. If you have a request for a new tool or feature, please open an issue on [our GitHub repository](https://github.com/mainframecomputer/orchestra) or contribute directly to the project. 109 | 110 | ### Additional Questions 111 | 112 | ##### How do I handle errors when using tools? 113 | 114 | Implement proper error handling when using tools, especially those that interact with external APIs. Tools that print and return errors rather than fail are preferable, as they can be provided back to the agent in a retry loop, allowing multiple attempts to succeed. 115 | 116 | ##### How should I manage API keys for various tools? 117 | 118 | Use environment variables to manage API keys required by various tools. This enhances security and makes it easier to deploy your application across different environments. 119 | 120 | ##### Can I create custom tools? 121 | 122 | Yes, you can create custom tools following the same pattern as the built-in ones. This allows you to extend Orchestra's functionality to meet your specific needs. 123 | 124 | -------------------------------------------------------------------------------- /docs/src/home.md: -------------------------------------------------------------------------------- 1 | 2 | # Introduction 3 | 4 | Orchestra is a lightweight open-source agentic framework for creating LLM-driven task pipelines and multi-agent teams, centered around the concept of Tasks rather than conversation patterns. 5 | 6 | [Orchestra GitHub Repository](https://github.com/mainframecomputer/orchestra) 7 | 8 | ### Core Principles 9 | 10 | Orchestra is built around the concept of task completion, rather than conversation patterns. It has a modular architecture with interchangeable components. It's meant to be lightweight with minimal dependencies, and it offers transparency through a flat hierarchy and full prompt exposure. 
11 | 12 | ![Orchestra Orchestrator](https://utfs.io/f/lKo6VaP8kaqVeFkvKtdZGxnUaslhq80BRH2VtP5O6oNbFvjw) 13 | 14 | ### Core Components 15 | 16 | ##### Tasks 17 | 18 | Tasks are the fundamental building blocks of Orchestra. Each task represents a single, discrete unit of work to be performed by a Large Language Model. They include an optional context field for providing relevant background information, and an instruction that defines the core purpose of the task. 19 | 20 | ##### Agents 21 | 22 | An Agent in Orchestra represents a specific role or persona with a clear goal. It can have optional attributes, and is powered by a selected language model (LLM). This structure allows Agents to maintain a consistent persona across multiple tasks. Agents can also be assigned tools, which are specific deterministic functions that the agent can use to interact with libraries, APIs, the internet, and more. 23 | 24 | ##### Tools 25 | 26 | Tools in Orchestra are wrappers around external services or APIs, as well as utilities for common operations. You can link tools together with tasks to create structured, deterministic AI-integrated pipelines, offering precise control over the AI's actions in scenarios that require predictable workflows. Or, you can directly assign tools to agents, and the agents to tasks, enabling more autonomous, self-determined tool use. In this mode, AI Agents can independently choose and utilize tools to complete their assigned tasks. 27 | 28 | ##### Language Models 29 | 30 | Orchestra supports various Language Models out-of-the-box, including models from OpenAI, Anthropic, open source models from OpenRouter, Groq, and Ollama where they can be locally hosted on device. 
31 | 32 | ### Getting Started 33 | 34 | To begin using Orchestra: 35 | 36 | - Create a folder for your Orchestra projects 37 | - Create a .env file with your relevant API Keys 38 | - In your new folder, set up a virtual environment and install Orchestra 39 | 40 | ```bash 41 | python -m venv venv 42 | source venv/bin/activate # On Windows use: venv\Scripts\activate 43 | pip install mainframe-orchestra 44 | ``` 45 | 46 | Once you have installed Orchestra, you can start building your agentic workflows and multi-agent teams. 47 | 48 | ```python 49 | # Single Agent example 50 | from mainframe_orchestra import Task, Agent, WebTools, OpenrouterModels 51 | 52 | researcher = Agent( 53 | role="research assistant", 54 | goal="answer user queries", 55 | attributes="thorough in web research", 56 | tools={WebTools.serper_search}, 57 | llm=OpenrouterModels.haiku 58 | ) 59 | 60 | def research_task(agent, topic): 61 | return Task.create( 62 | agent=agent, 63 | instruction=f"Research {topic} and provide a summary of the top 3 results." 
64 | ) 65 | 66 | def main(): 67 | topic = input("Enter a topic to research: ") 68 | response = research_task(researcher, topic) 69 | print(response) 70 | 71 | if __name__ == "__main__": 72 | main() 73 | ``` 74 | 75 | ```python 76 | # Multi Agent example 77 | from mainframe_orchestra import Agent, Task, WebTools, WikipediaTools, AmadeusTools, OpenrouterModels, set_verbosity 78 | from datetime import datetime 79 | 80 | set_verbosity(1) 81 | 82 | web_research_agent = Agent( 83 | role="web research agent", 84 | goal="search the web thoroughly for travel information", 85 | attributes="hardworking, diligent, thorough, comprehensive.", 86 | llm=OpenrouterModels.haiku, 87 | tools=[WebTools.serper_search, WikipediaTools.search_articles, WikipediaTools.search_images] 88 | ) 89 | 90 | travel_agent = Agent( 91 | role="travel agent", 92 | goal="assist the traveller with their request", 93 | attributes="friendly, hardworking, and comprehensive in reporting back to users", 94 | llm=OpenrouterModels.haiku, 95 | tools=[AmadeusTools.search_flights, WebTools.serper_search, WebTools.get_weather_data] 96 | ) 97 | 98 | def research_destination(destination, interests): 99 | return Task.create( 100 | agent=web_research_agent, 101 | context=f"User Destination: {destination}\nUser Interests: {interests}", 102 | instruction=f"Research {destination} and write a comprehensive report with images embedded in markdown." 103 | ) 104 | 105 | def research_events(destination, dates, interests): 106 | return Task.create( 107 | agent=web_research_agent, 108 | context=f"Destination: {destination}\nDates: {dates}\nInterests: {interests}", 109 | instruction="Research events in the given location for the given date span." 110 | ) 111 | 112 | def research_weather(destination, dates): 113 | return Task.create( 114 | agent=travel_agent, 115 | context=f"Location: {destination}\nDates: {dates}", 116 | instruction="Search for weather information and write a report." 
117 | ) 118 | 119 | def search_flights(current_location, destination, dates): 120 | return Task.create( 121 | agent=travel_agent, 122 | context=f"From: {current_location}\nTo: {destination}\nDates: {dates}", 123 | instruction="Search for flights and report on the best options." 124 | ) 125 | 126 | def write_travel_report(destination_report, events_report, weather_report, flight_report): 127 | return Task.create( 128 | agent=travel_agent, 129 | context=f"Reports: {destination_report}, {events_report}, {weather_report}, {flight_report}", 130 | instruction="Write a comprehensive travel plan based on the provided reports." 131 | ) 132 | 133 | def main(): 134 | destination = input("Enter a destination: ") 135 | interests = input("Enter your interests: ") 136 | dates = input("Enter your dates of travel: ") 137 | current_location = input("Enter your current location: ") 138 | 139 | destination_report = research_destination(destination, interests) 140 | events_report = research_events(destination, dates, interests) 141 | weather_report = research_weather(destination, dates) 142 | flight_report = search_flights(current_location, destination, dates) 143 | travel_report = write_travel_report(destination_report, events_report, weather_report, flight_report) 144 | print(travel_report) 145 | 146 | if __name__ == "__main__": 147 | main() 148 | ``` 149 | 150 | 151 | ### Multi-Agent Teams 152 | 153 | Orchestra enables the creation of powerful multi-agent teams by assigning tasks to agents equipped with specific tools. This approach facilitates complex workflows and collaborative problem-solving, allowing you to tackle intricate challenges that require diverse skills and knowledge. 154 | 155 | In a multi-agent team, each agent is designed with a specialized role, a set of tools, and specific expertise. By combining these agents, you can create AI workflows capable of handling a wide range of tasks, from research and analysis to problem-solving and code generation. 
156 | 157 | **Acknowledgment**: Mainframe-Orchestra is a fork and further development of [TaskflowAI](https://github.com/philippe-page/taskflowai) by Philippe Pagé. 158 | -------------------------------------------------------------------------------- /docs/src/llms.md: -------------------------------------------------------------------------------- 1 | # LLMs 2 | 3 | Mainframe-Orchestra supports integrating a variety of language models, providing developers with the flexibility to choose the most appropriate model for their specific use case. The Language Model (LLM) interfaces in Orchestra offer a unified and consistent way to interact with various AI models from different providers. 4 | 5 | ### LLM Interface Structure 6 | 7 | The LLM interfaces are defined in the llm.py module. This module contains several classes, each representing a different LLM provider: 8 | 9 | - OpenaiModels 10 | - AnthropicModels 11 | - OpenrouterModels 12 | - OllamaModels 13 | - GroqModels 14 | - TogetheraiModels 15 | - GeminiModels 16 | - DeepseekModels 17 | 18 | Each class contains static methods corresponding to specific models offered by the provider, following a consistent structure. 19 | 20 | ### Supported Language Models 21 | 22 | Orchestra supports a wide range of language models from various providers. 
Here's an overview of some supported models: 23 | 24 | - OpenAI Models: GPT-4.5-preview, GPT-4o, GPT-4, GPT-4 Turbo, GPT-3.5 Turbo 25 | - Anthropic Models: Claude-3 Opus, Claude-3 Sonnet, Claude-3 Haiku, Claude-3.5 Sonnet, Claude-3.7 Sonnet 26 | - Openrouter Models: Various models including Anthropic Claude, OpenAI GPT, Llama, Mistral AI 27 | - Ollama Models: Llama 3, Gemma, Mistral, Qwen, Phi-3, Llama 2, CodeLlama, LLaVA, Mixtral 28 | - Groq Models: Gemma, Llama3, Llama3.1, Mixtral 29 | - Togetherai Models: Meta Llama 3.1, Mixtral, Mistral, many other open source models 30 | - Gemini Models: Gemini 2.0, Gemini 1.5 Pro, Gemini 1.5 Flash, Gemini 1.5 Pro (Flash) 31 | - Deepseek Models: Deepseek Reasoner, Deepseek Chat 32 | 33 | ### Integrating Language Models 34 | 35 | To integrate a language model within a `Task` object, you simply need to specify the appropriate model function from the corresponding class. Here's an example: 36 | 37 | ```python 38 | from mainframe_orchestra import OpenrouterModels 39 | 40 | llm = OpenrouterModels.haiku 41 | ``` 42 | 43 | In this example, we're using the OpenrouterModels.haiku model. You can then assign llm to any agent. The llm parameter is passed to the Task.create() method, allowing the task to use the specified language model for generating responses. This is helpful if you want to use the same model for multiple agents. Alternatively, you can pass the model directly to the agent as a parameter, like `llm=OpenrouterModels.haiku`, if you want certain agents to use specific models. 44 | 45 | ### Language Model Selection Considerations 46 | 47 | When selecting a language model for your agents, consider the following factors: 48 | 49 | - Performance: Different models vary in speed, accuracy, and output quality. 50 | - Cost: Consider the cost implications, especially for production deployments. 51 | - Capabilities: Ensure the selected model aligns with your task requirements (e.g., natural language generation, code generation). 
52 | - Context Window Size: For tasks requiring larger context, choose models with appropriate context window sizes. 53 | - Tool Use Capabilities: Some models are better suited for tasks involving tool use. 54 | 55 | ### Advanced Techniques 56 | 57 | Orchestra supports several advanced techniques for working with language models: 58 | 59 | - Chaining Multiple Models: You can use different models for different stages of a workflow. 60 | - Model-Agnostic Tasks: Design tasks that can work with various language models by passing the LLM as a parameter. 61 | - Custom Models: Use the `custom_model` method to work with models not explicitly defined. 62 | 63 | ### LLM Fallbacks via Lists 64 | 65 | You can now specify multiple LLMs for a task, allowing for automatic fallback if the primary LLM fails or times out. 66 | 67 | In this example, if `AnthropicModels.sonnet_3_5` fails (e.g., due to rate limiting), the task automatically falls back to `AnthropicModels.haiku_3_5`. You can specify as many LLMs as you want in the list and they will be tried in order. You can have the models fall back to another of the same provider, or you can have them fall back to a different provider if the provider itself fails. This is useful for handling rate limits or other failures that may occur with certain LLMs, particularly in a production environment. 
68 | 69 | ```python 70 | from mainframe_orchestra import Agent, GitHubTools, AnthropicModels 71 | 72 | researcher = Agent( 73 | role="GitHub researcher", 74 | goal="find relevant Python agent repositories with open issues", 75 | attributes="analytical, detail-oriented, able to assess repository relevance and popularity", 76 | llm=[AnthropicModels.sonnet_3_5, AnthropicModels.haiku_3_5], 77 | tools={GitHubTools.search_repositories, GitHubTools.get_repo_details} 78 | ) 79 | ``` 80 | 81 | ### Using Custom Models 82 | 83 | Orchestra provides flexibility to use custom or unsupported models through the `custom_model` method available for each provider. Here's how you can use it: 84 | 85 | ```python 86 | # OpenAI custom model 87 | llm = OpenaiModels.custom_model("gpt-5") 88 | 89 | # Anthropic custom model 90 | llm = AnthropicModels.custom_model("claude-3-opus-20240229") 91 | 92 | # OpenRouter custom model 93 | llm = OpenrouterModels.custom_model("meta-llama/llama-3-70b-instruct") 94 | 95 | # Ollama custom model 96 | llm = OllamaModels.custom_model("llama3") 97 | 98 | ``` 99 | 100 | This approach allows you to use models that may not have pre-built functions in Orchestra, or to easily switch between different versions or fine-tuned variants of models. Remember to ensure that you have the necessary API access and credentials for the custom model you're trying to use. 101 | 102 | ### Customizing OpenAI Base URL 103 | 104 | Orchestra provides flexibility to customize the OpenAI base URL, allowing you to connect to OpenAI-compatible APIs or proxies. This is particularly useful for: 105 | 106 | - Using Azure OpenAI endpoints 107 | - Connecting to local deployments of OpenAI-compatible models 108 | - Using proxy services that implement the OpenAI API 109 | - Working with custom OpenAI-compatible endpoints 110 | 111 | There are three ways to customize the OpenAI base URL: 112 | 113 | #### 1. 
Using Environment Variables 114 | 115 | Set the `OPENAI_BASE_URL` environment variable before running your application: 116 | 117 | ```python 118 | import os 119 | os.environ["OPENAI_BASE_URL"] = "https://your-custom-endpoint.com/v1" 120 | 121 | # Now all OpenAI requests will use the custom base URL 122 | from mainframe_orchestra import Agent, Task, OpenaiModels 123 | ``` 124 | 125 | #### 2. Setting a Global Base URL 126 | 127 | Use the `set_base_url` class method to set a default base URL for all OpenAI requests: 128 | 129 | ```python 130 | from mainframe_orchestra.llm import OpenaiModels 131 | 132 | # Set a global base URL for all OpenAI requests 133 | OpenaiModels.set_base_url("https://your-custom-endpoint.com/v1") 134 | 135 | # All subsequent requests will use this base URL 136 | response, error = await OpenaiModels.gpt_4o(messages=[{"role": "user", "content": "Hello"}]) 137 | ``` 138 | 139 | #### 3. Per-Request Base URL 140 | 141 | Specify a custom base URL for a specific request: 142 | 143 | ```python 144 | from mainframe_orchestra.llm import OpenaiModels 145 | 146 | # Use a custom base URL for this specific request only 147 | response, error = await OpenaiModels.gpt_4o( 148 | messages=[{"role": "user", "content": "Hello"}], 149 | base_url="https://your-custom-endpoint.com/v1" 150 | ) 151 | ``` 152 | 153 | This flexibility allows you to easily switch between different OpenAI-compatible endpoints based on your specific needs, without changing your code structure. 154 | 155 | ### Conclusion 156 | 157 | The Orchestra framework provides a robust and flexible approach to integrating a wide range of language models. By allowing language model selection at the task level and providing a consistent interface across different providers, Orchestra empowers developers to optimize their AI workflows for specific use cases while maintaining code simplicity and reusability. 
158 | 159 | -------------------------------------------------------------------------------- /docs/src/multi-agent-teams.md: -------------------------------------------------------------------------------- 1 | # Multi-Agent Teams 2 | 3 | Mainframe-Orchestra enables the creation and orchestration of multi-agent teams to tackle complex tasks. This section explores the Agent and Task classes, their interaction, and how to design effective multi-agent workflows. 4 | 5 | ### Multi-Agent Workflows 6 | 7 | Here's an example of a multi-agent flow in Mainframe-Orchestra: 8 | 9 | ```python 10 | from mainframe_orchestra import Agent, Task, WebTools, WikipediaTools, AmadeusTools, OpenrouterModels, set_verbosity 11 | from datetime import datetime 12 | 13 | set_verbosity(1) 14 | 15 | web_research_agent = Agent( 16 | role="web research agent", 17 | goal="search the web thoroughly for travel information", 18 | attributes="hardworking, diligent, thorough, comprehensive.", 19 | llm=OpenrouterModels.gpt_4o, 20 | tools=[WebTools.serper_search, WikipediaTools.search_articles, WikipediaTools.search_images] 21 | ) 22 | 23 | travel_agent = Agent( 24 | role="travel agent", 25 | goal="assist the traveller with their request", 26 | attributes="friendly, hardworking, and comprehensive and extensive in reporting back to users", 27 | llm=OpenrouterModels.gpt_4o, 28 | tools=[AmadeusTools.search_flights, WebTools.serper_search, WebTools.get_weather_data] 29 | ) 30 | 31 | # Define the taskflow 32 | 33 | def research_destination(destination, interests): 34 | destination_report = Task.create( 35 | agent=web_research_agent, 36 | context=f"User Destination: {destination}\nUser Interests: {interests}", 37 | instruction=f"Use your tools to search relevant information about the given destination: {destination}. Use wikipedia tools to search the destination's wikipedia page, as well as images of the destination. 
In your final answer you should write a comprehensive report about the destination with images embedded in markdown." 38 | ) 39 | return destination_report 40 | 41 | def research_events(destination, dates, interests): 42 | events_report = Task.create( 43 | agent=web_research_agent, 44 | context=f"User's intended destination: {destination}\n\nUser's intended dates of travel: {dates}\nUser Interests: {interests}", 45 | instruction="Use your tools to research events in the given location for the given date span. Ensure your report is a comprehensive report on events in the area for that time period." 46 | ) 47 | return events_report 48 | 49 | def research_weather(destination, dates): 50 | current_date = datetime.now().strftime("%Y-%m-%d") 51 | weather_report = Task.create( 52 | agent=travel_agent, 53 | context=f"Location: {destination}\nDates: {dates}\n(Current Date: {current_date})", 54 | instruction="Use your weather tool to search for weather information in the given dates and write a report on the weather for those dates. Do not be concerned about dates in the future; ** IF dates are more than 10 days away, use web search instead of weather tool. If the dates are within 10 days, use the weather tool. ** Always search for weather information regardless of the date you think it is." 55 | ) 56 | return weather_report 57 | 58 | def search_flights(current_location, destination, dates): 59 | flight_report = Task.create( 60 | agent=travel_agent, 61 | context=f"Current Location: {current_location}\n\nDestination: {destination}\nDate Range: {dates}", 62 | instruction=f"Search for a lot of flights in the given date range to collect a bunch of options and return a report on the best options in your opinion, based on convenience and lowest price." 
63 | ) 64 | return flight_report 65 | 66 | def write_travel_report(destination_report, events_report, weather_report, flight_report): 67 | travel_report = Task.create( 68 | agent=travel_agent, 69 | context=f"Destination Report: {destination_report}\n--------\n\nEvents Report: {events_report}\n--------\n\nWeather Report: {weather_report}\n--------\n\nFlight Report: {flight_report}", 70 | instruction=f"Write a comprehensive travel plan and report given the information above. Ensure your report conveys all the detail in the given information, from flight options, to weather, to events, and image urls, etc. Preserve detail and write your report in extensive length." 71 | ) 72 | return travel_report 73 | 74 | def main(): 75 | current_location = input("Enter current location: ") 76 | destination = input("Enter destination: ") 77 | dates = input("Enter dates: ") 78 | interests = input("Enter interests: ") 79 | 80 | destination_report = research_destination(destination, interests) 81 | print(destination_report) 82 | events_report = research_events(destination, dates, interests) 83 | print(events_report) 84 | weather_report = research_weather(destination, dates) 85 | print(weather_report) 86 | flight_report = search_flights(current_location, destination, dates) 87 | print(flight_report) 88 | travel_report = write_travel_report(destination_report, events_report, weather_report, flight_report) 89 | print(travel_report) 90 | 91 | if __name__ == "__main__": 92 | main() 93 | ``` 94 | 95 | Each agent is equipped with specific tools from the WebTools, WikipediaTools, and AmadeusTools modules, allowing them to interact with the web, Wikipedia, and Amadeus effectively. The agents work in sequence, each building upon the work of the previous one, creating a sequential workflow. 96 | 97 | Multi-agent workflows offer several benefits. They allow for specialization, with each agent focusing on specific tasks. The workflow becomes modular, divided into smaller, manageable parts. 
These workflows can scale to handle complex tasks by distributing work among agents. They're also flexible, allowing easy modification to meet changing needs. 98 | 99 | ### Workflow Design 100 | 101 | When designing multi-agent workflows, consider the following steps: 102 | 103 | - Break down the overall problem into distinct subtasks. 104 | - Create specialized agents for each subtask, defining their roles, goals, and tools. 105 | - Design a sequence of tasks that pass information between agents. 106 | - Use the Task.create() method to execute tasks for each agent in the workflow. 107 | - Carefully curate tool sets for each agent, providing only the tools necessary for their specific tasks. 108 | - Use descriptive attributes for agents to guide their behavior and decision-making process. 109 | - Ensure smooth information flow between tasks by structuring your workflow carefully. 110 | - Monitor and analyze the performance of your multi-agent teams to identify areas for improvement. 111 | 112 | -------------------------------------------------------------------------------- /docs/src/observability.md: -------------------------------------------------------------------------------- 1 | # Observability 2 | 3 | ### Braintrust Integration 4 | 5 | ##### How do I enable or disable Braintrust integration? 6 | 7 | Orchestra has built-in support for [Braintrust](https://www.braintrust.dev/), which provides observability and evaluation for LLM applications. 8 | 9 | To enable Braintrust integration: 10 | 1. Install the `braintrust` package: `pip install braintrust` 11 | 2. Set your Braintrust API key as an environment variable: 12 | ``` 13 | export BRAINTRUST_API_KEY=your_api_key 14 | ``` 15 | 3. 
By default, Braintrust is automatically enabled when the API key is present 16 | 17 | To explicitly enable Braintrust integration: 18 | ``` 19 | export BRAINTRUST_ORCHESTRA_ENABLED=true 20 | ``` 21 | 22 | To disable Braintrust integration inside Orchestra: 23 | ``` 24 | export BRAINTRUST_ORCHESTRA_ENABLED=false 25 | ``` 26 | 27 | You can also use `0` or `no` as values to disable it, or `1` or `yes` to enable it. Braintrust integration is automatically enabled when: 28 | 1. The `braintrust` package is installed 29 | 2. The `BRAINTRUST_API_KEY` environment variable is set with a valid API key 30 | 3. The `BRAINTRUST_ORCHESTRA_ENABLED` environment variable is either: 31 | - Set to enable it (`true`, `1`, or `yes`) 32 | - Not set at all (defaults to enabled when API key exists) 33 | 34 | When enabled, Braintrust will trace all tool calls and requests to OpenAI, OpenRouter, Groq, Together AI, and Deepseek, providing detailed logs and analytics in your Braintrust dashboard. -------------------------------------------------------------------------------- /docs/src/tools/amadeus_tools.md: -------------------------------------------------------------------------------- 1 | # Amadeus Tools 2 | 3 | The AmadeusTools class provides a comprehensive set of methods to interact with the Amadeus API for flight-related operations. It offers powerful functionality for searching flights, finding the cheapest travel dates, and getting flight inspiration. This class is designed to simplify the process of working with the Amadeus API, handling authentication and request formatting internally. 4 | 5 | ### Class Methods 6 | 7 | ##### _get_access_token() 8 | 9 | This private method retrieves the Amadeus API access token using the API key and secret stored in environment variables. It's used internally by other methods to authenticate API requests. 10 | 11 | ##### search_flights() 12 | 13 | Searches for flight offers using the Amadeus API. 
This method provides extensive flexibility in search parameters, allowing users to specify origin, destination, dates, number of travelers (including adults, children, and infants), travel class, non-stop preferences, currency, maximum price, and the number of results to return. 14 | 15 | ```python 16 | AmadeusTools.search_flights( 17 | origin="NYC", 18 | destination="LON", 19 | departure_date="2023-07-01", 20 | return_date="2023-07-15", 21 | adults=2, 22 | children=1, 23 | infants=0, 24 | travel_class="ECONOMY", 25 | non_stop=False, 26 | currency="USD", 27 | max_price=1000, 28 | max_results=10 29 | ) 30 | ``` 31 | 32 | ##### get_cheapest_date() 33 | 34 | Finds the cheapest travel dates for a given route using the Flight Offers Search API. This method is particularly useful for flexible travel planning, allowing users to identify the most cost-effective dates for their journey. 35 | 36 | ```python 37 | AmadeusTools.get_cheapest_date( 38 | origin="NYC", 39 | destination="PAR", 40 | departure_date="2023-08-01", 41 | return_date="2023-08-15", 42 | adults=2 43 | ) 44 | ``` 45 | 46 | ##### get_flight_inspiration() 47 | 48 | Retrieves flight inspiration using the Flight Inspiration Search API. This method is ideal for travelers who are open to various destinations, as it suggests travel options based on the origin city and optional price constraints. It's a powerful tool for discovering new travel opportunities and planning budget-friendly trips. 
49 | 50 | ```python 51 | AmadeusTools.get_flight_inspiration( 52 | origin="NYC", 53 | max_price=500, 54 | currency="USD" 55 | ) 56 | ``` 57 | 58 | Here's an example of how you might create a travel agent using these tools: 59 | 60 | ```python 61 | travel_agent = Agent( 62 | role="Travel Planner", 63 | goal="Plan the most cost-effective and enjoyable trips for clients", 64 | attributes="Knowledgeable about global destinations, budget-conscious, detail-oriented", 65 | tools={AmadeusTools.search_flights, AmadeusTools.get_flight_inspiration}, 66 | llm=OpenrouterModels.haiku 67 | ) 68 | ``` 69 | 70 | This travel agent can leverage the Amadeus tools to search for flights and get inspiration for travel destinations, making it a powerful assistant for travel planning tasks. 71 | 72 | ### Usage Notes 73 | 74 | To use the AmadeusTools class, you must set the AMADEUS_API_KEY and AMADEUS_API_SECRET environment variables. These credentials are essential for authenticating with the Amadeus API and are securely managed by the class. 75 | 76 | The class methods handle API authentication internally, abstracting away the complexity of token management. This allows developers to focus on making API calls and processing the returned data without worrying about the underlying authentication mechanism. 77 | 78 | All methods in the AmadeusTools class return data in the form of Python dictionaries, making it easy to work with the results in your application. The structure of the returned data closely mirrors the JSON responses from the Amadeus API, ensuring that you have access to all the details provided by the API. 79 | 80 | Error handling is built into these methods, with HTTP errors being caught and re-raised with additional context. This helps in debugging and handling potential issues that may arise during API interactions. These returned errors not only assist developers in debugging but also enable agents to self-correct, enhancing the robustness of the system. 
81 | 82 | -------------------------------------------------------------------------------- /docs/src/tools/audio_tools.md: -------------------------------------------------------------------------------- 1 | # Audio Tools 2 | 3 | The Audio Tools module provides two main classes: `TextToSpeechTools` and `WhisperTools`. These classes offer functionality for text-to-speech conversion and audio transcription/translation. They are designed to simplify the process of working with audio in your Orchestra applications, handling API interactions and audio processing internally. 4 | 5 | ## TextToSpeechTools 6 | 7 | The `TextToSpeechTools` class provides methods for converting text to speech using either the ElevenLabs API or the OpenAI API. 8 | 9 | ### Class Methods 10 | 11 | #### elevenlabs_text_to_speech() 12 | 13 | Converts text to speech using the ElevenLabs API and either plays the generated audio or saves it to a file. 14 | 15 | ```python 16 | TextToSpeechTools.elevenlabs_text_to_speech( 17 | text="Hello, world!", 18 | voice="Giovanni", 19 | output_file="output.mp3" 20 | ) 21 | ``` 22 | 23 | #### openai_text_to_speech() 24 | 25 | Generates speech from text using the OpenAI API and either saves it to a file or plays it aloud. 26 | 27 | ```python 28 | TextToSpeechTools.openai_text_to_speech( 29 | text="Hello, world!", 30 | voice="onyx", 31 | output_file="output.mp3" 32 | ) 33 | ``` 34 | 35 | ## WhisperTools 36 | 37 | The `WhisperTools` class provides methods for transcribing and translating audio using the OpenAI Whisper API. 38 | 39 | ### Class Methods 40 | 41 | #### whisper_transcribe_audio() 42 | 43 | Transcribes audio using the OpenAI Whisper API. 44 | 45 | ```python 46 | WhisperTools.whisper_transcribe_audio( 47 | audio_input="audio.mp3", 48 | model="whisper-1", 49 | language="en", 50 | response_format="json", 51 | temperature=0 52 | ) 53 | ``` 54 | 55 | #### whisper_translate_audio() 56 | 57 | Translates audio using the OpenAI Whisper API. 
58 | 59 | ```python 60 | WhisperTools.whisper_translate_audio( 61 | audio_input="audio.mp3", 62 | model="whisper-1", 63 | response_format="json", 64 | ) 65 | ``` 66 | 67 | ## Usage in Orchestra 68 | 69 | These audio tools can be integrated into your Orchestra agents to enable them to self determine when to speak: 70 | 71 | ```python 72 | voice_agent = Agent( 73 | role="Voice Assistant", 74 | goal="Assist users through voice interaction", 75 | attributes="Clear speaking voice, good listener, multilingual", 76 | tools={ 77 | TextToSpeechTools.openai_text_to_speech 78 | }, 79 | llm=OpenrouterModels.haiku 80 | ) 81 | ``` 82 | 83 | This voice agent can use the audio tools to respond with generated speech. The TextToSpeechTools can be used to give agents the ability to initiate speech output at their discretion. 84 | 85 | ## Direct Usage 86 | 87 | You can also use these tools in your main flow to handle audio inputs and outputs from agents and tasks: 88 | 89 | ```python 90 | def main(): 91 | conversation_history = [] 92 | temp_dir = tempfile.mkdtemp() 93 | audio_file = os.path.join(temp_dir, "recorded_audio.wav") 94 | 95 | print("Enter 'q' to quit.") 96 | 97 | while True: 98 | user_input = input("Press Enter to start recording (or 'q' to quit): ").lower() 99 | if user_input == 'q': 100 | print("Exiting...") 101 | break 102 | 103 | # Record user input via microphone upon 'enter' 104 | RecordingTools.record_audio(audio_file) 105 | 106 | # Transcribe the audio 107 | transcription = WhisperTools.whisper_transcribe_audio(audio_file) 108 | 109 | # Collect the text from the transcription 110 | user_message = transcription['text'] if isinstance(transcription, dict) else transcription 111 | 112 | # Add the user's message to the conversation history 113 | conversation_history.append(f"User: {user_message}") 114 | 115 | # Agent acts and responds to the user 116 | response = respond_task(user_message, conversation_history) 117 | conversation_history.append(f"Assistant: {response}") 
118 | print("Assistant's response:") 119 | print(response) 120 | 121 | # Read the agent response out loud 122 | TextToSpeechTools.elevenlabs_text_to_speech(text=response) 123 | 124 | # Clean up the temporary directory 125 | os.rmdir(temp_dir) 126 | 127 | if __name__ == "__main__": 128 | main() 129 | ``` 130 | 131 | ## Usage Notes 132 | 133 | - To use the `TextToSpeechTools` and `WhisperTools` classes, you must set the appropriate API keys in your environment variables (ELEVENLABS_API_KEY and OPENAI_API_KEY). 134 | - The `elevenlabs_text_to_speech` method requires the `elevenlabs` library to be installed. If not present, it will raise an ImportError with instructions to install. 135 | - The `openai_text_to_speech` method uses `pygame` for audio playback. Ensure it's installed in your environment. 136 | - All methods in these classes handle API authentication internally, abstracting away the complexity of token management. 137 | - Error handling is built into these methods, with specific exceptions raised for common issues like missing API keys or module import errors. 138 | 139 | By incorporating these audio tools into your Orchestra agents, you can get started with creating sophisticated voice-enabled applications and multi-modal AI assistants capable of processing and generating both text and speech. -------------------------------------------------------------------------------- /docs/src/tools/calculator_tools.md: -------------------------------------------------------------------------------- 1 | # Calculator Tools 2 | 3 | The CalculatorTools class provides a set of methods for performing basic and advanced math operations, date manipulation, and utility functions. It allows users to perform calculations, format dates, and retrieve the current time. 4 | 5 | ### Class Methods 6 | 7 | ##### basic_math() 8 | 9 | Performs basic and advanced math operations on multiple numbers. 
It takes an operation string and a list of numbers as arguments and returns the result of the operation. 10 | 11 | ```python 12 | CalculatorTools.basic_math( 13 | operation="add", 14 | args=[5, 3, 2] 15 | ) 16 | ``` 17 | 18 | ##### get_current_time() 19 | 20 | Retrieves the current UTC time in the format 'YYYY-MM-DD HH:MM:SS'. 21 | 22 | ```python 23 | CalculatorTools.get_current_time() 24 | ``` 25 | 26 | ##### add_days() 27 | 28 | Adds a specified number of days to a given date and returns the resulting date in 'YYYY-MM-DD' format. 29 | 30 | ```python 31 | CalculatorTools.add_days( 32 | date_str="2023-05-15", 33 | days=7 34 | ) 35 | ``` 36 | 37 | ##### days_between() 38 | 39 | Calculates the number of days between two dates provided in 'YYYY-MM-DD' format. 40 | 41 | ```python 42 | CalculatorTools.days_between( 43 | date1_str="2023-05-01", 44 | date2_str="2023-05-15" 45 | ) 46 | ``` 47 | 48 | ##### format_date() 49 | 50 | Converts a date string from one format to another. It takes the date string, input format, and desired output format as arguments. 51 | 52 | ```python 53 | CalculatorTools.format_date( 54 | date_str="2023-05-15", 55 | input_format="%Y-%m-%d", 56 | output_format="%B %d, %Y" 57 | ) 58 | ``` 59 | 60 | ### Usage Notes 61 | 62 | The basic_math() method supports various math operations, including 'add', 'subtract', 'multiply', 'divide', 'exponent', 'root', 'modulo', and 'factorial'. It requires at least one number for the operation, and some operations may require additional numbers. 63 | 64 | The date-related methods (add_days(), days_between(), format_date()) expect dates to be provided in the 'YYYY-MM-DD' format by default. The format_date() method allows you to specify custom input and output formats. 65 | 66 | The get_current_time() method returns the current UTC time as a string in the format 'YYYY-MM-DD HH:MM:SS'. 
67 | 68 | -------------------------------------------------------------------------------- /docs/src/tools/conversation_tools.md: -------------------------------------------------------------------------------- 1 | # Conversation Tools 2 | 3 | The ConversationTools class primarily facilitates human-in-the-loop processes within the Orchestra framework. It provides utility methods that enable human interaction, enhancing Orchestra's capabilities for creating aligned and responsive AI systems. These tools can be used whenever you need to incorporate human feedback, guidance, or oversight into your task-based workflows. 4 | 5 | ### Class Methods 6 | 7 | ##### ask_user(question: str) -> str 8 | 9 | Facilitates human-in-the-loop processes by allowing agents to request input from human users. This method is crucial for maintaining alignment in AI systems and creating effective AI assistants that can work alongside humans, seeking clarification or guidance when needed. 10 | 11 | ```python 12 | user_input = ConversationTools.ask_user("What specific criteria should I consider for this task?") 13 | print(f"User's criteria: {user_input}") 14 | ``` 15 | 16 | ### Usage in Tasks 17 | 18 | Here's an example of how you might use ConversationTools to create a human-aligned agent system for task execution: 19 | 20 | ```python 21 | from mainframe_orchestra import Agent, Task, OpenrouterModels, set_verbosity, ConversationTools, WebTools 22 | 23 | set_verbosity(1) 24 | 25 | research_assistant = Agent( 26 | role="Research Assistant", 27 | goal="Provide accurate and relevant information based on user requirements", 28 | attributes="Adaptable, detail-oriented, and responsive to user feedback", 29 | tools={WebTools.exa_search, ConversationTools.ask_user}, 30 | llm=OpenrouterModels.haiku 31 | ) 32 | 33 | def research_task(topic: str): 34 | initial_query = ConversationTools.ask_user(f"What specific aspects of {topic} should I focus on?") 35 | research_results = WebTools.exa_search(f"{topic} 
{initial_query}") 36 | 37 | feedback = ConversationTools.ask_user("Is this information sufficient, or should I refine my search?") 38 | if "refine" in feedback.lower(): 39 | refined_query = ConversationTools.ask_user("How should I adjust my search?") 40 | research_results += WebTools.exa_search(refined_query) 41 | 42 | return research_results 43 | 44 | Task(research_task, args=["artificial intelligence"]) 45 | ``` 46 | 47 | ### Key Concepts 48 | 49 | 1. **Human-in-the-Loop**: The `ask_user` method allows for seamless integration of human input, ensuring AI systems remain aligned with human preferences and values during task execution. 50 | 51 | 2. **Alignment and Course Correction**: By incorporating human feedback at key points, agents can adjust their approach and ensure they're meeting user expectations. 52 | 53 | 3. **Adaptive Task Execution**: The ability to seek clarification or additional guidance allows for more flexible and responsive task workflows. 54 | 55 | ### Usage Notes 56 | 57 | 1. Use human input strategically to maintain a balance between automation and human oversight in your task workflows. 58 | 2. Design your tasks with clear points for potential human intervention, allowing for course correction and alignment checks. 59 | 3. Consider using human input for critical decision points, validation of results, or when facing ambiguity in task requirements. 60 | 61 | By leveraging these tools, Orchestra can support the creation of AI systems that are not only efficient but also closely aligned with human intent and values. This approach ensures that AI assistants remain helpful, responsive, and adaptable to user needs throughout the task execution process. 
-------------------------------------------------------------------------------- /docs/src/tools/embeddings_tools.md: -------------------------------------------------------------------------------- 1 | # Embeddings Tools 2 | 3 | The EmbeddingsTools class provides a set of methods for generating embeddings using various AI platforms, including OpenAI, Cohere, and Mistral AI. It allows users to easily generate vector representations of text data for use in natural language processing tasks such as semantic search, clustering, and classification. 4 | 5 | ### Class Methods 6 | 7 | ##### get_openai_embeddings() 8 | 9 | Generates embeddings for the given input text using OpenAI's API. It takes the input text (either a single string or a list of strings) and the desired model as arguments, and returns a tuple containing the embeddings and the number of dimensions for the chosen model. 10 | 11 | ```python 12 | EmbeddingsTools.get_openai_embeddings( 13 | input_text="This is a sample text.", 14 | model="text-embedding-ada-002" 15 | ) 16 | ``` 17 | 18 | ##### get_cohere_embeddings() 19 | 20 | Generates embeddings for the given input text using Cohere's API. It takes the input text (either a single string or a list of strings), the desired model, and the input type as arguments, and returns a tuple containing the embeddings and the number of dimensions for the chosen model. 21 | 22 | ```python 23 | EmbeddingsTools.get_cohere_embeddings( 24 | input_text=["Text 1", "Text 2"], 25 | model="embed-english-v3.0", 26 | input_type="search_document" 27 | ) 28 | ``` 29 | 30 | ##### get_mistral_embeddings() 31 | 32 | Generates embeddings for the given input text using Mistral AI's API. It takes the input text (either a single string or a list of strings) and the desired model as arguments, and returns a tuple containing the embeddings and the number of dimensions for the chosen model. 
33 | 34 | ```python 35 | EmbeddingsTools.get_mistral_embeddings( 36 | input_text=["Text 1", "Text 2"], 37 | model="mistral-embed" 38 | ) 39 | ``` 40 | 41 | ### Usage Notes 42 | 43 | To use the EmbeddingsTools class, you need to have valid API keys for the respective AI platforms. The API keys should be set as environment variables: 44 | 45 | The class methods automatically handle the API requests and return the embeddings along with the number of dimensions for the chosen model. The input text can be provided as a single string or a list of strings, allowing you to generate embeddings for multiple texts in a single call. 46 | 47 | The available models for each platform are defined in the MODEL_DIMENSIONS dictionary within the EmbeddingsTools class. You can choose the desired model by passing its name as the model argument to the respective method. 48 | 49 | The generated embeddings are returned as a list of lists, where each inner list represents the embedding vector for a single input text. The number of dimensions for the chosen model is also returned as part of the tuple. 50 | 51 | In case of any errors during the API requests, the methods will raise appropriate exceptions with detailed error messages. Make sure to handle these exceptions in your code and provide appropriate error handling and logging mechanisms. 52 | 53 | It's important to note that generating embeddings can be computationally expensive, especially for large datasets. Consider the cost implications and rate limits of the respective AI platforms when using these methods in your applications. 54 | 55 | The EmbeddingsTools class provides a convenient way to generate embeddings from different AI platforms using a consistent interface. You can easily switch between platforms by calling the appropriate method and providing the necessary arguments. 56 | 57 | Remember to keep your API keys secure and avoid sharing them publicly. 
It's recommended to store them as environment variables or in a secure configuration file. 58 | 59 | -------------------------------------------------------------------------------- /docs/src/tools/faiss_tools.md: -------------------------------------------------------------------------------- 1 | # Local Vector Storage 2 | 3 | The FAISSTools class provides a comprehensive set of methods to interact with Facebook AI Similarity Search (FAISS) for efficient similarity search and clustering of dense vectors. It offers powerful functionality for creating, managing, and querying FAISS indexes, which are particularly useful for tasks involving large-scale vector similarity searches, such as semantic search, recommendation systems, and more. 4 | 5 | ### Class Methods 6 | 7 | ##### __init__(dimension: int, metric: str = "IP") 8 | 9 | Initializes the FAISSTools with the specified vector dimension and distance metric. The default metric is "IP" (Inner Product). 10 | 11 | ```python 12 | faiss_tools = FAISSTools(dimension=768, metric="IP") 13 | ``` 14 | 15 | Note: This method includes error handling for importing the `faiss` library. If `faiss` is not installed, it will raise an `ImportError` with instructions on how to install it. 16 | 17 | ##### create_index(index_type: str = "Flat") 18 | 19 | Creates a new FAISS index of the specified type. Currently supports "Flat" index type with both "IP" (Inner Product) and "L2" (Euclidean) metrics. 20 | 21 | ```python 22 | faiss_tools.create_index(index_type="Flat") 23 | ``` 24 | 25 | ##### load_index(index_path: str) 26 | 27 | Loads a FAISS index and its associated metadata from files. The method automatically appends the `.faiss` extension for the index file and `.metadata` for the metadata file. 28 | 29 | ```python 30 | faiss_tools.load_index("/path/to/product_embeddings") 31 | ``` 32 | 33 | ##### save_index(index_path: str) 34 | 35 | Saves the current FAISS index and its metadata to files. 
The method automatically appends the `.faiss` extension for the index file and `.metadata` for the metadata file. 36 | 37 | ```python 38 | faiss_tools.save_index("/path/to/save/product_embeddings") 39 | ``` 40 | 41 | ##### add_vectors(vectors: np.ndarray) 42 | 43 | Adds vectors to the FAISS index. Automatically normalizes vectors if using Inner Product similarity. 44 | 45 | ```python 46 | vectors = np.random.rand(100, 768) # 100 vectors of dimension 768 47 | faiss_tools.add_vectors(vectors) 48 | ``` 49 | 50 | ##### search_vectors(query_vectors: np.ndarray, top_k: int = 10) 51 | 52 | Searches for similar vectors in the FAISS index, returning the top-k results. 53 | 54 | ```python 55 | query = np.random.rand(1, 768) # 1 query vector of dimension 768 56 | distances, indices = faiss_tools.search_vectors(query, top_k=5) 57 | ``` 58 | 59 | ##### remove_vectors(ids: np.ndarray) 60 | 61 | Removes vectors from the FAISS index by their IDs. 62 | 63 | ```python 64 | ids_to_remove = np.array([1, 3, 5]) 65 | faiss_tools.remove_vectors(ids_to_remove) 66 | ``` 67 | 68 | ##### get_vector_count() 69 | 70 | Returns the number of vectors in the FAISS index. 71 | 72 | ```python 73 | count = faiss_tools.get_vector_count() 74 | ``` 75 | 76 | ##### set_metadata(key: str, value: Any) 77 | 78 | Sets metadata for the index. 79 | 80 | ```python 81 | faiss_tools.set_metadata("description", "Product embeddings index") 82 | ``` 83 | 84 | ##### get_metadata(key: str) 85 | 86 | Retrieves metadata from the index. 87 | 88 | ```python 89 | description = faiss_tools.get_metadata("description") 90 | ``` 91 | 92 | ##### set_embedding_info(provider: str, model: str) 93 | 94 | Sets the embedding provider and model information. 95 | 96 | ```python 97 | faiss_tools.set_embedding_info("openai", "text-embedding-ada-002") 98 | ``` 99 | 100 | ##### normalize_vector(vector: np.ndarray) 101 | 102 | A static method that normalizes a vector to unit length. This is used internally for "IP" metric calculations. 
103 | 104 | ```python 105 | normalized_vector = FAISSTools.normalize_vector(vector) 106 | ``` 107 | 108 | ### Understanding Index File Naming 109 | 110 | When working with FAISS indexes using the FAISSTools class, the index files are saved with specific extensions. The main index file is saved with a `.faiss` extension, while the associated metadata is saved with a `.metadata` extension. 111 | 112 | When using `save_index()` or `load_index()`, you should provide the base filename without any extension. The method will automatically append the correct extensions when saving or loading files. 113 | 114 | For example, if you save an index as "product_embeddings": 115 | - The main index file will be saved as "product_embeddings.faiss" 116 | - The metadata file will be saved as "product_embeddings.metadata" 117 | 118 | To load this index later, you would use: 119 | 120 | ```python 121 | faiss_tools.load_index("/path/to/product_embeddings") 122 | ``` 123 | 124 | The method will automatically look for "product_embeddings.faiss" and "product_embeddings.metadata" files. 125 | 126 | ### Usage Notes 127 | 128 | To use the FAISSTools class, you need to have FAISS installed. You can install it using pip: 129 | 130 | ```bash 131 | pip install faiss-cpu # for CPU-only version 132 | # or 133 | pip install faiss-gpu # for GPU support 134 | ``` 135 | 136 | The FAISSTools class is designed to work with numpy arrays for vector operations. Make sure you have numpy installed and imported in your project. 137 | 138 | When using Inner Product (IP) similarity, vectors are automatically normalized to unit length before being added to the index or used for querying. This ensures consistent similarity calculations. 139 | 140 | Error handling is built into these methods, with appropriate exceptions being raised for common issues such as dimension mismatches or file not found errors. 
141 | 142 | Here's an example of how you might create a semantic search agent using these tools: 143 | 144 | ```python 145 | semantic_search_agent = Agent( 146 | role="Semantic Search Expert", 147 | goal="Perform efficient and accurate semantic searches on large datasets", 148 | attributes="Knowledgeable about vector embeddings and similarity search algorithms", 149 | tools={FAISSTools.load_index, FAISSTools.search_vectors}, 150 | llm=OpenrouterModels.haiku 151 | ) 152 | 153 | def semantic_search_task(agent, query, index_path): 154 | return Task.create( 155 | agent=agent, 156 | context=f"FAISS index path: {index_path}\nQuery: {query}", 157 | instruction="Load the FAISS index, perform a semantic search for the given query, and return the top 5 most similar results." 158 | ) 159 | 160 | # Usage 161 | index_path = "/path/to/your/specific_index_name" 162 | query = "Example search query" 163 | results = semantic_search_task(semantic_search_agent, query, index_path) 164 | print(results) 165 | ``` 166 | 167 | This semantic search agent can leverage the FAISSTools to load pre-built indexes, perform similarity searches, and return relevant results based on vector embeddings. 168 | 169 | ### Best Practices 170 | 171 | 1. **Index Creation**: Choose the appropriate index type based on your dataset size and performance requirements. The "Flat" index is suitable for small to medium-sized datasets, but for larger datasets, consider using more advanced index types provided by FAISS. 172 | 173 | 2. **Vector Normalization**: When using Inner Product similarity, vectors are automatically normalized. However, if you're using L2 distance, consider normalizing your vectors before adding them to the index for consistent results. 174 | 175 | 3. **Metadata Management**: Use the metadata functionality to store important information about your index, such as the embedding model used, dataset description, or any other relevant details. 176 | 177 | 4. 
**Error Handling**: Always handle potential exceptions, especially when loading indexes or performing searches with user-provided queries. 178 | 179 | 5. **Performance Optimization**: For large-scale applications, consider using GPU-enabled FAISS and experiment with different index types and parameters to optimize performance. 180 | 181 | 6. **Index Persistence**: Regularly save your index to disk, especially after adding or removing vectors, to ensure data persistence and quick recovery in case of system failures. 182 | 183 | By following these practices and leveraging the full capabilities of the FAISSTools class, you can build powerful and efficient similarity search systems within your Orchestra projects. -------------------------------------------------------------------------------- /docs/src/tools/file_tools.md: -------------------------------------------------------------------------------- 1 | # File Tools 2 | 3 | The FileTools class provides a set of methods for performing various file-related operations, including saving code to files, generating directory trees, reading file contents, and searching within files of different formats (CSV, JSON, XML, YAML). 4 | 5 | ### Class Methods 6 | 7 | ##### save_code_to_file() 8 | 9 | Saves the given code to a file at the specified path. It creates the necessary directories if they don't exist and handles potential errors that may occur during the file saving process. 10 | 11 | ```python 12 | FileTools.save_code_to_file( 13 | code="print('Hello, World!')", 14 | file_path="path/to/file.py" 15 | ) 16 | ``` 17 | 18 | ##### generate_directory_tree() 19 | 20 | Recursively generates a file structure dictionary for the given base path. It traverses the directory tree, ignoring specified files and directories, and returns a nested dictionary representing the file structure. 
Each directory is represented by a dict with 'name', 'type', and 'children' keys, and each file is represented by a dict with 'name', 'type', and 'contents' keys. 21 | 22 | ```python 23 | file_structure = FileTools.generate_directory_tree( 24 | base_path="path/to/directory", 25 | additional_ignore=[".git", "temp"] 26 | ) 27 | ``` 28 | 29 | ##### read_file_contents() 30 | 31 | Retrieves the contents of a file at the specified path. It attempts to read the file using UTF-8 encoding and falls back to ISO-8859-1 encoding if necessary. It returns the file contents as a string if successfully read, or None if an error occurs. 32 | 33 | ```python 34 | file_contents = FileTools.read_file_contents("path/to/file.txt") 35 | ``` 36 | 37 | ##### read_csv() 38 | 39 | Reads a CSV file and returns its contents as a list of dictionaries, where each dictionary represents a row in the CSV. It uses the csv module to parse the CSV file and handles potential errors that may occur during the file reading process. 40 | 41 | ```python 42 | csv_data = FileTools.read_csv("path/to/file.csv") 43 | ``` 44 | 45 | ##### read_json() 46 | 47 | Reads a JSON file and returns its contents as a dictionary or a list, depending on the structure of the JSON data. It uses the json module to parse the JSON file and handles potential errors that may occur during the file reading process. 48 | 49 | ```python 50 | json_data = FileTools.read_json("path/to/file.json") 51 | ``` 52 | 53 | ##### read_xml() 54 | 55 | Reads an XML file and returns its contents as an ElementTree object. It uses the xml.etree.ElementTree module to parse the XML file and handles potential errors that may occur during the file reading process. 56 | 57 | ```python 58 | xml_data = FileTools.read_xml("path/to/file.xml") 59 | ``` 60 | 61 | ##### read_yaml() 62 | 63 | Reads a YAML file and returns its contents as a dictionary or a list, depending on the structure of the YAML data. 
It uses the yaml module to parse the YAML file and handles potential errors that may occur during the file reading process. 64 | 65 | ```python 66 | yaml_data = FileTools.read_yaml("path/to/file.yaml") 67 | ``` 68 | 69 | ##### search_csv() 70 | 71 | Searches for a specific value in a CSV file and returns matching rows as a list of dictionaries. It uses the pandas library to read the CSV file and perform the search based on the specified column and value. 72 | 73 | ```python 74 | matching_rows = FileTools.search_csv( 75 | file_path="path/to/file.csv", 76 | search_column="name", 77 | search_value="John" 78 | ) 79 | ``` 80 | 81 | ##### search_json() 82 | 83 | Searches for a specific key-value pair in a JSON structure and returns matching items as a list. It recursively traverses the JSON data and appends matching items to the results list. 84 | 85 | ```python 86 | matching_items = FileTools.search_json( 87 | data=json_data, 88 | search_key="age", 89 | search_value=30 90 | ) 91 | ``` 92 | 93 | ##### search_xml() 94 | 95 | Searches for specific elements in an XML structure based on tag name, attribute name, and attribute value. It uses the xml.etree.ElementTree module to traverse the XML tree and find matching elements. 96 | 97 | ```python 98 | matching_elements = FileTools.search_xml( 99 | root=xml_data, 100 | tag="book", 101 | attribute="genre", 102 | value="fiction" 103 | ) 104 | ``` 105 | 106 | ##### search_yaml() 107 | 108 | Searches for a specific key-value pair in a YAML structure and returns matching items as a list. It reuses the search_json() method since YAML is parsed into Python data structures. 109 | 110 | ```python 111 | matching_items = FileTools.search_yaml( 112 | data=yaml_data, 113 | search_key="name", 114 | search_value="Alice" 115 | ) 116 | ``` 117 | 118 | ### Usage Notes 119 | 120 | The FileTools class provides a convenient way to perform various file-related operations in Python. 
It offers methods for saving code to files, generating directory trees, reading file contents, and searching within files of different formats. 121 | 122 | When using the save_code_to_file() method, ensure that you have the necessary write permissions for the specified file path. The method will create the necessary directories if they don't exist. 123 | 124 | The generate_directory_tree() method allows you to generate a nested dictionary representation of a directory structure. You can specify additional files or directories to ignore using the additional_ignore parameter. 125 | 126 | The read_file_contents() method attempts to read the file using UTF-8 encoding and falls back to ISO-8859-1 encoding if necessary. It returns the file contents as a string if successfully read, or None if an error occurs. 127 | 128 | The read_csv(), read_json(), read_xml(), and read_yaml() methods provide convenient ways to read files of different formats and return their contents as Python data structures. 129 | 130 | The search_csv(), search_json(), search_xml(), and search_yaml() methods allow you to search for specific values or elements within files of different formats. They return matching items as lists. 131 | 132 | When using the file reading and searching methods, ensure that the specified file paths are correct and that you have the necessary read permissions for those files. 133 | 134 | The FileTools class provides a set of static methods, which means you can directly call them using the class name without creating an instance of the class. 135 | 136 | Remember to handle potential errors and exceptions that may occur during file operations, such as file not found errors or permission errors. The methods in the FileTools class raise appropriate exceptions in case of errors, which you can catch and handle accordingly in your code. 
137 | 138 | -------------------------------------------------------------------------------- /docs/src/tools/fred_tools.md: -------------------------------------------------------------------------------- 1 | # Fred Tools 2 | 3 | The FredTools class provides a set of methods to interact with the Federal Reserve Economic Data (FRED) API for economic data analysis. It offers powerful functionality for analyzing economic indicators, yield curves, and economic news sentiment. This class is designed to simplify the process of working with the FRED API, handling data retrieval and analysis internally. 4 | 5 | ### Class Methods 6 | 7 | ##### economic_indicator_analysis() 8 | 9 | Performs a comprehensive analysis of economic indicators using the FRED API. This method provides detailed statistics and trends for specified economic indicators over a given time period. 10 | 11 | ```python 12 | FredTools.economic_indicator_analysis( 13 | indicator_ids=["GDP", "UNRATE", "CPIAUCSL"], 14 | start_date="2020-01-01", 15 | end_date="2023-12-31" 16 | ) 17 | ``` 18 | 19 | ##### yield_curve_analysis() 20 | 21 | Analyzes the US Treasury yield curve using data from the FRED API. This method is particularly useful for assessing economic conditions and potential future trends based on the shape of the yield curve. 22 | 23 | ```python 24 | FredTools.yield_curve_analysis( 25 | treasury_maturities=["DGS1MO", "DGS3MO", "DGS6MO", "DGS1", "DGS2", "DGS5", "DGS10", "DGS30"], 26 | start_date="2022-01-01", 27 | end_date="2023-12-31" 28 | ) 29 | ``` 30 | 31 | ##### economic_news_sentiment_analysis() 32 | 33 | Performs sentiment analysis on economic news series data from the FRED API. This method helps in understanding the overall sentiment in economic news over a specified time period. 
34 | 35 | ```python 36 | FredTools.economic_news_sentiment_analysis( 37 | news_series_id="STLFSI2", 38 | start_date="2022-01-01", 39 | end_date="2023-12-31" 40 | ) 41 | ``` 42 | 43 | Here's an example of how you might create an economic analyst agent using these tools: 44 | 45 | ```python 46 | economic_analyst = Agent( 47 | role="Economic Analyst", 48 | goal="Provide comprehensive economic analysis and insights", 49 | attributes="Data-driven, detail-oriented, proficient in economic theory and statistics", 50 | tools={FredTools.economic_indicator_analysis, FredTools.yield_curve_analysis, FredTools.economic_news_sentiment_analysis}, 51 | llm=OpenrouterModels.haiku 52 | ) 53 | ``` 54 | 55 | This economic analyst agent can leverage the Fred tools to analyze economic indicators, assess yield curves, and evaluate economic news sentiment, making it a powerful assistant for economic analysis tasks. 56 | 57 | ### Usage Notes 58 | 59 | To use the FredTools class, you must set the FRED_API_KEY environment variable. This credential is essential for authenticating with the FRED API and is securely managed by the class. 60 | 61 | The class methods handle API authentication and data retrieval internally, abstracting away the complexity of working directly with the FRED API. This allows developers to focus on analyzing the economic data without worrying about the underlying data retrieval mechanism. 62 | 63 | All methods in the FredTools class return data in the form of Python dictionaries, making it easy to work with the results in your application. The structure of the returned data includes comprehensive statistics and analysis results, ensuring that you have access to detailed insights from the economic data. 64 | 65 | Error handling is built into these methods, with potential issues such as missing data or API errors being handled gracefully. This helps in debugging and handling potential issues that may arise during data retrieval and analysis. 
66 | 67 | ### Dependencies 68 | 69 | The FredTools class requires the following additional libraries: 70 | 71 | - pandas: For data manipulation and analysis 72 | - fredapi: For interacting with the FRED API 73 | 74 | These dependencies can be installed using pip: 75 | 76 | ```bash 77 | pip install pandas fredapi 78 | ``` 79 | 80 | ### Example Task 81 | 82 | Here's an example of how you might create a task for the economic analyst agent using the FredTools: 83 | 84 | ```python 85 | from mainframe_orchestra import Task, Agent, FredTools 86 | 87 | financial_analyst = Agent( 88 | role="Financial Analyst", 89 | goal="Provide comprehensive financial analysis and insights", 90 | attributes="Data-driven, detail-oriented, proficient in financial theory and statistics", 91 | tools={FredTools.economic_indicator_analysis, FredTools.yield_curve_analysis, FredTools.economic_news_sentiment_analysis}, 92 | llm=OpenrouterModels.haiku 93 | ) 94 | 95 | def analyze_economic_conditions(agent): 96 | return Task.create( 97 | agent=financial_analyst, 98 | instruction="""Analyze current economic conditions using the following steps: 99 | 1. Analyze key economic indicators (GDP, Unemployment Rate, and CPI) for the past 3 years. 100 | 2. Examine the yield curve for signs of potential recession. 101 | 3. Assess the overall sentiment in economic news for the past year. 102 | Provide a comprehensive report summarizing your findings and potential economic outlook.""" 103 | ) 104 | 105 | # Usage 106 | economic_report = analyze_economic_conditions(economic_analyst) 107 | print(economic_report) 108 | ``` 109 | 110 | This task demonstrates how the economic analyst agent can use the FredTools to perform a comprehensive analysis of economic conditions, leveraging data from multiple sources and methods to provide valuable insights. 
-------------------------------------------------------------------------------- /docs/src/tools/langchain_tools.md: -------------------------------------------------------------------------------- 1 | # LangchainTools 2 | 3 | ## Introduction 4 | 5 | LangchainTools is an integration class within the Mainframe-Orchestra framework that bridges the gap between Orchestra and Langchain's extensive toolkit ecosystem. This integration allows developers to incorporate Langchain's diverse set of tools into Orchestra agents, significantly expanding the capabilities of agents and workflows in Orchestra. 6 | 7 | By leveraging LangchainTools, developers can access a wide array of pre-built tools from Langchain, ranging from web search engines and language translators to code interpreters and database connectors. This integration not only enhances the functionality of Orchestra agents but also reduces development time by eliminating the need to recreate these tools from scratch. 8 | 9 | **Note:** LangchainTools is built on top of the Langchain framework, and it is not a Langchain library. It is an Orchestra library to wrap and use Langchain tools in Orchestra agents. It is in Beta and under development, so please bear with the rough edges and feel free to submit an issue or PR. 10 | 11 | ## How It Works 12 | 13 | LangchainTools acts as a wrapper and adapter for Langchain tools, making them compatible with Orchestra's agent and task system. It dynamically imports Langchain tools, wraps them in an Orchestra-friendly interface, and provides methods to list, retrieve, and use these tools within Orchestra agents. 14 | 15 | The class handles the complexities of tool instantiation, input formatting, and output processing, presenting a unified and simplified API for Orchestra users. This abstraction allows developers to focus on designing their AI workflows rather than worrying about the intricacies of tool integration. 16 | 17 | ## Why Use LangchainTools 18 | 19 | 1. 
**Expanded Capabilities**: Access a vast library of pre-built tools to enhance your AI agents' abilities. 20 | 2. **Simplified Integration**: Easily incorporate complex functionalities into your Orchestra workflows without extensive coding. 21 | 3. **Flexibility**: Mix and match tools from different sources, combining Orchestra's native tools with Langchain's offerings. 22 | 4. **Time-Saving**: Leverage existing, well-tested tools instead of developing them from scratch. 23 | 5. **Community Support**: Benefit from the ongoing development and improvements in both the Orchestra and Langchain communities. 24 | 25 | By using LangchainTools, developers can create more versatile, powerful, and efficient AI agents capable of handling a wide range of tasks across various domains. 26 | 27 | 28 | ## Features 29 | 30 | - List all available Langchain tools 31 | - Retrieve information about specific tools 32 | - Wrap and use Langchain tools in Orchestra agents 33 | 34 | ## Usage 35 | 36 | ### Listing Available Tools 37 | 38 | To get a list of all available Langchain tools, use the `list_available_tools()` method: 39 | 40 | ```python 41 | from mainframe_orchestra import LangchainTools 42 | 43 | available_tools = LangchainTools.list_available_tools() 44 | print("Available tools:", available_tools) 45 | ``` 46 | 47 | This will output a list of available tools, which may include: 48 | 49 | ```bash 50 | ['AINAppOps', 51 | 'AINOwnerOps', 52 | 'AINRuleOps', 53 | 'AINTransfer', 54 | 'AINValueOps', 55 | 'AIPluginTool', 56 | 'APIOperation', 57 | 'ArxivQueryRun', 58 | 'AskNewsSearch', 59 | 'AzureAiServicesDocumentIntelligenceTool', 60 | 'AzureAiServicesImageAnalysisTool', 61 | 'AzureAiServicesSpeechToTextTool', 62 | 'AzureAiServicesTextToSpeechTool', 63 | 'AzureAiServicesTextAnalyticsForHealthTool', 64 | 'AzureCogsFormRecognizerTool', 65 | 'AzureCogsImageAnalysisTool', 66 | 'AzureCogsSpeech2TextTool', 67 | 'AzureCogsText2SpeechTool', 68 | 'AzureCogsTextAnalyticsHealthTool', 69 | 
'BalanceSheets', 70 | 'BaseGraphQLTool', 71 | 'BaseRequestsTool', 72 | 'BaseSQLDatabaseTool', 73 | 'BaseSparkSQLTool', 74 | 'BaseTool', 75 | 'BearlyInterpreterTool', 76 | ... 77 | ``` 78 | 79 | ### Getting Tool Information 80 | 81 | To retrieve information about a specific tool, use the `get_tool_info()` method: 82 | 83 | ```python 84 | tool_info = LangchainTools.get_tool_info("DuckDuckGoSearchRun") 85 | print("DuckDuckGoSearchRun info:", tool_info) 86 | ``` 87 | 88 | This will return a brief description of the tool. 89 | ```bash 90 | DuckDuckGoSearchRun info: {'name': 'duckduckgo_search', 'description': 'A wrapper around DuckDuckGo Search. Useful for when you need to answer questions about current events. Input should be a search query.', 'module_path': 'langchain_community.tools.ddg_search.tool'} 91 | ``` 92 | 93 | ### Retrieving and Using Tools 94 | 95 | To assign a specific langchain tool to your Orchestra agent, use the `get_tool()` method: 96 | 97 | ```python 98 | from mainframe_orchestra import LangchainTools 99 | 100 | # Retrieve the DuckDuckGoSearchRun tool 101 | duckduckgosearch = LangchainTools.get_tool("DuckDuckGoSearchRun") 102 | ``` 103 | 104 | ## Example 105 | 106 | Here's a complete example demonstrating how to use the LangchainTools class: 107 | 108 | ```python 109 | import os 110 | from mainframe_orchestra import Task, Agent, OpenrouterModels, set_verbosity, LangchainTools 111 | 112 | set_verbosity(1) 113 | 114 | # Get the DuckDuckGoSearchRun tool 115 | duckduckgosearch = LangchainTools.get_tool("DuckDuckGoSearchRun") 116 | 117 | # Create an agent with the DuckDuckGoSearchRun tool 118 | web_researcher = Agent( 119 | role="Web Searcher", 120 | goal="Search the web for information", 121 | tools={duckduckgosearch}, 122 | llm=OpenrouterModels.llama_3_1_405b_instruct 123 | ) 124 | 125 | # Create and execute a task 126 | task_result = Task.create( 127 | agent=web_researcher, 128 | instruction="Search the web for information about NVDA and summarize the 
results." 129 | ) 130 | 131 | print(task_result) 132 | ``` 133 | 134 | ## API Reference 135 | 136 | ### `LangchainTools.list_available_tools() -> List[str]` 137 | 138 | Returns a list of names of all available Langchain tools. 139 | 140 | ### `LangchainTools.get_tool_info(tool_name: str) -> Dict[str, str]` 141 | 142 | Retrieves information about a specific Langchain tool. 143 | 144 | - `tool_name` (str): The name of the tool to retrieve information for. 145 | 146 | Returns a dictionary containing the tool's name, description, and module path. 147 | 148 | Raises: 149 | - `ValueError`: If an unknown tool name is provided. 150 | 151 | ### `LangchainTools.get_tool(tool_name: str) -> Callable` 152 | 153 | Retrieves and wraps a specified Langchain tool. 154 | 155 | - `tool_name` (str): Name of the Langchain tool to retrieve. 156 | 157 | Returns a wrapped Langchain tool as a callable function. 158 | 159 | Raises: 160 | - `ValueError`: If an unknown tool name is provided. 161 | - `ImportError`: If Langchain dependencies are not installed. 162 | 163 | ## Notes 164 | 165 | - The LangchainTools class dynamically imports and wraps Langchain tools, making them compatible with the Orchestra framework. 166 | - Ensure that you have the necessary permissions and API keys set up for the specific Langchain tools you intend to use. 167 | - Some tools may require additional configuration or dependencies. Refer to the Langchain documentation for specific tool requirements. 168 | - To use LangchainTools, you need to install the required packages with `pip install Orchestra[langchain_tools]`. 169 | -------------------------------------------------------------------------------- /docs/src/tools/linear_tools.md: -------------------------------------------------------------------------------- 1 | # Linear Tools 2 | 3 | The LinearTools class provides methods for interacting with the Linear API, allowing you to manage issues, workflows, and team-related operations. 
4 | 5 | ### Configuration 6 | 7 | Before using Linear API operations, set up your environment variables: 8 | 9 | ```bash 10 | export LINEAR_API_KEY=your_linear_api_key 11 | export LINEAR_TEAM_ID=your_team_id 12 | ``` 13 | 14 | ### Class Methods 15 | 16 | ##### get_team_issues(team_id: str = None, status: Optional[str] = None) 17 | 18 | Lists issues for a specific team, optionally filtered by status. 19 | 20 | ```python 21 | # Get all team issues 22 | issues = LinearTools.get_team_issues() 23 | 24 | # Get issues with specific status 25 | issues = LinearTools.get_team_issues(status="In Progress") 26 | 27 | # Get issues for a different team 28 | issues = LinearTools.get_team_issues(team_id="TEAM_ID") 29 | ``` 30 | 31 | ##### update_issue_status(issue_id: str, status_id: str) 32 | 33 | Updates the status of an issue. 34 | 35 | ```python 36 | result = LinearTools.update_issue_status("ISSUE_ID", "STATUS_ID") 37 | ``` 38 | 39 | ##### search_issues(search_query: str) 40 | 41 | Searches for issues using a text query. 42 | 43 | ```python 44 | issues = LinearTools.search_issues("bug in authentication") 45 | ``` 46 | 47 | ##### get_team_by_name(team_name: str) 48 | 49 | Gets team information by team key/name. Also prints available teams for reference. 50 | 51 | ```python 52 | team = LinearTools.get_team_by_name("ENG") 53 | ``` 54 | 55 | ##### get_workflow_states(team_id: str = None) 56 | 57 | Gets all workflow states for a team. 58 | 59 | ```python 60 | states = LinearTools.get_workflow_states() 61 | ``` 62 | 63 | ##### create_issue(title: str, description: str, team_id: str = None, priority: Optional[int] = None, state_id: Optional[str] = None) 64 | 65 | Creates a new issue in Linear. 
66 | 67 | ```python 68 | issue = LinearTools.create_issue( 69 | title="Fix authentication bug", 70 | description="Users are unable to log in when...", 71 | priority=4, # 4 is urgent 72 | state_id="STATE_ID" # Optional initial state 73 | ) 74 | ``` 75 | 76 | ### Return Data Structures 77 | 78 | ##### Issue Data 79 | ```python 80 | { 81 | "id": "ISSUE_ID", 82 | "title": "Issue title", 83 | "state": { 84 | "name": "In Progress" 85 | }, 86 | "priority": 2, 87 | "assignee": { 88 | "name": "John Doe" 89 | }, 90 | "url": "https://linear.app/..." 91 | } 92 | ``` 93 | 94 | ##### Team Data 95 | ```python 96 | { 97 | "id": "TEAM_ID", 98 | "name": "Engineering", 99 | "key": "ENG" 100 | } 101 | ``` 102 | 103 | ##### Workflow State Data 104 | ```python 105 | { 106 | "id": "STATE_ID", 107 | "name": "In Progress", 108 | "type": "started" 109 | } 110 | ``` 111 | 112 | ### Usage Notes 113 | 114 | 1. **Authentication**: 115 | - Requires `LINEAR_API_KEY` environment variable 116 | - Team operations require `LINEAR_TEAM_ID` environment variable 117 | - Both are validated on class initialization 118 | 119 | 2. **Error Handling**: 120 | - Methods return error messages as strings when operations fail 121 | - Successful operations return appropriate data structures 122 | - Environment variable missing errors are raised as ValueError 123 | 124 | 3. **Priority Levels**: 125 | - 0: No priority 126 | - 1: Low 127 | - 2: Medium 128 | - 3: High 129 | - 4: Urgent 130 | 131 | 4. **Team Management**: 132 | - Use `get_team_by_name()` to find team IDs 133 | - Team ID can be provided either as UUID or team key 134 | - Default team from `LINEAR_TEAM_ID` is used when team_id is not specified 135 | 136 | 5. **Issue Creation**: 137 | - Title and description are required 138 | - Priority and state_id are optional 139 | - Team ID defaults to environment variable if not provided 140 | - Returns issue data including URL and ID 141 | 142 | 6. 
**State Management**: 143 | - Use `get_workflow_states()` to find available states 144 | - State IDs are required for updating issue status 145 | - States are team-specific 146 | 147 | The LinearTools class provides a streamlined interface to Linear's API while handling authentication, error checking, and response processing automatically. -------------------------------------------------------------------------------- /docs/src/tools/pinecone_tools.md: -------------------------------------------------------------------------------- 1 | # Pinecone Tools 2 | 3 | The PineconeTools class provides a comprehensive set of methods to interact with the Pinecone vector database API. It offers powerful functionality for creating and managing indexes, upserting and querying vectors, and handling vector metadata. This class is designed to simplify the process of working with the Pinecone API, handling authentication and request formatting internally. 4 | 5 | ### Class Methods 6 | 7 | ##### __init__(api_key: str = None) 8 | 9 | Initializes the PineconeTools class with the Pinecone API key. If not provided, it attempts to use the PINECONE_API_KEY environment variable. 10 | 11 | ##### create_index(name: str, dimension: int, metric: str = "cosine", cloud: str = "aws", region: str = "us-east-1") 12 | 13 | Creates a new Pinecone index with the specified parameters. 14 | 15 | ```python 16 | PineconeTools().create_index( 17 | name="my-index", 18 | dimension=1536, 19 | metric="cosine", 20 | cloud="aws", 21 | region="us-east-1" 22 | ) 23 | ``` 24 | 25 | ##### delete_index(name: str) 26 | 27 | Deletes a Pinecone index with the given name. 28 | 29 | ```python 30 | PineconeTools().delete_index("my-index") 31 | ``` 32 | 33 | ##### list_indexes() 34 | 35 | Lists all available Pinecone indexes. 
36 | 37 | ```python 38 | indexes = PineconeTools().list_indexes() 39 | print(indexes) 40 | ``` 41 | 42 | ##### upsert_vectors(index_name: str, vectors: List[Dict[str, Any]]) 43 | 44 | Upserts vectors into a Pinecone index. 45 | 46 | ```python 47 | vectors = [ 48 | {"id": "vec1", "values": [0.1, 0.2, 0.3], "metadata": {"key": "value"}}, 49 | {"id": "vec2", "values": [0.4, 0.5, 0.6], "metadata": {"key": "value2"}} 50 | ] 51 | PineconeTools().upsert_vectors("my-index", vectors) 52 | ``` 53 | 54 | ##### query_index(index_name: str, query_vector: List[float], top_k: int = 10, filter: Dict = None, include_metadata: bool = True) 55 | 56 | Queries a Pinecone index for similar vectors. 57 | 58 | ```python 59 | query_vector = [0.1, 0.2, 0.3] 60 | results = PineconeTools().query_index("my-index", query_vector, top_k=5) 61 | print(results) 62 | ``` 63 | 64 | ##### delete_vectors(index_name: str, ids: List[str]) 65 | 66 | Deletes vectors from a Pinecone index by their IDs. 67 | 68 | ```python 69 | PineconeTools().delete_vectors("my-index", ["vec1", "vec2"]) 70 | ``` 71 | 72 | ##### update_vector_metadata(index_name: str, id: str, metadata: Dict[str, Any]) 73 | 74 | Updates the metadata of a vector in a Pinecone index. 75 | 76 | ```python 77 | new_metadata = {"key": "updated_value"} 78 | PineconeTools().update_vector_metadata("my-index", "vec1", new_metadata) 79 | ``` 80 | 81 | ##### describe_index_stats(index_name: str) 82 | 83 | Gets statistics about a Pinecone index. 84 | 85 | ```python 86 | stats = PineconeTools().describe_index_stats("my-index") 87 | print(stats) 88 | ``` 89 | 90 | ##### normalize_vector(vector: List[float]) 91 | 92 | A static method that normalizes a vector to unit length. 93 | 94 | ```python 95 | normalized_vector = PineconeTools.normalize_vector([1.0, 2.0, 3.0]) 96 | print(normalized_vector) 97 | ``` 98 | 99 | ##### get_pinecone_index(name: str) 100 | 101 | Returns a Pinecone index object for the given index name. 
102 | 103 | ```python 104 | index = PineconeTools().get_pinecone_index("my-index") 105 | ``` 106 | 107 | ### Error Handling 108 | 109 | All methods in the PineconeTools class include error handling. If an operation fails, an exception will be raised with a descriptive error message. It's recommended to wrap calls to these methods in try-except blocks to handle potential errors gracefully. 110 | 111 | ### Environment Variables 112 | 113 | The `PineconeTools` class prioritizes the use of the `PINECONE_API_KEY` environment variable. If you prefer not to pass the API key explicitly when initializing the class, ensure this environment variable is set: 114 | 115 | ```bash 116 | export PINECONE_API_KEY="your-api-key-here" 117 | ``` 118 | 119 | ### Additional Usage Examples 120 | 121 | Here are some more advanced usage examples: 122 | 123 | ```python 124 | # Create an index with custom parameters 125 | PineconeTools().create_index( 126 | name="custom-index", 127 | dimension=768, 128 | metric="dotproduct", 129 | cloud="gcp", 130 | region="us-central1" 131 | ) 132 | 133 | # Query with metadata filter 134 | results = PineconeTools().query_index( 135 | "my-index", 136 | query_vector=[0.1, 0.2, 0.3], 137 | top_k=5, 138 | filter={"category": "electronics"} 139 | ) 140 | 141 | # Batch delete vectors 142 | PineconeTools().delete_vectors("my-index", ["id1", "id2", "id3"]) 143 | 144 | # Get index statistics 145 | stats = PineconeTools().describe_index_stats("my-index") 146 | print(f"Total vector count: {stats['total_vector_count']}") 147 | ``` 148 | 149 | These examples demonstrate more complex operations and show how to use some of the additional parameters available in the methods. 150 | 151 | ### Usage Notes 152 | 153 | To use the PineconeTools class, you must set the PINECONE_API_KEY environment variable or provide the API key when initializing the class. These credentials are essential for authenticating with the Pinecone API and are securely managed by the class. 
154 | 155 | The class methods handle API authentication internally, abstracting away the complexity of token management. This allows developers to focus on making API calls and processing the returned data without worrying about the underlying authentication mechanism. 156 | 157 | All methods in the PineconeTools class return data in the form of Python dictionaries or lists, making it easy to work with the results in your application. The structure of the returned data closely mirrors the JSON responses from the Pinecone API, ensuring that you have access to all the details provided by the API. 158 | 159 | Error handling is built into these methods, with exceptions being caught and re-raised with additional context. This helps in debugging and handling potential issues that may arise during API interactions. 160 | 161 | Here's an example of how you might create a vector database agent using these tools: 162 | 163 | ```python 164 | vector_db_agent = Agent( 165 | role="Vector Database Manager", 166 | goal="Manage and query vector databases efficiently", 167 | attributes="Knowledgeable about vector databases, detail-oriented, efficient in data management", 168 | tools={ 169 | PineconeTools.create_index, 170 | PineconeTools.upsert_vectors, 171 | PineconeTools.query_index, 172 | PineconeTools.delete_vectors, 173 | PineconeTools.update_vector_metadata, 174 | PineconeTools.describe_index_stats 175 | }, 176 | llm=OpenrouterModels.haiku 177 | ) 178 | 179 | def manage_vector_db(agent, action, **kwargs): 180 | return Task.create( 181 | agent=agent, 182 | instruction=f"Perform the following action on the vector database: {action}", 183 | context=f"Action parameters: {kwargs}" 184 | ) 185 | 186 | # Example usage 187 | response = manage_vector_db(vector_db_agent, "create_index", name="my-index", dimension=1536) 188 | print(response) 189 | 190 | response = manage_vector_db(vector_db_agent, "upsert_vectors", index_name="my-index", vectors=[...]) 191 | print(response) 192 | 193 | 
response = manage_vector_db(vector_db_agent, "query_index", index_name="my-index", query_vector=[...]) 194 | print(response) 195 | ``` 196 | 197 | This vector database agent can leverage the Pinecone tools to manage and query vector databases, making it a powerful assistant for tasks involving vector embeddings and similarity search. -------------------------------------------------------------------------------- /docs/src/tools/stripe_tools.md: -------------------------------------------------------------------------------- 1 | # Stripe Tools 2 | 3 | The StripeTools class provides a simplified interface to Stripe's API through the stripe_agent_toolkit package. It handles authentication and provides centralized access to common Stripe operations. 4 | 5 | ⚠️ **IMPORTANT DISCLAIMER** ⚠️ 6 | 7 | This toolkit provides direct access to Stripe API operations that can modify your Stripe account and process financial transactions. Please note: 8 | 9 | - **Financial Risk**: Incorrect usage of these tools can result in unintended charges, refunds, or other financial consequences 10 | - **No Warranty**: This toolkit is provided "AS IS" without any warranty. The authors and maintainers are not responsible for any financial losses or damages resulting from its use 11 | - **Production Usage**: It is strongly recommended to: 12 | - Use test API keys during development 13 | - Implement additional safety checks and validation 14 | - Restrict access to write operations 15 | - Maintain audit logs of all operations 16 | 17 | ### Security Recommendations 18 | 19 | 1. **Access Control**: Implement role-based access control before allowing write operations 20 | 2. **Validation**: Add business-logic validation before executing operations 21 | 3. **Monitoring**: Log all operations and implement alerts for unusual activity 22 | 4. 
**Testing**: Always test with Stripe's test mode first 23 | 24 | 25 | ### Configuration 26 | 27 | Before using Stripe operations, set up your environment variable: 28 | 29 | ```bash 30 | export STRIPE_API_KEY=your_stripe_api_key 31 | ``` 32 | 33 | ### Class Methods 34 | 35 | ##### check_balance() 36 | 37 | Retrieve the current balance of your Stripe account. 38 | 39 | ```python 40 | balance = StripeTools.check_balance() 41 | ``` 42 | 43 | ##### list_customers(email: Optional[str] = None, limit: Optional[int] = None) 44 | 45 | List customers from your Stripe account with optional filtering. 46 | 47 | ```python 48 | # List all customers 49 | customers = StripeTools.list_customers() 50 | 51 | # Filter by email 52 | customers = StripeTools.list_customers(email="user@example.com") 53 | 54 | # Limit results 55 | customers = StripeTools.list_customers(limit=10) 56 | ``` 57 | 58 | ##### list_products(limit: Optional[int] = None) 59 | 60 | List products from your Stripe catalog. 61 | 62 | ```python 63 | products = StripeTools.list_products(limit=20) 64 | ``` 65 | 66 | ##### create_customer(name: str, email: Optional[str] = None) 67 | 68 | Create a new customer in Stripe. 69 | 70 | ```python 71 | customer = StripeTools.create_customer( 72 | name="John Doe", 73 | email="john@example.com" 74 | ) 75 | ``` 76 | 77 | ##### create_product(name: str, description: Optional[str] = None) 78 | 79 | Create a new product in your Stripe catalog. 80 | 81 | ```python 82 | product = StripeTools.create_product( 83 | name="Premium Plan", 84 | description="Access to all premium features" 85 | ) 86 | ``` 87 | 88 | ##### create_price(product: str, currency: str, unit_amount: int) 89 | 90 | Create a new price for a product. 
91 | 92 | ```python 93 | price = StripeTools.create_price( 94 | product="prod_xyz", 95 | currency="usd", 96 | unit_amount=1999 # $19.99 97 | ) 98 | ``` 99 | 100 | ##### list_prices(product: Optional[str] = None, limit: Optional[int] = None) 101 | 102 | List prices from your Stripe catalog. 103 | 104 | ```python 105 | # List all prices 106 | prices = StripeTools.list_prices() 107 | 108 | # Filter by product 109 | prices = StripeTools.list_prices(product="prod_xyz") 110 | ``` 111 | 112 | ##### create_payment_link(price: str, quantity: int) 113 | 114 | Create a payment link for a specific price. 115 | 116 | ```python 117 | payment_link = StripeTools.create_payment_link( 118 | price="price_xyz", 119 | quantity=1 120 | ) 121 | ``` 122 | 123 | ##### create_invoice(customer: str, days_until_due: int = 30) 124 | 125 | Create a new invoice for a customer. 126 | 127 | ```python 128 | invoice = StripeTools.create_invoice( 129 | customer="cus_xyz", 130 | days_until_due=14 131 | ) 132 | ``` 133 | 134 | ##### create_invoice_item(customer: str, price: str, invoice: str) 135 | 136 | Add an item to an invoice. 137 | 138 | ```python 139 | item = StripeTools.create_invoice_item( 140 | customer="cus_xyz", 141 | price="price_xyz", 142 | invoice="inv_xyz" 143 | ) 144 | ``` 145 | 146 | ##### finalize_invoice(invoice: str) 147 | 148 | Finalize an invoice for sending. 149 | 150 | ```python 151 | result = StripeTools.finalize_invoice("inv_xyz") 152 | ``` 153 | 154 | ##### create_refund(payment_intent: str, amount: Optional[int] = None) 155 | 156 | Create a refund for a payment. 157 | 158 | ```python 159 | # Full refund 160 | refund = StripeTools.create_refund("pi_xyz") 161 | 162 | # Partial refund 163 | refund = StripeTools.create_refund( 164 | payment_intent="pi_xyz", 165 | amount=1000 # $10.00 166 | ) 167 | ``` 168 | 169 | ### Usage Notes 170 | 171 | 1. 
**Authentication**: 172 | - Requires `STRIPE_API_KEY` environment variable 173 | - Automatically handled through stripe_agent_toolkit 174 | 175 | 2. **Singleton Pattern**: 176 | - Uses a singleton pattern to maintain a single API instance 177 | - Automatically initializes on first use 178 | 179 | 3. **Currency Amounts**: 180 | - All amounts are in cents/smallest currency unit 181 | - Example: $10.00 = 1000 cents 182 | 183 | 4. **Response Format**: 184 | - All methods return JSON strings containing the response data 185 | - Parse the response using `json.loads()` if needed 186 | 187 | 5. **Error Handling**: 188 | - Errors from stripe_agent_toolkit are passed through 189 | - Check response for error details 190 | 191 | 6. **Rate Limiting**: 192 | - Respects Stripe's rate limits 193 | - Consider implementing retry logic for production use 194 | 195 | 7. **Best Practices**: 196 | - Always validate customer and product IDs 197 | - Use descriptive names for products and prices 198 | - Keep track of created resources 199 | 200 | The StripeTools class simplifies Stripe operations by providing a centralized, authenticated interface to stripe_agent_toolkit's functionality. -------------------------------------------------------------------------------- /docs/src/tools/text_splitters.md: -------------------------------------------------------------------------------- 1 | # Text Splitters 2 | 3 | The SentenceSplitter class is a simple wrapper around the [sentence_splitter](https://github.com/mediacloud/sentence-splitter) library, providing an easy way to split text into overlapping chunks of sentences. 
4 | 5 | ## Usage 6 | 7 | ```python 8 | from mainframe_orchestra.tools import SentenceSplitter 9 | 10 | splitter = SentenceSplitter(language='en') 11 | chunks = splitter.split_text_by_sentences( 12 | text="Your long text here...", 13 | chunk_size=5, 14 | overlap=1 15 | ) 16 | ``` 17 | 18 | ## Parameters 19 | 20 | - `language`: The language of the text (default: 'en') 21 | - `text`: The input text to split 22 | - `chunk_size`: Number of sentences per chunk (default: 5) 23 | - `overlap`: Number of sentences to overlap between chunks (default: 1) 24 | 25 | ## Return Value 26 | 27 | Returns a list of strings, where each string is a chunk of text containing the specified number of sentences. 28 | 29 | ## Note 30 | 31 | For more advanced usage or language-specific options, consider using the sentence_splitter library directly. 32 | 33 | 34 | # Semantic Splitter 35 | 36 | The SemanticSplitter class provides a sophisticated method for splitting text into semantically coherent chunks. This tool is particularly useful for processing large texts, preparing data for summarization, or creating more manageable segments for further NLP tasks. It uses sentence embeddings and community detection algorithms to group similar sentences together. 37 | 38 | ### Class Methods 39 | 40 | ##### chunk_text() 41 | 42 | This is the main static method of the SemanticSplitter class. It takes a text input and returns a list of semantically coherent chunks. 43 | 44 | ```python 45 | @staticmethod 46 | def chunk_text(text: str, rearrange: bool = False, 47 | embedding_provider: str = "openai", embedding_model: str = "text-embedding-3-small") -> List[str]: 48 | ``` 49 | 50 | Parameters: 51 | - `text`: The input text to be split into chunks. 52 | - `rearrange`: If True, sentences will be grouped by their semantic similarity, potentially changing their original order. When False, it maintains the original order of the text while still grouping similar sentences together. 
53 | - `embedding_provider`: The provider of the embedding model (e.g., "openai", "cohere", "mistral"). 54 | - `embedding_model`: The specific embedding model to use. 55 | 56 | ### Usage Notes 57 | 58 | To use the SemanticSplitter class, you need to have the necessary API keys set up in your environment variables for the chosen embedding provider. 59 | 60 | The class uses the sentence_splitter library for initial text segmentation and the igraph and leidenalg libraries for community detection. Make sure these dependencies are installed in your environment. 61 | 62 | The SemanticSplitter process involves creating sentence segments, embedding them, detecting communities using graph algorithms, and finally creating chunks from these communities. 63 | 64 | The class includes a method to split oversized communities, which can help manage very large chunks of text and ensure more balanced output. 65 | 66 | Basic error handling is implemented, such as returning a single community for very short inputs. 67 | 68 | The SemanticSplitter can be particularly useful when working with large text strings that need to be processed in smaller, semantically coherent chunks. This can improve the performance of downstream NLP tasks such as summarization, question-answering, or topic modeling. 69 | 70 | ### Advanced Usage 71 | 72 | The SemanticSplitter offers several parameters that can be tuned for optimal performance: 73 | 74 | - The `rearrange` parameter allows for reordering of sentences based on their semantic similarity. This can be useful for certain applications but should be used cautiously if preserving the original text order is important. Essentially, it will regroup sentences that are similar into a single chunk. If a sentence or line at the end of the document is similar to the first few, they will be regrouped together. 75 | 76 | - Different embedding providers and models can be used by specifying the `embedding_provider` and `embedding_model` parameters. 
This allows for flexibility in choosing the most appropriate embedding method for your specific use case. 77 | 78 | ```python 79 | # Example of advanced usage with custom parameters 80 | chunks = SemanticSplitter.chunk_text( 81 | text=large_text, 82 | rearrange=True, 83 | embedding_provider="cohere", 84 | embedding_model="embed-english-v3.0" 85 | ) 86 | ``` 87 | 88 | This advanced usage demonstrates how to customize the chunking process for specific needs, such as using a different embedding provider or adjusting the chunking parameters. 89 | 90 | ### Performance Considerations 91 | 92 | The SemanticSplitter can be computationally intensive, especially for very large texts. The performance is primarily affected by: 93 | 94 | 1. The length of the input text (number of sentences). 95 | 2. The chosen embedding model and provider. 96 | 97 | For extremely large texts, consider breaking the text into individual documents or subsections before applying the SemanticSplitter. 98 | 99 | ### Use with Knowledgebases 100 | 101 | The SemanticSplitter can be particularly useful when working with knowledge bases or large documents that need to be processed in smaller, semantically coherent chunks. This can improve the performance of downstream NLP tasks such as summarization, question-answering, or topic modeling. Articles, documents can be fed through the splitter to feed the chunks into an agent knowledgebase. 102 | 103 | By leveraging the SemanticSplitter tool, agents can efficiently process and analyze large volumes of text, breaking them down into manageable, semantically coherent chunks for further processing or analysis. 104 | 105 | ### Example Usage with Different Embedding Providers 106 | 107 | Here's how to use the SemanticSplitter with different embedding providers: 108 | 109 | ```python 110 | text = "This is a test text to demonstrate the semantic splitter. It should be split into meaningful chunks based on the content and similarity threshold. 
There are many different embedding providers and models available." 111 | 112 | # Using OpenAI (default) 113 | chunks = SemanticSplitter.chunk_text(text) 114 | 115 | # Using Cohere 116 | chunks = SemanticSplitter.chunk_text(text, embedding_provider="cohere", embedding_model="embed-english-v3.0") 117 | 118 | # Using Mistral 119 | chunks = SemanticSplitter.chunk_text(text, embedding_provider="mistral", embedding_model="mistral-embed") 120 | ``` 121 | 122 | These examples demonstrate how to use different embedding providers and models with the SemanticSplitter. -------------------------------------------------------------------------------- /docs/src/tools/wikipedia_tools.md: -------------------------------------------------------------------------------- 1 | # Wikipedia Tools 2 | 3 | The WikipediaTools class provides a set of static methods for interacting with the Wikipedia API, allowing you to retrieve article content, search for articles, and fetch images related to Wikipedia articles. 4 | 5 | ### Class Methods 6 | 7 | ##### get_article(title: str, include_images: bool = False) 8 | 9 | Retrieves a Wikipedia article by its title. This method fetches the content of a Wikipedia article, including its extract, URL, and optionally, images. 10 | 11 | ```python 12 | article = WikipediaTools.get_article("Python (programming language)", include_images=True) 13 | ``` 14 | 15 | ##### search_articles(query: str, num_results: int = 10) 16 | 17 | Searches for Wikipedia articles based on a given query. This method returns a list of dictionaries containing detailed information about each search result, including title, full URL, and a snippet from the article. 18 | 19 | ```python 20 | search_results = WikipediaTools.search_articles("artificial intelligence", num_results=5) 21 | ``` 22 | 23 | ##### get_main_image(title: str, thumb_size: int = 250) 24 | 25 | Retrieves the main image for a given Wikipedia article title. 
This method returns the URL of the main image (thumbnail) associated with the specified article. 26 | 27 | ```python 28 | main_image_url = WikipediaTools.get_main_image("Eiffel Tower", thumb_size=400) 29 | ``` 30 | 31 | ##### search_images(query: str, limit: int = 20, thumb_size: int = 250) 32 | 33 | Searches for images on Wikimedia Commons based on a given query. This method returns a list of dictionaries containing image information, including title, URL, and thumbnail URL. 34 | 35 | ```python 36 | image_results = WikipediaTools.search_images("solar system", limit=10, thumb_size=300) 37 | ``` 38 | 39 | ### Usage Notes 40 | 41 | When using the WikipediaTools class, ensure that you have a stable internet connection to make API requests. The methods raise appropriate exceptions (requests.exceptions.RequestException) if the API requests fail, so make sure to handle them accordingly in your code. 42 | 43 | The WikipediaTools class provides a set of static methods, which means you can directly call them using the class name without creating an instance of the class. 44 | 45 | Remember to adhere to the Wikipedia API usage guidelines and rate limits when making requests. Excessive or abusive requests may result in temporary or permanent restrictions on your API access. 46 | 47 | The get_article() method allows you to retrieve comprehensive information about a Wikipedia article, including its content and optionally, associated images. You can use this method to fetch detailed information about a specific topic. 48 | 49 | The search_articles() method is useful for finding relevant Wikipedia articles based on a search query. It provides a list of search results with detailed information, allowing you to present users with a summary of matching articles. 50 | 51 | Use the get_main_image() method when you need to retrieve the primary image associated with a Wikipedia article. This can be helpful for displaying visual content alongside article information. 
52 | 53 | The search_images() method allows you to find images related to a specific query on Wikimedia Commons. This is particularly useful when you need to retrieve multiple images related to a topic or concept. 54 | 55 | When working with image-related methods (get_main_image() and search_images()), you can specify the desired thumbnail size to optimize image loading and display in your application. 56 | 57 | All methods in the WikipediaTools class include error handling and logging. Make sure to implement appropriate error handling in your code to gracefully manage potential issues with API requests or response parsing. 58 | 59 | -------------------------------------------------------------------------------- /docs/src/tools/yahoo_finance_tools.md: -------------------------------------------------------------------------------- 1 | # Yahoo Finance Tools 2 | 3 | The YahooFinanceTools class provides a comprehensive set of methods to interact with Yahoo Finance data. It offers powerful functionality for retrieving stock information, historical data, financial statements, and performing technical and fundamental analysis. This class is designed to simplify the process of working with financial data, handling data retrieval and processing internally. 4 | 5 | ### Class Methods 6 | 7 | ##### get_ticker_info(ticker: str) -> Dict[str, Any] 8 | 9 | Retrieves comprehensive information about a stock ticker. 10 | 11 | ```python 12 | YahooFinanceTools.get_ticker_info("AAPL") 13 | ``` 14 | 15 | ##### get_historical_data(ticker: str, period: str = "1y", interval: str = "1wk") -> str 16 | 17 | Gets historical price data for a stock ticker. 18 | 19 | ```python 20 | YahooFinanceTools.get_historical_data("AAPL", period="6mo", interval="1d") 21 | ``` 22 | 23 | ##### calculate_returns(tickers: Union[str, List[str]], period: str = "1y", interval: str = "1d") -> Dict[str, pd.Series] 24 | 25 | Calculates daily returns for given stock ticker(s). 
26 | 27 | ```python 28 | YahooFinanceTools.calculate_returns(["AAPL", "GOOGL"], period="3mo", interval="1d") 29 | ``` 30 | 31 | ##### get_financials(ticker: str, statement: str = "income") -> pd.DataFrame 32 | 33 | Retrieves financial statements for a stock ticker. 34 | 35 | ```python 36 | YahooFinanceTools.get_financials("AAPL", statement="balance") 37 | ``` 38 | 39 | ##### get_recommendations(ticker: str) -> pd.DataFrame 40 | 41 | Gets analyst recommendations for a stock ticker. 42 | 43 | ```python 44 | YahooFinanceTools.get_recommendations("AAPL") 45 | ``` 46 | 47 | ##### download_multiple_tickers(tickers: List[str], period: str = "1mo", interval: str = "1d") -> pd.DataFrame 48 | 49 | Downloads historical data for multiple tickers. 50 | 51 | ```python 52 | YahooFinanceTools.download_multiple_tickers(["AAPL", "MSFT", "GOOGL"], period="3mo", interval="1d") 53 | ``` 54 | 55 | ##### get_asset_profile(ticker: str) -> Dict[str, Any] 56 | 57 | Retrieves the asset profile for a given stock ticker. 58 | 59 | ```python 60 | YahooFinanceTools.get_asset_profile("AAPL") 61 | ``` 62 | 63 | ##### get_balance_sheet(ticker: str, quarterly: bool = False) 64 | 65 | Gets the balance sheet for a given stock ticker. 66 | 67 | ```python 68 | YahooFinanceTools.get_balance_sheet("AAPL", quarterly=True) 69 | ``` 70 | 71 | ##### get_cash_flow(ticker: str, quarterly: bool = False) 72 | 73 | Retrieves the cash flow statement for a given stock ticker. 74 | 75 | ```python 76 | YahooFinanceTools.get_cash_flow("AAPL") 77 | ``` 78 | 79 | ##### get_income_statement(ticker: str, quarterly: bool = False) 80 | 81 | Gets the income statement for a given stock ticker. 82 | 83 | ```python 84 | YahooFinanceTools.get_income_statement("AAPL", quarterly=True) 85 | ``` 86 | 87 | ##### get_custom_historical_data(ticker: str, start_date: str, end_date: str, frequency: str = '1d', event: str = 'history') 88 | 89 | Retrieves custom historical data for a stock ticker with specified parameters. 
90 | 91 | ```python 92 | YahooFinanceTools.get_custom_historical_data("AAPL", "2023-01-01", "2023-06-30", frequency="1wk") 93 | ``` 94 | 95 | ##### technical_analysis(ticker: str, period: str = "1y") -> Dict[str, Any] 96 | 97 | Performs technical analysis for a given stock ticker. 98 | 99 | ```python 100 | YahooFinanceTools.technical_analysis("AAPL", period="6mo") 101 | ``` 102 | 103 | ##### fundamental_analysis(ticker: str) -> Dict[str, Any] 104 | 105 | Performs a comprehensive fundamental analysis for a given stock ticker. 106 | 107 | ```python 108 | YahooFinanceTools.fundamental_analysis("AAPL") 109 | ``` 110 | 111 | ### Error Handling 112 | 113 | All methods in the YahooFinanceTools class include robust error handling. If an error occurs during data retrieval or processing, a ValueError is raised with a descriptive error message. This helps in debugging and handling potential issues that may arise during use. 114 | 115 | ### Usage Notes 116 | 117 | To use the YahooFinanceTools class, you need to install the required dependencies. You can do this by running: 118 | 119 | ```bash 120 | pip install yfinance yahoofinance pandas 121 | ``` 122 | 123 | This will install the necessary packages: `yfinance`, `yahoofinance`, and `pandas`. 124 | 125 | The class methods handle data retrieval and processing internally, abstracting away the complexity of working with different financial APIs. This allows developers to focus on analyzing the data rather than worrying about the underlying data retrieval mechanism. 126 | 127 | All methods in the YahooFinanceTools class return data in the form of Python dictionaries or pandas DataFrames, making it easy to work with the results in your application. 128 | 129 | Error handling is built into these methods, with exceptions being caught and re-raised with additional context. This helps in debugging and handling potential issues that may arise during data retrieval and processing. 
130 | 131 | ### Example Usage in a Task 132 | 133 | Here's an example of how you might create a financial analyst agent using these tools: 134 | 135 | ```python 136 | financial_analyst = Agent( 137 | role="Financial Analyst", 138 | goal="Analyze stocks and provide investment recommendations", 139 | attributes="Knowledgeable about financial markets, detail-oriented, data-driven", 140 | tools={ 141 | YahooFinanceTools.get_ticker_info, 142 | YahooFinanceTools.get_historical_data, 143 | YahooFinanceTools.technical_analysis, 144 | YahooFinanceTools.fundamental_analysis 145 | }, 146 | llm=OpenrouterModels.haiku 147 | ) 148 | 149 | def analyze_stock(agent, ticker): 150 | return Task.create( 151 | agent=agent, 152 | instruction=f"Perform a comprehensive analysis of {ticker} and provide an investment recommendation." 153 | ) 154 | 155 | # Usage 156 | response = analyze_stock(financial_analyst, "AAPL") 157 | print(response) 158 | ``` 159 | 160 | This financial analyst agent can leverage the YahooFinanceTools to retrieve stock information, perform technical and fundamental analysis, and provide investment recommendations based on the data. 161 | 162 | ### Conclusion 163 | 164 | The YahooFinanceTools class provides a powerful set of methods for financial data retrieval and analysis. By integrating these tools into your Orchestra agents, you can create sophisticated financial analysis and investment recommendation systems with ease. 165 | 166 | ### Dependencies 167 | 168 | The YahooFinanceTools class relies on the following external libraries: 169 | - yfinance 170 | - yahoofinance 171 | - pandas 172 | 173 | These dependencies are automatically installed when you install the package with the yahoo_finance_tools extra: 174 | 175 | ```bash 176 | pip install Orchestra[yahoo_finance_tools] 177 | ``` 178 | 179 | If you encounter any ImportError, make sure these libraries are properly installed in your environment. 
-------------------------------------------------------------------------------- /docs/static/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mainframecomputer/orchestra/bc547d991d9e2dd12d048fcae9d9c27f2bed584e/docs/static/favicon.ico -------------------------------------------------------------------------------- /docs/static/orchestra-loops.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mainframecomputer/orchestra/bc547d991d9e2dd12d048fcae9d9c27f2bed584e/docs/static/orchestra-loops.png -------------------------------------------------------------------------------- /docs/static/orchestrator.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mainframecomputer/orchestra/bc547d991d9e2dd12d048fcae9d9c27f2bed584e/docs/static/orchestrator.png -------------------------------------------------------------------------------- /examples/python/browseruse_chat.py: -------------------------------------------------------------------------------- 1 | from mainframe_orchestra import Task, Agent, OpenaiModels 2 | from browser_use import Agent as BrowserAgent 3 | from langchain_openai import ChatOpenAI 4 | import asyncio 5 | 6 | # This example shows how to use the browser-use agent as a callable tool for an orchestra agent. 7 | # It's experimental and depends on the browser-use package at https://github.com/browser-use/browser-use 8 | # You can install it with `pip install browser-use` 9 | 10 | # This Browser tool is a simple wrapper around the browser-use Agent, and will kick off the browser-use agent as a delegate. 
11 | class BrowserTools: 12 | @staticmethod 13 | async def browse_web(instruction: str) -> str: 14 | """Use browser-use to perform web browsing tasks 15 | 16 | Args: 17 | instruction (str): Web browsing task to execute, written in natural language 18 | 19 | Returns: 20 | str: Result of the executed browsing task 21 | """ 22 | browser_agent = BrowserAgent( 23 | task=f"Browse the web and find information about {instruction}. Close cookies modals and other popups before using the page.", 24 | llm=ChatOpenAI(model="gpt-4o-mini"), 25 | ) 26 | result = await browser_agent.run() 27 | return result 28 | 29 | # Define an orchestra agent that can call on the browser-agent as a tool to browse the web and find information 30 | web_research_agent = Agent( 31 | agent_id="web_research_agent", 32 | role="Web Research Agent", 33 | goal="Use your web research tools to assist with the given task", 34 | attributes="You have expertise in web research and can use your tools to assist with the given task", 35 | llm=OpenaiModels.gpt_4o_mini, 36 | tools=[BrowserTools.browse_web] 37 | ) 38 | 39 | # Define a chat task with conversation history and user input 40 | async def chat_task(conversation_history, userinput): 41 | task_result = await Task.create( 42 | agent=web_research_agent, 43 | messages=conversation_history, 44 | instruction=userinput 45 | ) 46 | return task_result 47 | 48 | # Main loop to run the chat task 49 | async def main(): 50 | conversation_history = [] 51 | while True: 52 | userinput = input("You: ") 53 | conversation_history.append({"role": "user", "content": userinput}) 54 | response = await chat_task(conversation_history, userinput) 55 | conversation_history.append({"role": "assistant", "content": response}) 56 | print(f"**Browser Agent**: {response}") 57 | 58 | # Run the main loop 59 | if __name__ == "__main__": 60 | asyncio.run(main()) -------------------------------------------------------------------------------- /examples/python/fallback_example.py: 
-------------------------------------------------------------------------------- 1 | from mainframe_orchestra import Task, Agent, OpenaiModels, AnthropicModels, set_verbosity 2 | set_verbosity(1) 3 | 4 | # This is a simple example of how to use a fallback model for an agent. 5 | # It demonstrates how you can specify a list of models to try in order, and the agent will be able to fall back to the next model if the priors fail. 6 | 7 | chat_agent = Agent( 8 | agent_id="chat_agent", 9 | role="Chat Agent", 10 | goal="Chat with the user", 11 | llm=[OpenaiModels.custom_model("fakemodel"), AnthropicModels.custom_model("otherbrokenmodel"), OpenaiModels.gpt_4o_mini] 12 | ) 13 | 14 | def chat_task(conversation_history, userinput): 15 | return Task.create( 16 | agent=chat_agent, 17 | messages=conversation_history, 18 | instruction=userinput 19 | ) 20 | 21 | def main(): 22 | conversation_history = [] 23 | while True: 24 | userinput = input("You: ") 25 | conversation_history.append({"role": "user", "content": userinput}) 26 | response = chat_task(conversation_history, userinput) 27 | conversation_history.append({"role": "assistant", "content": response}) 28 | print(f"**Chat Agent**: {response}") 29 | 30 | if __name__ == "__main__": 31 | main() -------------------------------------------------------------------------------- /examples/python/finance_chat.py: -------------------------------------------------------------------------------- 1 | from mainframe_orchestra import Task, Agent, OpenaiModels, YahooFinanceTools, set_verbosity 2 | # To view detailed prompt logs, set verbosity to 1. For less verbose logs, set verbosity to 0. 3 | set_verbosity(0) 4 | 5 | # This example shows how to assign a series of tools to an agent and use it in a chat loop. 6 | # It uses the YahooFinanceTools to get financial data and recommendations. 7 | # Note: This example is not meant to be a real-world example, but rather a simple demonstration of how to use tools with an agent. 
8 | # YahooFinanceTools can return a large amount of data, so it's recommended to test with an inexpensive model like gpt-4o-mini. 9 | 10 | 11 | # Define Agents 12 | market_analyst = Agent( 13 | agent_id="market_analyst", 14 | role="Market Analyst", 15 | goal="Analyze market trends and provide insights", 16 | attributes="data-driven, analytical, up-to-date with market news", 17 | llm=OpenaiModels.gpt_4o_mini, 18 | tools={YahooFinanceTools.download_multiple_tickers, YahooFinanceTools.get_financials, YahooFinanceTools.get_recommendations, YahooFinanceTools.get_ticker_info} 19 | ) 20 | 21 | def chat_task(conversation_history, userinput): 22 | return Task.create( 23 | agent=market_analyst, 24 | messages=conversation_history, 25 | instruction=userinput 26 | ) 27 | 28 | def main(): 29 | conversation_history = [] 30 | while True: 31 | userinput = input("You: ") 32 | conversation_history.append({"role": "user", "content": userinput}) 33 | response = chat_task(conversation_history, userinput) 34 | conversation_history.append({"role": "assistant", "content": response}) 35 | print(f"**Market Analyst**: {response}") 36 | 37 | if __name__ == "__main__": 38 | main() 39 | -------------------------------------------------------------------------------- /examples/python/finance_flow.py: -------------------------------------------------------------------------------- 1 | from mainframe_orchestra import Task, Agent, AnthropicModels, WebTools, YahooFinanceTools, set_verbosity 2 | from rich.console import Console 3 | from rich.markdown import Markdown 4 | console = Console() 5 | # To view detailed prompt logs, set verbosity to 1. For less verbose logs, set verbosity to 0. 
6 | set_verbosity(0) 7 | 8 | # Define the team of agents in the workflow 9 | market_analyst = Agent( 10 | agent_id="market_analyst", 11 | role="Market Microstructure Analyst", 12 | goal="Analyze market microstructure and identify trading opportunities", 13 | attributes="You have expertise in market microstructure, order flow analysis, and high-frequency data.", 14 | llm=AnthropicModels.haiku_3_5, 15 | tools={YahooFinanceTools.calculate_returns, YahooFinanceTools.get_historical_data} 16 | ) 17 | 18 | fundamental_analyst = Agent( 19 | agent_id="fundamental_analyst", 20 | role="Fundamental Analyst", 21 | goal="Analyze company financials and assess intrinsic value", 22 | attributes="You have expertise in financial statement analysis, valuation models, and industry analysis.", 23 | llm=AnthropicModels.haiku_3_5, 24 | tools={YahooFinanceTools.get_financials, YahooFinanceTools.get_ticker_info} 25 | ) 26 | 27 | technical_analyst = Agent( 28 | agent_id="technical_analyst", 29 | role="Technical Analyst", 30 | goal="Analyze price charts and identify trading patterns", 31 | attributes="You have expertise in technical analysis, chart patterns, and technical indicators.", 32 | llm=AnthropicModels.haiku_3_5, 33 | tools={YahooFinanceTools.get_historical_data} 34 | ) 35 | 36 | sentiment_analyst = Agent( 37 | agent_id="sentiment_analyst", 38 | role="Sentiment Analyst", 39 | goal="Analyze market sentiment, analyst recommendations and news trends", 40 | attributes="You have expertise in market sentiment analysis.", 41 | llm=AnthropicModels.haiku_3_5, 42 | tools={YahooFinanceTools.get_recommendations, WebTools.serper_search} 43 | ) 44 | 45 | # Define the series of tasks in the workflow-pipeline 46 | def analyze_market_task(ticker: str): 47 | market_report = Task.create( 48 | agent=market_analyst, 49 | instruction=f"Analyze the market microstructure for ticker {ticker} and identify trading opportunities and write a comprehensive report in markdown" 50 | ) 51 | return market_report 52 | 
53 | def analyze_sentiment_task(ticker: str): 54 | sentiment_report = Task.create( 55 | agent=sentiment_analyst, 56 | instruction=f"Analyze the market sentiment, analyst recommendations and news for ticker {ticker} and write a comprehensive report in markdown" 57 | ) 58 | return sentiment_report 59 | 60 | def analyze_technical_task(ticker: str): 61 | technical_report = Task.create( 62 | agent=technical_analyst, 63 | instruction=f"Analyze the price charts and identify trading patterns for {ticker} and write a comprehensive report in markdown" 64 | ) 65 | return technical_report 66 | 67 | def analyze_fundamentals_task(ticker: str): 68 | fundamentals_report = Task.create( 69 | agent=fundamental_analyst, 70 | instruction=f"Analyze the company financials and assess intrinsic value for {ticker} and write a comprehensive report in markdown" 71 | ) 72 | return fundamentals_report 73 | 74 | def search_news_task(ticker: str): 75 | news_report = Task.create( 76 | agent=sentiment_analyst, 77 | instruction=f"Search news on '{ticker}', report the news summaries and insights and implications on the stock and market sentiment in the last month and write a comprehensive report in markdown" 78 | ) 79 | return news_report 80 | 81 | def final_report_task(market_report: str, sentiment_report: str, technical_report: str, fundamentals_report: str, news_report: str): 82 | final_report = Task.create( 83 | agent=market_analyst, 84 | context=f"Context:\n\nMarket Report:\n{market_report}\n--------\nSentiment Report:\n{sentiment_report}\n--------\nTechnical Report:\n{technical_report}\n--------\nFundamentals Report:\n{fundamentals_report}\n--------\nNews Report:\n{news_report}", 85 | instruction="Combine all the reports and create a final report in markdown" 86 | ) 87 | return final_report 88 | 89 | # Get user input 90 | userinput = input("Enter ticker: \n") 91 | 92 | # Run the series of tasks 93 | analysis = analyze_market_task(userinput) 94 | sentiment = analyze_sentiment_task(userinput) 95 
| technical = analyze_technical_task(userinput) 96 | fundamentals = analyze_fundamentals_task(userinput) 97 | news = search_news_task(userinput) 98 | final = final_report_task(analysis, sentiment, technical, fundamentals, news) 99 | 100 | # Print final report to console 101 | console.print(Markdown(f"**Final Report**: {final}")) 102 | 103 | # Save final report to file 104 | with open("final_report.md", "w") as file: 105 | file.write(final) 106 | -------------------------------------------------------------------------------- /examples/python/finance_team.py: -------------------------------------------------------------------------------- 1 | from mainframe_orchestra import Task, Agent, OpenaiModels, WebTools, YahooFinanceTools, Conduct, set_verbosity 2 | 3 | # To view detailed prompt logs, set verbosity to 1. For less verbose logs, set verbosity to 0. 4 | set_verbosity(0) 5 | 6 | # This example shows how to define a team of agents and assign them to a conductor agent. 7 | # The conductor agent will orchestrate the agents to perform a task. 8 | # The conductor agent exists in a task loop, and will be able to orchestrate the agents to perform any necessary tasks.
9 | 10 | # Define the team of agents in the workflow 11 | market_analyst = Agent( 12 | agent_id="market_analyst", 13 | role="Market Microstructure Analyst", 14 | goal="Analyze market microstructure and identify trading opportunities", 15 | attributes="You have expertise in market microstructure, order flow analysis, and high-frequency data.", 16 | llm=OpenaiModels.gpt_4o, 17 | tools={YahooFinanceTools.calculate_returns, YahooFinanceTools.get_historical_data} 18 | ) 19 | 20 | fundamental_analyst = Agent( 21 | agent_id="fundamental_analyst", 22 | role="Fundamental Analyst", 23 | goal="Analyze company financials and assess intrinsic value", 24 | attributes="You have expertise in financial statement analysis, valuation models, and industry analysis.", 25 | llm=OpenaiModels.gpt_4o, 26 | tools={YahooFinanceTools.get_financials, YahooFinanceTools.get_ticker_info} 27 | ) 28 | 29 | technical_analyst = Agent( 30 | agent_id="technical_analyst", 31 | role="Technical Analyst", 32 | goal="Analyze price charts and identify trading patterns", 33 | attributes="You have expertise in technical analysis, chart patterns, and technical indicators.", 34 | llm=OpenaiModels.gpt_4o, 35 | tools={YahooFinanceTools.get_historical_data} 36 | ) 37 | 38 | sentiment_analyst = Agent( 39 | agent_id="sentiment_analyst", 40 | role="Sentiment Analyst", 41 | goal="Analyze market sentiment, analyst recommendations and news trends", 42 | attributes="You have expertise in market sentiment analysis.", 43 | llm=OpenaiModels.gpt_4o, 44 | tools={YahooFinanceTools.get_recommendations, WebTools.serper_search} 45 | ) 46 | 47 | conductor_agent = Agent( 48 | agent_id="conductor_agent", 49 | role="Conductor", 50 | goal="Conduct the orchestra", 51 | attributes="You have expertise in orchestrating the orchestra.", 52 | llm=OpenaiModels.gpt_4o, 53 | tools=[Conduct.conduct_tool(market_analyst, fundamental_analyst, technical_analyst, sentiment_analyst)] 54 | ) 55 | 56 | def chat_task(conversation_history, userinput): 57 
| return Task.create( 58 | agent=conductor_agent, 59 | messages=conversation_history, 60 | instruction=userinput 61 | ) 62 | 63 | def main(): 64 | conversation_history = [] 65 | while True: 66 | userinput = input("You: ") 67 | conversation_history.append({"role": "user", "content": userinput}) 68 | response = chat_task(conversation_history, userinput) 69 | conversation_history.append({"role": "assistant", "content": response}) 70 | print(f"**Market Analyst**: {response}") 71 | 72 | if __name__ == "__main__": 73 | main() -------------------------------------------------------------------------------- /examples/python/grapher_flow.py: -------------------------------------------------------------------------------- 1 | from mainframe_orchestra import Task, Agent, OpenaiModels, YahooFinanceTools, MatplotlibTools, set_verbosity 2 | set_verbosity(1) 3 | 4 | # This example shows how to create stock charts using a team of orchestra agents. 5 | 6 | # Required installs: 7 | # pip install mainframe-orchestra matplotlib yfinance 8 | 9 | # The script creates two agents: 10 | # 1. price_plotter: Creates a line chart of stock prices 11 | # 2. recommendation_plotter: Creates a bar chart of stock recommendations 12 | 13 | # How to use: 14 | # 1. Run the script 15 | # 2. Enter a stock ticker (like 'AAPL' for Apple) 16 | # 3. 
The agents will automatically: 17 | # - Fetch the stock data 18 | # - Create the charts 19 | # - Save them as image files 20 | 21 | # Define the team of agents in the workflow pipeline 22 | price_plotter = Agent( 23 | agent_id="price_plotter", 24 | role="Financial Price Analyst", 25 | goal="Analyze the price of a stock and use your tools tocreate a line plot of the price", 26 | llm=OpenaiModels.gpt_4o, 27 | tools=[YahooFinanceTools.get_historical_data, MatplotlibTools.create_line_plot] 28 | ) 29 | 30 | recommendation_plotter = Agent( 31 | agent_id="recommendation_plotter", 32 | role="Financial Recommendation Analyst", 33 | goal="Analyze the recommendations of a stock and use your tools to create a bar plot of the recommendations", 34 | llm=OpenaiModels.gpt_4o, 35 | tools=[YahooFinanceTools.get_recommendations, MatplotlibTools.create_bar_plot] 36 | ) 37 | 38 | def plot_price_chart(ticker: str): 39 | return Task.create( 40 | agent=price_plotter, 41 | instruction=f"Research the price of {ticker} over the last month, and create a line plot of the price. Ensure you use your tools to create and save the plot to file." 42 | ) 43 | 44 | def plot_recommendation_chart(ticker: str): 45 | return Task.create( 46 | agent=recommendation_plotter, 47 | instruction=f"Research the recommendations for {ticker} over the last month, and create a bar plot of the recommendations. Ensure you use your tools to create and save the plot to file." 
48 | ) 49 | 50 | # get user input 51 | userinput = input("Enter ticker: ") 52 | 53 | # run tasks 54 | price_plot = plot_price_chart(userinput) 55 | print(price_plot) 56 | 57 | recommendation_plot = plot_recommendation_chart(userinput) 58 | print(recommendation_plot) 59 | 60 | -------------------------------------------------------------------------------- /examples/python/linear_chat.py: -------------------------------------------------------------------------------- 1 | from mainframe_orchestra import Task, Agent, OpenaiModels, LinearTools, set_verbosity 2 | set_verbosity(0) 3 | 4 | # This example shows how to create a chat loop with an agent that uses the LinearTools to assist with the given task. 5 | # NOTE: This team is capable of updating issue statuses in Linear. Use in a test team, or remove the update_issue_status tool if you don't want to edit issues in Linear. 6 | 7 | # Environment variables needed: 8 | # LINEAR_API_KEY=your_linear_api_key 9 | # LINEAR_TEAM_ID=your_linear_team_id 10 | 11 | # This script: 12 | # 1. Creates an agent with Linear tools 13 | # 2. Runs a chat loop where you can: 14 | # - View team issues 15 | # - Search issues 16 | # - Check workflow states 17 | # - Update issue status 18 | # 19 | # Usage: Just run the script and start chatting! 
20 | 21 | # Initialize the toolkit 22 | LinearTools() 23 | 24 | # Create the agent 25 | linear_agent = Agent( 26 | agent_id="linear_agent", 27 | role="Linear Agent", 28 | goal="Use your linear tools to assist with the given task", 29 | tools=[LinearTools.get_team_issues, LinearTools.get_workflow_states, LinearTools.search_issues, LinearTools.update_issue_status], 30 | llm=OpenaiModels.gpt_4o 31 | ) 32 | 33 | # Define the task 34 | def task(user_input, conversation_history): 35 | return Task.create( 36 | agent=linear_agent, 37 | messages=conversation_history, 38 | instruction=f"Use your linear tools to assist with the given task: '{user_input}" 39 | ) 40 | 41 | # Run the agent-chat loop 42 | def main(): 43 | # Initialize the conversation history 44 | conversation_history = [] 45 | 46 | # Start the conversation loop 47 | while True: 48 | user_input = input("You: ") 49 | response = task(user_input, conversation_history) 50 | print(response) 51 | conversation_history.append({"role": "user", "content": user_input}) 52 | conversation_history.append({"role": "assistant", "content": response}) 53 | 54 | if __name__ == "__main__": 55 | main() 56 | -------------------------------------------------------------------------------- /examples/python/mcp/mcp_fast_calc.py: -------------------------------------------------------------------------------- 1 | from fastmcp import FastMCP 2 | 3 | # Simple FastMCP server 4 | mcp = FastMCP("Calculator") 5 | 6 | # Define tools 7 | @mcp.tool() 8 | def add(a: int, b: int) -> int: 9 | """Add two numbers 10 | 11 | Args: 12 | a: The first number to add 13 | b: The second number to add 14 | 15 | Returns: 16 | The sum of the two numbers 17 | """ 18 | return a + b 19 | 20 | @mcp.tool() 21 | def multiply(a: int, b: int) -> int: 22 | """Multiply two numbers 23 | 24 | Args: 25 | a: The first number to multiply 26 | b: The second number to multiply 27 | 28 | Returns: 29 | The product of the two numbers 30 | """ 31 | return a * b 32 | 33 | 
@mcp.resource("greeting://{name}") 34 | def get_greeting(name: str) -> str: 35 | """Get a personalized greeting 36 | 37 | Args: 38 | name: The name of the person to greet 39 | 40 | Returns: 41 | A personalized greeting 42 | """ 43 | return f"Hello, {name}!" 44 | 45 | if __name__ == "__main__": 46 | mcp.run() -------------------------------------------------------------------------------- /examples/python/mcp/mcp_test_fastmcp.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from mainframe_orchestra import Task, Agent, OpenaiModels, MCPOrchestra, set_verbosity 3 | 4 | set_verbosity(2) 5 | 6 | async def main(): 7 | # Create the MCPOrchestra client 8 | async with MCPOrchestra() as mcp_client: 9 | # Connect to the FastMCP server and start it 10 | print("Starting and connecting to FastMCP server...") 11 | await mcp_client.connect( 12 | server_name="calculator", 13 | command="python", 14 | args=["mcp_fast_calc.py"], # Replace with the path to your FastMCP server script 15 | start_server=True, 16 | server_startup_delay=2.0 17 | ) 18 | 19 | # Get all tools as callables 20 | calculator_tools = mcp_client.get_tools() 21 | 22 | # Define the agent 23 | agent = Agent( 24 | agent_id="calculator_agent", 25 | role="Math Assistant", 26 | goal="Help users perform mathematical calculations", 27 | tools=calculator_tools, 28 | llm=OpenaiModels.gpt_4o 29 | ) 30 | 31 | # Create a task 32 | result = await Task.create( 33 | agent=agent, 34 | instruction="You need to calculate the sum of 42 and 58, and then multiply the result by 5. Respond with the final answer." 
35 | ) 36 | 37 | print(result) 38 | 39 | if __name__ == "__main__": 40 | asyncio.run(main()) -------------------------------------------------------------------------------- /examples/python/mcp/mcp_test_fetch.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from mainframe_orchestra import Task, Agent, OpenaiModels, MCPOrchestra, set_verbosity 3 | 4 | set_verbosity(2) 5 | 6 | async def run_fetch_mcp(): 7 | # Create and connect to MCP server using async context manager 8 | async with MCPOrchestra() as mcp_client: 9 | try: 10 | # Connect to the Fetch MCP server 11 | await mcp_client.connect( 12 | server_name="fetch", 13 | command="npx", 14 | args=["@tokenizin/mcp-npx-fetch"] # Replace with the path to the MCP server 15 | ) 16 | 17 | # Get all tools from the MCP server 18 | fetch_tools = mcp_client.get_tools() 19 | 20 | # Define the agent 21 | agent = Agent( 22 | agent_id="fetch_agent", 23 | role="Web Assistant", 24 | goal="Help users fetch and analyze web content", 25 | tools=fetch_tools, 26 | llm=OpenaiModels.gpt_4o 27 | ) 28 | 29 | # Create a task with these tools 30 | result = await Task.create( 31 | agent=agent, 32 | instruction="Fetch the content from docs.orchestra.org and summarize what the website is about.", 33 | ) 34 | 35 | print(result) 36 | 37 | except Exception as e: 38 | print(f"Error occurred: {e}") 39 | 40 | # Run the example 41 | if __name__ == "__main__": 42 | asyncio.run(run_fetch_mcp()) -------------------------------------------------------------------------------- /examples/python/mcp/mcp_test_filesystem.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import os 3 | from mainframe_orchestra import Task, Agent, OpenaiModels, MCPOrchestra, set_verbosity 4 | 5 | set_verbosity(2) 6 | 7 | async def run_filesystem_mcp(): 8 | # Create and connect to MCP server using async context manager 9 | async with MCPOrchestra() as mcp_client: 10 | 
try: 11 | # Define directories to allow access to (customize these paths) 12 | allowed_directories = [ 13 | os.path.expanduser("~/Desktop"), # Allow access to Desktop, replace with your own paths 14 | ] 15 | 16 | await mcp_client.connect( 17 | server_name="filesystem", 18 | command="npx", 19 | args=["@modelcontextprotocol/server-filesystem"] + allowed_directories # Replace with the path to the MCP server 20 | ) 21 | 22 | # Get all tools from the MCP server 23 | filesystem_tools = mcp_client.get_tools() 24 | 25 | # Define the agent 26 | agent = Agent( 27 | agent_id="filesystem_agent", 28 | role="File System Assistant", 29 | goal="Help users manage files and directories", 30 | attributes="You know to use absolute paths when possible", 31 | tools=filesystem_tools, 32 | llm=OpenaiModels.gpt_4o 33 | ) 34 | 35 | # Create a task with these tools 36 | result = await Task.create( 37 | agent=agent, 38 | instruction="summarize the contents of my desktop", 39 | ) 40 | 41 | print(result) 42 | 43 | except Exception as e: 44 | print(f"Error: {e}") 45 | 46 | # Run the example 47 | if __name__ == "__main__": 48 | asyncio.run(run_filesystem_mcp()) 49 | -------------------------------------------------------------------------------- /examples/python/mcp/mcp_test_playwright.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from mainframe_orchestra import Task, Agent, OpenaiModels, MCPOrchestra, set_verbosity 3 | set_verbosity(2) 4 | 5 | async def run_agent_with_mcp_tools(): 6 | # Create and connect to MCP server using async context manager 7 | async with MCPOrchestra() as mcp_client: 8 | try: 9 | # Connect to the Playwright MCP server 10 | await mcp_client.connect( 11 | server_name="playwright", 12 | command="npx", 13 | args=["playwright-mcp-server"] 14 | ) 15 | 16 | # Get all tools from the MCP server 17 | mcp_tools = mcp_client.get_tools() 18 | 19 | # Define the agent 20 | agent = Agent( 21 | agent_id="playwright_agent", 22 | 
role="Web Automation Assistant", 23 | goal="Help users automate web tasks using Playwright", 24 | tools=mcp_tools, 25 | llm=OpenaiModels.gpt_4o 26 | ) 27 | 28 | # Create the task 29 | result = await Task.create( 30 | agent=agent, 31 | instruction=""" 32 | 1. Navigate to duckduckgo.com 33 | 2. Try to use the selector to find the search input 34 | 3. Use the identified selector to search for 'AI Agent news' 35 | 4. Click the search button or press Enter to submit the search 36 | 5. Find the first result and click it 37 | """ 38 | ) 39 | 40 | print(result) 41 | 42 | except Exception as e: 43 | print(f"Error occurred: {e}") 44 | 45 | # Run the example 46 | asyncio.run(run_agent_with_mcp_tools()) -------------------------------------------------------------------------------- /examples/python/mcp/mcp_test_slack.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import os 3 | import shutil 4 | from mainframe_orchestra import Task, Agent, OpenaiModels, MCPOrchestra, set_verbosity 5 | from dotenv import load_dotenv 6 | load_dotenv() 7 | 8 | set_verbosity(2) 9 | 10 | async def run_slack_mcp(): 11 | # Create and connect to MCP server using async context manager 12 | async with MCPOrchestra() as mcp_client: 13 | try: 14 | # Get Slack credentials from environment variables 15 | slack_bot_token = os.environ.get("SLACK_BOT_TOKEN") 16 | slack_team_id = os.environ.get("SLACK_TEAM_ID") 17 | 18 | if not slack_bot_token or not slack_team_id: 19 | print("ERROR: Missing Slack credentials. Set SLACK_BOT_TOKEN and SLACK_TEAM_ID environment variables.") 20 | return 21 | 22 | # Get the path to node 23 | node_path = shutil.which("node") 24 | if not node_path: 25 | print("ERROR: Node.js not found. 
Please install Node.js.") 26 | return 27 | 28 | # Path to the server module 29 | server_path = "node-tools/node_modules/@modelcontextprotocol/server-slack/dist/index.js" 30 | 31 | # Connect to the Slack MCP server 32 | await mcp_client.connect( 33 | server_name="slack", 34 | command=node_path, 35 | args=[server_path], 36 | env={ 37 | "SLACK_BOT_TOKEN": slack_bot_token, 38 | "SLACK_TEAM_ID": slack_team_id 39 | } 40 | ) 41 | 42 | # Get all tools from the MCP server 43 | slack_tools = mcp_client.get_server_tools("slack") 44 | 45 | # Create the agent 46 | slack_agent=Agent( 47 | agent_id="slack_agent", 48 | role="Slack Assistant", 49 | goal="Help users interact with Slack by using your tools", 50 | tools=slack_tools, 51 | llm=OpenaiModels.gpt_4o 52 | ) 53 | 54 | # Create a conversation history 55 | conversation_history = [] 56 | 57 | # Main conversation loop 58 | while True: 59 | # Get user input 60 | print("\nEnter your instruction (or 'quit' to exit):") 61 | user_instruction = input("You:") 62 | 63 | if user_instruction.lower() == 'quit': 64 | break 65 | 66 | # Create a task with these tools 67 | result = await Task.create( 68 | agent=slack_agent, 69 | instruction=user_instruction, 70 | messages=conversation_history 71 | ) 72 | 73 | # Add the interaction to conversation history 74 | conversation_history.append({"role": "user", "content": user_instruction}) 75 | conversation_history.append({"role": "assistant", "content": str(result)}) 76 | 77 | print("\nTask result:") 78 | print(result) 79 | 80 | except Exception as e: 81 | print(f"Error occurred: {e}") 82 | 83 | # Run the example 84 | if __name__ == "__main__": 85 | asyncio.run(run_slack_mcp()) -------------------------------------------------------------------------------- /examples/python/receipt_archiver.py: -------------------------------------------------------------------------------- 1 | from mainframe_orchestra import Task, Agent, Conduct, FileTools, OpenaiModels 2 | import os 3 | import requests 4 | 5 | # A 
receipt processing system that orchestrates agents to: 6 | # 1. Transcribe receipt images 7 | # 2. Save data to CSV files 8 | 9 | class ImageTools: 10 | @classmethod 11 | def transcribe_image(cls, image_url: str) -> dict: 12 | """ 13 | Transcribes structured text and data from an image URL. 14 | Appropriate for images containing structured data such as tables, charts, or other data visualizations. 15 | 16 | Args: 17 | image_url (str): The URL of the image containing text or structured data. 18 | 19 | Returns: 20 | dict: The response from OpenAI containing the transcribed structured data. 21 | """ 22 | try: 23 | # Prepare the payload for OpenAI API 24 | payload = { 25 | "model": "gpt-4o-mini", 26 | "messages": [ 27 | { 28 | "role": "user", 29 | "content": [ 30 | {"type": "text", "text": "Transcribe any text and structured data from this image, preserving the formatting and structure."}, 31 | {"type": "image_url", "image_url": {"url": image_url}} 32 | ] 33 | } 34 | ], 35 | "max_tokens": 1000 36 | } 37 | 38 | # Call OpenAI API 39 | response = requests.post( 40 | "https://api.openai.com/v1/chat/completions", 41 | headers={ 42 | "Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}", 43 | "Content-Type": "application/json" 44 | }, 45 | json=payload 46 | ) 47 | 48 | # Parse the response 49 | result = response.json() 50 | 51 | return result 52 | 53 | except Exception as e: 54 | return {"error": f"Failed to transcribe image: {str(e)}"} 55 | 56 | 57 | transcription_agent = Agent( 58 | agent_id="transcription_agent", 59 | role="Transcription Agent", 60 | goal="Transcribe the image and return all of the data in the receipt.", 61 | tools=[ImageTools.transcribe_image], 62 | llm=OpenaiModels.gpt_4o_mini 63 | ) 64 | 65 | archivist_agent = Agent( 66 | agent_id="archivist_agent", 67 | role="Archivist Agent", 68 | goal="Archive the transcribed text to a file.", 69 | attributes="You know to name the file as the store name and date of the receipt if provided.", 70 | 
tools=[FileTools.write_csv], 71 | llm=OpenaiModels.gpt_4o 72 | ) 73 | 74 | coordinator_agent = Agent( 75 | agent_id="coordinator_agent", 76 | role="Coordinator Agent", 77 | goal="Coordinate the transcription and archiving agents.", 78 | tools=[Conduct.conduct_tool(transcription_agent, archivist_agent)], 79 | llm=OpenaiModels.gpt_4o 80 | ) 81 | 82 | def task(user_input): 83 | return Task.create( 84 | agent=coordinator_agent, 85 | instruction=f"Coordinate your team to assist with the given task: '{user_input}. Make sure they transcribe and save the data to a csv file in entirety." 86 | ) 87 | 88 | def main(): 89 | user_input = input("Enter the URL of the receipt image you want to transcribe and save to csv, e.g. image url, google drive image url, etc:\n") 90 | response = task(user_input) 91 | print(response) 92 | 93 | # example image: https://ocr.space/Content/Images/receipt-ocr-original.webp 94 | 95 | if __name__ == "__main__": 96 | main() -------------------------------------------------------------------------------- /examples/python/stripe_chat.py: -------------------------------------------------------------------------------- 1 | from mainframe_orchestra import Agent, Task, OpenaiModels, StripeTools 2 | 3 | # This example demonstrates how to create a chat loop with an agent that uses the StripeTools to assist with given tasks. 
def task(user_input, conversation_history):
    """Create a task for the Stripe agent, carrying prior turns as context.

    Args:
        user_input (str): The user's latest request.
        conversation_history (list): Prior chat messages as {"role", "content"} dicts.

    Returns:
        The result of Task.create(...) for the Stripe agent.
    """
    return Task.create(
        agent=stripe_agent,
        # Fixed: the quote wrapping the user input was never closed.
        instruction=f"Use your stripe tools to assist with the given task: '{user_input}'",
        messages=conversation_history
    )
/packages/python/pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "mainframe-orchestra" 3 | version = "0.0.35" 4 | description = "Mainframe-Orchestra is a lightweight, open-source agentic framework for building LLM based pipelines and self-orchestrating multi-agent teams" 5 | authors = [ 6 | "Mainframe Computer Inc. ", 7 | "Philippe Andre Page " 8 | ] 9 | readme = "README.md" 10 | packages = [{ include = "mainframe_orchestra", from = "src" }] 11 | license = "Apache 2.0" 12 | classifiers = [ 13 | "Development Status :: 4 - Beta", 14 | "Intended Audience :: Developers", 15 | "License :: OSI Approved :: Apache Software License", 16 | "Operating System :: OS Independent", 17 | "Programming Language :: Python :: 3", 18 | "Programming Language :: Python :: 3.10", 19 | "Programming Language :: Python :: 3.11", 20 | "Programming Language :: Python :: 3.12", 21 | ] 22 | 23 | [tool.poetry.dependencies] 24 | python = "^3.10" 25 | requests = "*" 26 | pydantic = ">=2.0" 27 | anthropic = "*" 28 | openai = ">=1.0" 29 | cohere = "*" 30 | beautifulsoup4 = "*" 31 | tqdm = "*" 32 | python-dotenv = "*" 33 | PyYAML = "*" 34 | ollama = "*" 35 | huggingface-hub = "*" 36 | lxml = "*" 37 | halo = "*" 38 | groq = "*" 39 | numpy = "*" 40 | faiss-cpu = "*" 41 | pinecone = "*" 42 | sentence_splitter = "*" 43 | igraph = "*" 44 | leidenalg = "*" 45 | fake-useragent = "*" 46 | google-generativeai = "*" 47 | tzdata = "^2024.2" 48 | braintrust = "*" 49 | mcp = "*" 50 | fastmcp = "*" 51 | 52 | [tool.poetry.extras] 53 | langchain_tools = [ 54 | "langchain-core", 55 | "langchain-community", 56 | "langchain-openai" 57 | ] 58 | matplotlib_tools = [ 59 | "matplotlib" 60 | ] 61 | yahoo_finance_tools = [ 62 | "yfinance", 63 | "yahoofinance", 64 | "pandas" 65 | ] 66 | fred_tools = [ 67 | "fredapi", 68 | "pandas" 69 | ] 70 | audio_tools = [ 71 | "pygame", 72 | "elevenlabs" 73 | ] 74 | stripe_tools = [ 75 | "stripe-agent-toolkit" 76 
| ] 77 | 78 | [tool.poetry.dependencies.yfinance] 79 | version = "*" 80 | optional = true 81 | 82 | [tool.poetry.dependencies.yahoofinance] 83 | version = "*" 84 | optional = true 85 | 86 | [tool.poetry.dependencies.langchain-core] 87 | version = "*" 88 | optional = true 89 | 90 | [tool.poetry.dependencies.langchain-community] 91 | version = "*" 92 | optional = true 93 | 94 | [tool.poetry.dependencies.langchain-openai] 95 | version = "*" 96 | optional = true 97 | 98 | [tool.poetry.dependencies.matplotlib] 99 | version = "*" 100 | optional = true 101 | 102 | [tool.poetry.dependencies.stripe-agent-toolkit] 103 | version = "*" 104 | optional = true 105 | 106 | [tool.poetry.dependencies.elevenlabs] 107 | version = "*" 108 | optional = true 109 | 110 | [tool.poetry.group.dev.dependencies] 111 | pytest = ">=8.0.0,<9.0.0" 112 | black = ">=23.0,<25.0" 113 | isort = ">=5.0,<6.0" 114 | mypy = ">=1.0,<2.0" 115 | ruff = "^0.8.4" 116 | 117 | [build-system] 118 | requires = ["poetry-core"] 119 | build-backend = "poetry.core.masonry.api" 120 | 121 | [tool.poetry.urls] 122 | "Homepage" = "https://github.com/mainframecomputer/orchestra/" 123 | "Bug Tracker" = "https://github.com/mainframecomputer/orchestra/issues" 124 | "Documentation" = "https://orchestra.org" 125 | 126 | [tool.black] 127 | line-length = 100 128 | target-version = ['py310'] 129 | 130 | [tool.isort] 131 | profile = "black" 132 | line_length = 100 133 | 134 | [tool.mypy] 135 | python_version = "3.10" 136 | strict = true 137 | ignore_missing_imports = true 138 | 139 | [tool.pytest.ini_options] 140 | minversion = "6.0" 141 | addopts = "-ra -q" 142 | testpaths = [ 143 | "tests", 144 | ] 145 | 146 | [tool.ruff] 147 | line-length = 100 148 | -------------------------------------------------------------------------------- /packages/python/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | if __name__ == "__main__": 4 | setup() 5 | 
def __getattr__(name):
    """Lazily resolve optional tool classes (PEP 562 module __getattr__).

    Checks that each optional dependency for the requested tool is importable,
    then imports and returns the tool class from its submodule.

    Args:
        name (str): Attribute name being looked up on the package.

    Returns:
        The requested tool class.

    Raises:
        AttributeError: If `name` is not a known optional tool.
        ImportError: If the tool's optional dependencies are not installed.
    """
    package_map = {
        "LangchainTools": (
            "langchain_tools",
            ["langchain-core", "langchain-community", "langchain-openai"],
        ),
        "MatplotlibTools": ("matplotlib_tools", ["matplotlib"]),
        "YahooFinanceTools": ("yahoo_finance_tools", ["yfinance", "yahoofinance"]),
        "FredTools": ("fred_tools", ["fredapi"]),
        "StripeTools": ("stripe_tools", ["stripe", "stripe_agent_toolkit"]),
        "TextToSpeechTools": ("audio_tools", ["elevenlabs", "pygame"]),
    }

    if name not in package_map:
        raise AttributeError(f"Module '{__name__}' has no attribute '{name}'")

    module_name, required_packages = package_map[name]
    try:
        for package in required_packages:
            # Fixed: entries like "langchain-core" are pip names; the importable
            # module name uses underscores, so hyphenated names could never import.
            importlib.import_module(package.replace("-", "_"))

        # If successful, import and return the tool
        module = __import__(f"mainframe_orchestra.tools.{module_name}", fromlist=[name])
        return getattr(module, name)
    except ImportError as e:
        missing_packages = " ".join(required_packages)
        # Fixed: a library must never sys.exit(1) during attribute access -- that
        # terminates the host process. Raise ImportError so callers can handle it.
        raise ImportError(
            f"The required packages ({missing_packages}) are not installed. "
            f"Please install them using 'pip install {missing_packages}'. "
            f"Specific error: {str(e)}"
        ) from e
class Agent(BaseModel):
    """Declarative configuration for an LLM-backed agent.

    An Agent bundles an identity (agent_id, role, goal, optional attributes) with
    the language model callable(s) and tool functions it may use. Instances are
    plain pydantic models; task execution is handled elsewhere (e.g. Task.create).
    """

    # Identity fields: who the agent is and what it is for.
    agent_id: str = Field(..., description="The unique identifier for the agent")
    role: str = Field(..., description="The role or type of agent performing tasks")
    goal: str = Field(..., description="The objective or purpose of the agent")
    attributes: Optional[str] = Field(
        None, description="Additional attributes or characteristics of the agent"
    )
    # A list of llm callables presumably enables fallback across models -- confirm
    # with the task-execution code before relying on ordering semantics.
    llm: Optional[Union[Callable, List[Callable]]] = Field(
        None,
        description="The language model function(s) to be used by the agent. Can be a single function or a list of functions",
    )
    tools: Optional[Set[Callable]] = Field(
        default=None,
        description="Optional set of tool functions. Can be a single function or a set of functions",
    )
    # Sampling parameters forwarded to the language model.
    temperature: Optional[float] = Field(
        default=0.7, description="The temperature for the language model. Default is 0.7"
    )
    max_tokens: Optional[int] = Field(
        default=4000,
        description="The maximum number of tokens for the language model. Default is 4000",
    )
    # Callable/Set[Callable] fields require arbitrary (non-pydantic) types.
    model_config = {"arbitrary_types_allowed": True}
def _optional_import(tool_name, install_name):
    """Build a placeholder class for a tool whose optional dependencies are missing.

    The returned class raises ImportError with installation instructions the moment
    anyone tries to instantiate it, so a missing extra fails loudly at use time
    rather than silently at import time.

    Args:
        tool_name: Public name of the unavailable tool class.
        install_name: pip install target(s) shown in the error message.

    Returns:
        A class whose constructor always raises ImportError.
    """
    message = (
        f"The tool '{tool_name}' requires additional dependencies. "
        f"Please install them using: 'pip install {install_name}'"
    )

    class OptionalTool:
        def __init__(self, *args, **kwargs):
            raise ImportError(message)

    return OptionalTool
2 | 3 | from datetime import timedelta, datetime 4 | from ..utils.braintrust_utils import traced 5 | 6 | class CalculatorTools: 7 | @traced(type="tool") 8 | @staticmethod 9 | def basic_math(operation: str, args: list) -> float: 10 | """ 11 | Perform basic and advanced math operations on multiple numbers. 12 | 13 | Args: 14 | operation (str): One of 'add', 'subtract', 'multiply', 'divide', 'exponent', 'root', 'modulo', or 'factorial'. 15 | args (list): List of numbers to perform the operation on. 16 | 17 | Returns: 18 | float: Result of the operation. 19 | 20 | Raises: 21 | ValueError: If an invalid operation is provided, if dividing by zero, if fewer than required numbers are provided, or for invalid inputs. 22 | 23 | Note: 24 | This method does not take in letters or words. It only takes in numbers. 25 | """ 26 | if len(args) < 1: 27 | raise ValueError("At least one number is required for the operation.") 28 | 29 | # Convert all args to float, except for factorial which requires int 30 | if operation != 'factorial': 31 | args = [float(arg) for arg in args] 32 | 33 | result = args[0] 34 | 35 | if operation in ['add', 'subtract', 'multiply', 'divide']: 36 | if len(args) < 2: 37 | raise ValueError("At least two numbers are required for this operation.") 38 | 39 | if operation == 'add': 40 | for num in args[1:]: 41 | result += num 42 | elif operation == 'subtract': 43 | for num in args[1:]: 44 | result -= num 45 | elif operation == 'multiply': 46 | for num in args[1:]: 47 | result *= num 48 | elif operation == 'divide': 49 | for num in args[1:]: 50 | if num == 0: 51 | raise ValueError("Cannot divide by zero") 52 | result /= num 53 | elif operation == 'exponent': 54 | if len(args) != 2: 55 | raise ValueError("Exponent operation requires exactly two numbers.") 56 | result = args[0] ** args[1] 57 | elif operation == 'root': 58 | if len(args) != 2: 59 | raise ValueError("Root operation requires exactly two numbers.") 60 | if args[1] == 0: 61 | raise ValueError("Cannot 
calculate 0th root") 62 | result = args[0] ** (1 / args[1]) 63 | elif operation == 'modulo': 64 | if len(args) != 2: 65 | raise ValueError("Modulo operation requires exactly two numbers.") 66 | if args[1] == 0: 67 | raise ValueError("Cannot perform modulo with zero") 68 | result = args[0] % args[1] 69 | elif operation == 'factorial': 70 | if len(args) != 1 or args[0] < 0 or not isinstance(args[0], int): 71 | raise ValueError("Factorial operation requires exactly one non-negative integer.") 72 | result = 1 73 | for i in range(1, args[0] + 1): 74 | result *= i 75 | else: 76 | raise ValueError("Invalid operation. Choose 'add', 'subtract', 'multiply', 'divide', 'exponent', 'root', 'modulo', or 'factorial'.") 77 | 78 | # Convert the result to a string before returning 79 | return str(result) 80 | 81 | @traced(type="tool") 82 | @staticmethod 83 | def get_current_time() -> str: 84 | """ 85 | Get the current UTC time. 86 | 87 | Returns: 88 | str: The current UTC time in the format 'YYYY-MM-DD HH:MM:SS'. 89 | """ 90 | return datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") 91 | 92 | @traced(type="tool") 93 | @staticmethod 94 | def add_days(date_str: str, days: int) -> str: 95 | """ 96 | Add a number of days to a given date. 97 | 98 | Args: 99 | date_str (str): The starting date in 'YYYY-MM-DD' format. 100 | days (int): The number of days to add (can be negative). 101 | 102 | Returns: 103 | str: The resulting date in 'YYYY-MM-DD' format. 104 | """ 105 | date = datetime.strptime(date_str, "%Y-%m-%d") 106 | new_date = date + timedelta(days=days) 107 | return new_date.strftime("%Y-%m-%d") 108 | 109 | @traced(type="tool") 110 | @staticmethod 111 | def days_between(date1_str: str, date2_str: str) -> int: 112 | """ 113 | Calculate the number of days between two dates. 114 | 115 | Args: 116 | date1_str (str): The first date in 'YYYY-MM-DD' format. 117 | date2_str (str): The second date in 'YYYY-MM-DD' format. 118 | 119 | Returns: 120 | int: The number of days between the two dates. 
121 | """ 122 | date1 = datetime.strptime(date1_str, "%Y-%m-%d") 123 | date2 = datetime.strptime(date2_str, "%Y-%m-%d") 124 | return abs((date2 - date1).days) 125 | 126 | @traced(type="tool") 127 | @staticmethod 128 | def format_date(date_str: str, input_format: str, output_format: str) -> str: 129 | """ 130 | Convert a date string from one format to another. 131 | 132 | Args: 133 | date_str (str): The date string to format. 134 | input_format (str): The current format of the date string. 135 | output_format (str): The desired output format. 136 | 137 | Returns: 138 | str: The formatted date string. 139 | 140 | Example: 141 | format_date("2023-05-15", "%Y-%m-%d", "%B %d, %Y") -> "May 15, 2023" 142 | """ 143 | date_obj = datetime.strptime(date_str, input_format) 144 | return date_obj.strftime(output_format) 145 | -------------------------------------------------------------------------------- /packages/python/src/mainframe_orchestra/tools/faiss_tools.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Mainframe-Orchestra Contributors. Licensed under Apache License 2.0. 2 | 3 | import os 4 | import json 5 | import numpy as np 6 | from typing import Tuple, Any 7 | from ..utils.braintrust_utils import traced 8 | 9 | class FAISSTools: 10 | def __init__(self, dimension: int, metric: str = "IP"): 11 | """ 12 | Initialize FAISSTools with the specified dimension and metric. 13 | 14 | Args: 15 | dimension (int): Dimension of the vectors to be stored in the index. 16 | metric (str, optional): Distance metric to use. Defaults to "IP" (Inner Product). 17 | """ 18 | # Load the faiss library 19 | try: 20 | import faiss 21 | except ModuleNotFoundError: 22 | raise ImportError("faiss is required for FAISSTools. 
Install with `pip install faiss-cpu` or `pip install faiss-gpu`") 23 | 24 | self.dimension = dimension 25 | self.metric = metric 26 | self.index = None 27 | self.embedding_model = None 28 | self.embedding_provider = None 29 | self.metadata = {} # Added to store metadata 30 | 31 | self.faiss = faiss 32 | 33 | @traced(type="tool") 34 | def create_index(self, index_type: str = "Flat") -> None: 35 | """ 36 | Create a new FAISS index. 37 | 38 | Args: 39 | index_type (str, optional): Type of index to create. Defaults to "Flat". 40 | 41 | Raises: 42 | ValueError: If an unsupported index type is specified. 43 | """ 44 | if index_type == "Flat": 45 | if self.metric == "IP": 46 | self.index = self.faiss.IndexFlatIP(self.dimension) 47 | elif self.metric == "L2": 48 | self.index = self.faiss.IndexFlatL2(self.dimension) 49 | else: 50 | raise ValueError(f"Unsupported metric: {self.metric}") 51 | else: 52 | raise ValueError(f"Unsupported index type: {index_type}") 53 | 54 | @traced(type="tool") 55 | def load_index(self, index_path: str) -> None: 56 | """ 57 | Load a FAISS index and metadata from files. 58 | 59 | Args: 60 | index_path (str): Path to the index file. 61 | 62 | Raises: 63 | FileNotFoundError: If the index file or metadata file is not found. 64 | """ 65 | if not os.path.exists(index_path): 66 | raise FileNotFoundError(f"Index file not found: {index_path}") 67 | self.index = self.faiss.read_index(index_path) 68 | 69 | metadata_path = f"{index_path}.metadata" 70 | if not os.path.exists(metadata_path): 71 | raise FileNotFoundError(f"Metadata file not found: {metadata_path}") 72 | with open(metadata_path, 'r') as f: 73 | self.metadata = json.load(f) 74 | 75 | self.dimension = self.index.d 76 | self.embedding_model = self.metadata.get('embedding_model') 77 | 78 | @traced(type="tool") 79 | def save_index(self, index_path: str) -> None: 80 | """ 81 | Save the FAISS index and metadata to files. 82 | 83 | Args: 84 | index_path (str): Path to save the index file. 
85 | """ 86 | self.faiss.write_index(self.index, index_path) 87 | metadata_path = f"{index_path}.metadata" 88 | with open(metadata_path, 'w') as f: 89 | json.dump(self.metadata, f) 90 | 91 | @traced(type="tool") 92 | def add_vectors(self, vectors: np.ndarray) -> None: 93 | """ 94 | Add vectors to the FAISS index. 95 | 96 | Args: 97 | vectors (np.ndarray): Array of vectors to add. 98 | 99 | Raises: 100 | ValueError: If the vector dimension does not match the index dimension. 101 | """ 102 | if vectors.shape[1] != self.dimension: 103 | raise ValueError(f"Vector dimension {vectors.shape[1]} does not match index dimension {self.dimension}") 104 | 105 | if self.metric == "IP": 106 | # Normalize vectors for Inner Product similarity 107 | vectors = np.apply_along_axis(self.normalize_vector, 1, vectors) 108 | 109 | self.index.add(vectors) 110 | 111 | @traced(type="tool") 112 | def search_vectors(self, query_vectors: np.ndarray, top_k: int = 10) -> Tuple[np.ndarray, np.ndarray]: 113 | """ 114 | Search for similar vectors in the FAISS index. 115 | 116 | Args: 117 | query_vectors (np.ndarray): Array of query vectors. 118 | top_k (int, optional): Number of results to return for each query vector. Defaults to 10. 119 | 120 | Returns: 121 | Tuple[np.ndarray, np.ndarray]: A tuple containing the distances and indices of the top-k results. 122 | 123 | Raises: 124 | ValueError: If the query vector dimension does not match the index dimension. 
125 | """ 126 | if query_vectors.shape[1] != self.dimension: 127 | raise ValueError(f"Query vector dimension {query_vectors.shape[1]} does not match index dimension {self.dimension}") 128 | 129 | if self.metric == "IP": 130 | # Normalize query vectors for Inner Product similarity 131 | query_vectors = np.apply_along_axis(self.normalize_vector, 1, query_vectors) 132 | 133 | distances, indices = self.index.search(query_vectors, top_k) 134 | return distances, indices 135 | 136 | @traced(type="tool") 137 | def remove_vectors(self, ids: np.ndarray) -> None: 138 | """ 139 | Remove vectors from the FAISS index by their IDs. 140 | 141 | Args: 142 | ids (np.ndarray): Array of vector IDs to remove. 143 | """ 144 | self.index.remove_ids(ids) 145 | 146 | def get_vector_count(self) -> int: 147 | """ 148 | Get the number of vectors in the FAISS index. 149 | 150 | Returns: 151 | int: Number of vectors in the index. 152 | """ 153 | return self.index.ntotal 154 | 155 | @traced(type="tool") 156 | @staticmethod 157 | def normalize_vector(vector: np.ndarray) -> np.ndarray: 158 | """ 159 | Normalize a vector to unit length. 160 | 161 | Args: 162 | vector (np.ndarray): The input vector. 163 | 164 | Returns: 165 | np.ndarray: The normalized vector. 166 | """ 167 | norm = np.linalg.norm(vector) 168 | return vector / norm if norm != 0 else vector 169 | 170 | def set_metadata(self, key: str, value: Any) -> None: 171 | """ 172 | Set metadata for the index. 173 | 174 | Args: 175 | key (str): Metadata key. 176 | value (Any): Metadata value. 177 | """ 178 | self.metadata[key] = value 179 | 180 | def get_metadata(self, key: str) -> Any: 181 | """ 182 | Get metadata from the index. 183 | 184 | Args: 185 | key (str): Metadata key. 186 | 187 | Returns: 188 | Any: Metadata value. 189 | """ 190 | return self.metadata.get(key) 191 | 192 | def set_embedding_info(self, provider: str, model: str) -> None: 193 | """ 194 | Set the embedding provider and model information. 
def check_pandas():
    """Import and return the pandas module, with a friendly error if it is absent.

    Returns:
        module: The imported ``pandas`` module.

    Raises:
        ImportError: If pandas is not installed, with install instructions.
    """
    try:
        import pandas
    except ImportError:
        raise ImportError("pandas is required for FredTools. Install with `pip install pandas`")
    return pandas
36 | """ 37 | Fred = check_fredapi() 38 | fred = Fred(api_key=os.getenv('FRED_API_KEY')) 39 | 40 | results = {} 41 | 42 | for indicator_id in indicator_ids: 43 | series = fred.get_series(indicator_id, observation_start=start_date, observation_end=end_date) 44 | series = series.dropna() 45 | 46 | if len(series) > 0: 47 | pct_change = series.pct_change() 48 | annual_change = series.resample('YE').last().pct_change() 49 | 50 | results[indicator_id] = { 51 | "indicator": indicator_id, 52 | "title": fred.get_series_info(indicator_id).title, 53 | "start_date": start_date, 54 | "end_date": end_date, 55 | "min_value": series.min(), 56 | "max_value": series.max(), 57 | "mean_value": series.mean(), 58 | "std_dev": series.std(), 59 | "pct_change_mean": pct_change.mean(), 60 | "pct_change_std": pct_change.std(), 61 | "annual_change_mean": annual_change.mean(), 62 | "annual_change_std": annual_change.std(), 63 | "last_value": series.iloc[-1], 64 | "last_pct_change": pct_change.iloc[-1], 65 | "last_annual_change": annual_change.iloc[-1] 66 | } 67 | else: 68 | results[indicator_id] = None 69 | 70 | return results 71 | 72 | @traced(type="tool") 73 | @staticmethod 74 | def yield_curve_analysis(treasury_maturities: List[str], start_date: str, end_date: str) -> Dict[str, Any]: 75 | """ 76 | Perform an analysis of the US Treasury yield curve. 77 | 78 | Args: 79 | treasury_maturities (List[str]): List of Treasury maturity series IDs. 80 | start_date (str): Start date for the analysis (YYYY-MM-DD). 81 | end_date (str): End date for the analysis (YYYY-MM-DD). 82 | 83 | Returns: 84 | Dict[str, Any]: A dictionary containing the yield curve analysis results. 
85 | """ 86 | pd = check_pandas() 87 | Fred = check_fredapi() 88 | fred = Fred(api_key=os.getenv('FRED_API_KEY')) 89 | 90 | yield_data = {} 91 | 92 | for maturity in treasury_maturities: 93 | series = fred.get_series(maturity, observation_start=start_date, observation_end=end_date) 94 | yield_data[maturity] = series 95 | 96 | yield_df = pd.DataFrame(yield_data) 97 | yield_df = yield_df.dropna() 98 | 99 | if len(yield_df) > 0: 100 | yield_curve_slopes = {} 101 | for i in range(len(treasury_maturities) - 1): 102 | short_maturity = treasury_maturities[i] 103 | long_maturity = treasury_maturities[i + 1] 104 | slope = yield_df[long_maturity] - yield_df[short_maturity] 105 | yield_curve_slopes[f"{short_maturity}_to_{long_maturity}"] = slope 106 | 107 | yield_curve_slopes_df = pd.DataFrame(yield_curve_slopes) 108 | 109 | results = { 110 | "start_date": start_date, 111 | "end_date": end_date, 112 | "yield_data": yield_df, 113 | "yield_curve_slopes": yield_curve_slopes_df, 114 | "inverted_yield_curve": yield_curve_slopes_df.min().min() < 0 115 | } 116 | else: 117 | results = None 118 | 119 | return results 120 | 121 | @traced(type="tool") 122 | @staticmethod 123 | def economic_news_sentiment_analysis(news_series_id: str, start_date: str, end_date: str) -> Dict[str, Any]: 124 | """ 125 | Perform sentiment analysis on economic news series. 126 | 127 | Args: 128 | news_series_id (str): Economic news series ID. 129 | start_date (str): Start date for the analysis (YYYY-MM-DD). 130 | end_date (str): End date for the analysis (YYYY-MM-DD). 131 | 132 | Returns: 133 | Dict[str, Any]: A dictionary containing the sentiment analysis results. 
134 | """ 135 | Fred = check_fredapi() 136 | fred = Fred(api_key=os.getenv('FRED_API_KEY')) 137 | 138 | series = fred.get_series(news_series_id, observation_start=start_date, observation_end=end_date) 139 | series = series.dropna() 140 | 141 | if len(series) > 0: 142 | sentiment_scores = series.apply(lambda x: 1 if x > 0 else (-1 if x < 0 else 0)) 143 | sentiment_counts = sentiment_scores.value_counts() 144 | 145 | results = { 146 | "series_id": news_series_id, 147 | "title": fred.get_series_info(news_series_id).title, 148 | "start_date": start_date, 149 | "end_date": end_date, 150 | "positive_sentiment_count": sentiment_counts.get(1, 0), 151 | "negative_sentiment_count": sentiment_counts.get(-1, 0), 152 | "neutral_sentiment_count": sentiment_counts.get(0, 0), 153 | "net_sentiment_score": sentiment_scores.sum() 154 | } 155 | else: 156 | results = None 157 | 158 | return results 159 | -------------------------------------------------------------------------------- /packages/python/src/mainframe_orchestra/tools/langchain_tools.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Mainframe-Orchestra Contributors. Licensed under Apache License 2.0. 2 | 3 | from typing import List 4 | from langchain_core.tools import module 5 | from ..utils.braintrust_utils import traced 6 | 7 | class LangchainTools: 8 | @staticmethod 9 | def _check_dependencies(): 10 | try: 11 | from langchain_core.tools import BaseTool 12 | from langchain_community.tools import _module_lookup 13 | except ImportError as e: 14 | raise ImportError( 15 | "Langchain dependencies are not installed. 
" 16 | "To use LangchainTools, install the required packages with " 17 | "'pip install langchain-core langchain-community'\n" 18 | f"Original error: {e}" 19 | ) 20 | 21 | @staticmethod 22 | def _wrap(langchain_tool): 23 | LangchainTools._check_dependencies() 24 | # Import optional dependencies inside the method 25 | from typing import Any, Callable, Type 26 | from pydantic.v1 import BaseModel 27 | import json 28 | from langchain_core.tools import BaseTool 29 | 30 | # Now proceed with the implementation 31 | def wrapped_tool(**kwargs: Any) -> str: 32 | tool_instance = langchain_tool() 33 | # Convert kwargs to a single string input 34 | tool_input = json.dumps(kwargs) 35 | return tool_instance.run(tool_input) 36 | 37 | tool_instance = langchain_tool() 38 | name = getattr(tool_instance, 'name', langchain_tool.__name__) 39 | description = getattr(tool_instance, 'description', "No description available") 40 | 41 | # Build the docstring dynamically 42 | doc_parts = [ 43 | f"- {name}:", 44 | f" Description: {description}", 45 | ] 46 | 47 | args_schema = getattr(langchain_tool, 'args_schema', None) or getattr(tool_instance, 'args_schema', None) 48 | if args_schema and issubclass(args_schema, BaseModel): 49 | doc_parts.append(" Arguments:") 50 | for field_name, field in args_schema.__fields__.items(): 51 | field_desc = field.field_info.description or "No description" 52 | doc_parts.append(f" - {field_name}: {field_desc}") 53 | 54 | wrapped_tool.__name__ = name 55 | wrapped_tool.__doc__ = "\n".join(doc_parts) 56 | return wrapped_tool 57 | @classmethod 58 | @traced(type="tool") 59 | def get_tool(cls, tool_name: str): 60 | cls._check_dependencies() 61 | from langchain_community.tools import _module_lookup 62 | import importlib 63 | 64 | if tool_name not in _module_lookup: 65 | raise ValueError(f"Unknown Langchain tool: {tool_name}") 66 | 67 | module_path = _module_lookup[tool_name] 68 | module = importlib.import_module(module_path) 69 | tool_class = getattr(module, tool_name) 
70 | 71 | wrapped_tool = LangchainTools._wrap(tool_class) 72 | return wrapped_tool 73 | 74 | @classmethod 75 | def list_available_tools(cls) -> List[str]: 76 | """ 77 | List all available Langchain tools. 78 | 79 | Returns: 80 | List[str]: A list of names of all available Langchain tools. 81 | 82 | Raises: 83 | ImportError: If langchain-community is not installed. 84 | 85 | Example: 86 | >>> tools = LangchainTools.list_available_tools() 87 | >>> "WikipediaQueryRun" in tools 88 | True 89 | """ 90 | try: 91 | from langchain_community.tools import _module_lookup 92 | except ImportError: 93 | print("Error: langchain-community is not installed. Please install it using 'pip install langchain-community'.") 94 | return [] 95 | 96 | return list(_module_lookup.keys()) 97 | 98 | @classmethod 99 | @traced(type="tool") 100 | def get_tool_info(cls, tool_name: str) -> dict: 101 | """ 102 | Retrieve information about a specific Langchain tool. 103 | 104 | Args: 105 | tool_name (str): The name of the Langchain tool. 106 | 107 | Returns: 108 | dict: A dictionary containing the tool's name, description, and module path. 109 | 110 | Raises: 111 | ValueError: If an unknown tool name is provided. 112 | ImportError: If langchain-community is not installed. 113 | 114 | Example: 115 | >>> info = LangchainTools.get_tool_info("WikipediaQueryRun") 116 | >>> "name" in info and "description" in info and "module_path" in info 117 | True 118 | """ 119 | cls._check_dependencies() 120 | try: 121 | from langchain_community.tools import _module_lookup 122 | except ImportError: 123 | raise ImportError("langchain-community is not installed. 
Please install it using 'pip install langchain-community'.") 124 | 125 | if tool_name not in _module_lookup: 126 | raise ValueError(f"Unknown Langchain tool: {tool_name}") 127 | 128 | module_path = _module_lookup[tool_name] 129 | import importlib 130 | module = importlib.import_module(module_path) 131 | tool_class = getattr(module, tool_name) 132 | 133 | tool_instance = tool_class() 134 | name = getattr(tool_instance, 'name', tool_class.__name__) 135 | description = getattr(tool_instance, 'description', "No description available") 136 | 137 | return { 138 | "name": name, 139 | "description": description, 140 | "module_path": module_path 141 | } 142 | -------------------------------------------------------------------------------- /packages/python/src/mainframe_orchestra/tools/stripe_tools.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Mainframe-Orchestra Contributors. Licensed under Apache License 2.0. 2 | 3 | from typing import Optional 4 | from dotenv import load_dotenv 5 | import os 6 | from stripe_agent_toolkit.api import StripeAPI 7 | from ..utils.braintrust_utils import traced 8 | from stripe_agent_toolkit.configuration import Context 9 | 10 | class StripeTools: 11 | _instance = None 12 | 13 | def __init__(self): 14 | load_dotenv() 15 | self.api = StripeAPI( 16 | secret_key=os.getenv('STRIPE_API_KEY'), 17 | context=Context() 18 | ) 19 | 20 | @classmethod 21 | @traced(type="tool") 22 | def get_api(cls): 23 | if cls._instance is None: 24 | cls._instance = cls() 25 | return cls._instance.api 26 | 27 | @classmethod 28 | @traced(type="tool") 29 | def check_balance(cls) -> str: 30 | """ 31 | Retrieve the current balance of your Stripe account. 
32 | 33 | Returns: 34 | str: JSON string containing available and pending balances in each currency 35 | """ 36 | return cls.get_api().run("retrieve_balance") 37 | 38 | @classmethod 39 | @traced(type="tool") 40 | def list_customers(cls, email: Optional[str] = None, limit: Optional[int] = None) -> str: 41 | """ 42 | List customers from your Stripe account with optional filtering. 43 | 44 | Args: 45 | email (str, optional): Filter customers by email address 46 | limit (int, optional): Maximum number of customers to return (1-100) 47 | 48 | Returns: 49 | str: JSON string containing list of customer objects 50 | """ 51 | return cls.get_api().run("list_customers", email=email, limit=limit) 52 | 53 | @traced(type="tool") 54 | @classmethod 55 | def list_products(cls, limit: Optional[int] = None) -> str: 56 | """ 57 | List products from your Stripe catalog. 58 | 59 | Args: 60 | limit (int, optional): Maximum number of products to return (1-100) 61 | 62 | Returns: 63 | str: JSON string containing list of product objects 64 | """ 65 | return cls.get_api().run("list_products", limit=limit) 66 | 67 | @traced(type="tool") 68 | @classmethod 69 | def create_customer(cls, name: str, email: Optional[str] = None) -> str: 70 | """ 71 | Create a new customer in Stripe. 72 | 73 | Args: 74 | name (str): The customer's full name 75 | email (str, optional): The customer's email address 76 | 77 | Returns: 78 | str: JSON string containing the created customer object 79 | """ 80 | return cls.get_api().run("create_customer", name=name, email=email) 81 | 82 | @traced(type="tool") 83 | @classmethod 84 | def create_product(cls, name: str, description: Optional[str] = None) -> str: 85 | """ 86 | Create a new product in your Stripe catalog. 
87 | 88 | Args: 89 | name (str): Name of the product 90 | description (str, optional): Detailed description of the product 91 | 92 | Returns: 93 | str: JSON string containing the created product object 94 | """ 95 | return cls.get_api().run("create_product", name=name, description=description) 96 | 97 | @traced(type="tool") 98 | @classmethod 99 | def create_price(cls, product: str, currency: str, unit_amount: int) -> str: 100 | """ 101 | Create a new price for a product in Stripe. 102 | 103 | Args: 104 | product (str): The ID of the product 105 | currency (str): Three-letter currency code (e.g., 'usd') 106 | unit_amount (int): Price amount in cents/smallest currency unit 107 | 108 | Returns: 109 | str: JSON string containing the created price object 110 | """ 111 | return cls.get_api().run("create_price", product=product, currency=currency, unit_amount=unit_amount) 112 | 113 | @traced(type="tool") 114 | @classmethod 115 | def list_prices(cls, product: Optional[str] = None, limit: Optional[int] = None) -> str: 116 | """ 117 | List prices from your Stripe catalog. 118 | 119 | Args: 120 | product (str, optional): Filter prices by product ID 121 | limit (int, optional): Maximum number of prices to return (1-100) 122 | 123 | Returns: 124 | str: JSON string containing list of price objects 125 | """ 126 | return cls.get_api().run("list_prices", product=product, limit=limit) 127 | 128 | @traced(type="tool") 129 | @classmethod 130 | def create_payment_link(cls, price: str, quantity: int) -> str: 131 | """ 132 | Create a payment link for a specific price. 
133 | 134 | Args: 135 | price (str): The ID of the price 136 | quantity (int): The quantity of the product 137 | 138 | Returns: 139 | str: JSON string containing the payment link object with URL 140 | """ 141 | return cls.get_api().run("create_payment_link", price=price, quantity=quantity) 142 | 143 | @traced(type="tool") 144 | @classmethod 145 | def create_invoice(cls, customer: str, days_until_due: int = 30) -> str: 146 | """ 147 | Create a new invoice for a customer. 148 | 149 | Args: 150 | customer (str): The ID of the customer 151 | days_until_due (int, optional): Number of days until invoice is due 152 | 153 | Returns: 154 | str: JSON string containing the created invoice object 155 | """ 156 | return cls.get_api().run("create_invoice", customer=customer, days_until_due=days_until_due) 157 | 158 | @traced(type="tool") 159 | @classmethod 160 | def create_invoice_item(cls, customer: str, price: str, invoice: str) -> str: 161 | """ 162 | Add an item to an invoice. 163 | 164 | Args: 165 | customer (str): The ID of the customer 166 | price (str): The ID of the price 167 | invoice (str): The ID of the invoice 168 | 169 | Returns: 170 | str: JSON string containing the created invoice item object 171 | """ 172 | return cls.get_api().run("create_invoice_item", customer=customer, price=price, invoice=invoice) 173 | 174 | @traced(type="tool") 175 | @classmethod 176 | def finalize_invoice(cls, invoice: str) -> str: 177 | """ 178 | Finalize an invoice for sending. 179 | 180 | Args: 181 | invoice (str): The ID of the invoice 182 | 183 | Returns: 184 | str: JSON string containing the finalized invoice object 185 | """ 186 | return cls.get_api().run("finalize_invoice", invoice=invoice) 187 | 188 | @traced(type="tool") 189 | @classmethod 190 | def create_refund(cls, payment_intent: str, amount: Optional[int] = None) -> str: 191 | """ 192 | Create a refund for a payment. 
193 | 194 | Args: 195 | payment_intent (str): The ID of the payment intent to refund 196 | amount (int, optional): Amount to refund in cents. If not provided, refunds entire payment. 197 | 198 | Returns: 199 | str: JSON string containing the created refund object 200 | """ 201 | return cls.get_api().run("create_refund", payment_intent=payment_intent, amount=amount) 202 | -------------------------------------------------------------------------------- /packages/python/src/mainframe_orchestra/tools/text_splitters.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Mainframe-Orchestra Contributors. Licensed under Apache License 2.0. 2 | 3 | from typing import List, Union 4 | import numpy as np 5 | from dotenv import load_dotenv 6 | from .embedding_tools import EmbeddingsTools 7 | from ..utils.braintrust_utils import traced 8 | import igraph as ig 9 | import leidenalg as la 10 | from sentence_splitter import SentenceSplitter as ExternalSentenceSplitter 11 | load_dotenv() 12 | 13 | class SemanticSplitter: 14 | def __init__(self, embedding_provider: str = "openai", embedding_model: str = "text-embedding-3-small"): 15 | self.embedding_provider = embedding_provider 16 | self.embedding_model = embedding_model 17 | 18 | @traced(type="tool") 19 | @staticmethod 20 | def chunk_text(text: Union[str, List[str]], rearrange: bool = False, 21 | embedding_provider: str = "openai", embedding_model: str = "text-embedding-3-small") -> List[str]: 22 | splitter = SemanticSplitter(embedding_provider, embedding_model) 23 | 24 | if isinstance(text, str): 25 | return splitter._process_single_text(text, rearrange) 26 | elif isinstance(text, list): 27 | all_chunks = [] 28 | for doc in text: 29 | all_chunks.extend(splitter._process_single_text(doc, rearrange)) 30 | return all_chunks 31 | else: 32 | raise ValueError("Input must be either a string or a list of strings") 33 | 34 | @traced(type="tool") 35 | def _process_single_text(self, text: 
str, rearrange: bool) -> List[str]: 36 | segments = self._create_sentence_segments(text) 37 | embeddings = self._embed_segments(segments) 38 | communities = self._detect_communities(embeddings) 39 | chunks = self._create_chunks_from_communities(segments, communities, rearrange) 40 | 41 | print(f"Created {len(chunks)} non-empty chunks for this document") 42 | return chunks 43 | 44 | @traced(type="tool") 45 | def _create_sentence_segments(self, text: str) -> List[str]: 46 | sentences = SentenceSplitter.split_text_by_sentences(text) 47 | segments = [sentence.strip() for sentence in sentences] 48 | print(f"Created {len(segments)} segments") 49 | return segments 50 | 51 | @traced(type="tool") 52 | def _embed_segments(self, segments: List[str]) -> np.ndarray: 53 | embeddings, _ = EmbeddingsTools.get_embeddings(segments, self.embedding_provider, self.embedding_model) 54 | return np.array(embeddings) 55 | 56 | @traced(type="tool") 57 | def _detect_communities(self, embeddings: np.ndarray) -> List[int]: 58 | if embeddings.shape[0] < 2: 59 | return [0] 60 | 61 | G = self._create_similarity_graph(embeddings, similarity_threshold=0.55) 62 | 63 | partition = self._find_optimal_partition(G, resolution=0.35) 64 | 65 | communities = partition.membership 66 | 67 | num_communities = len(set(communities)) 68 | print(f"Communities: {num_communities}") 69 | 70 | return communities 71 | 72 | @traced(type="tool") 73 | def _create_chunks_from_communities(self, segments: List[str], communities: List[int], rearrange: bool) -> List[str]: 74 | if rearrange: 75 | community_groups = {} 76 | for segment, community in zip(segments, communities): 77 | if community not in community_groups: 78 | community_groups[community] = [] 79 | community_groups[community].append(segment) 80 | 81 | chunks = [' '.join(group).strip() for group in community_groups.values() if group] 82 | else: 83 | chunks = [] 84 | current_community = communities[0] 85 | current_chunk = [] 86 | 87 | for segment, community in 
zip(segments, communities): 88 | if community != current_community: 89 | chunks.append(' '.join(current_chunk).strip()) 90 | current_chunk = [] 91 | current_community = community 92 | current_chunk.append(segment) 93 | 94 | if current_chunk: 95 | chunks.append(' '.join(current_chunk).strip()) 96 | 97 | return [chunk for chunk in chunks if chunk] 98 | 99 | @traced(type="tool") 100 | def _identify_breakpoints(self, communities: List[int]) -> List[int]: 101 | return [i for i in range(1, len(communities)) if communities[i] != communities[i-1]] 102 | 103 | @traced(type="tool") 104 | def _create_similarity_graph(self, embeddings: np.ndarray, similarity_threshold: float) -> ig.Graph: 105 | similarities = np.dot(embeddings, embeddings.T) 106 | np.fill_diagonal(similarities, 0) 107 | similarities = np.maximum(similarities, 0) 108 | similarities = (similarities - np.min(similarities)) / (np.max(similarities) - np.min(similarities)) 109 | 110 | adjacency_matrix = (similarities >= similarity_threshold).astype(int) 111 | 112 | G = ig.Graph.Adjacency(adjacency_matrix.tolist()) 113 | G.es['weight'] = similarities[np.where(adjacency_matrix)] 114 | return G 115 | 116 | @traced(type="tool") 117 | def _find_optimal_partition(self, G: ig.Graph, resolution: float) -> la.VertexPartition: 118 | return la.find_partition( 119 | G, 120 | la.CPMVertexPartition, 121 | weights='weight', 122 | resolution_parameter=resolution 123 | ) 124 | 125 | @traced(type="tool") 126 | def _split_oversized_communities(self, membership: List[int], max_size: int) -> List[int]: 127 | community_sizes = {} 128 | for comm in membership: 129 | community_sizes[comm] = community_sizes.get(comm, 0) + 1 130 | 131 | new_membership = [] 132 | current_comm = max(membership) + 1 133 | for i, comm in enumerate(membership): 134 | if community_sizes[comm] > max_size: 135 | if i % max_size == 0: 136 | current_comm += 1 137 | new_membership.append(current_comm) 138 | else: 139 | new_membership.append(comm) 140 | 141 | return 
new_membership 142 | 143 | class SentenceSplitter: 144 | @traced(type="tool") 145 | @staticmethod 146 | def split_text_by_sentences(text: str, chunk_size: int = 5, overlap: int = 1, language: str = 'en') -> List[str]: 147 | """ 148 | Split the text into chunks of sentences with overlap. 149 | 150 | :param text: The input text to split. 151 | :param chunk_size: The number of sentences per chunk. 152 | :param overlap: The number of sentences to overlap between chunks. 153 | :param language: The language of the text (default: 'en'). 154 | :return: A list of text chunks. 155 | """ 156 | splitter = ExternalSentenceSplitter(language=language) 157 | sentences = splitter.split(text) 158 | chunks = [] 159 | 160 | for i in range(0, len(sentences), chunk_size - overlap): 161 | chunk = ' '.join(sentences[i:i + chunk_size]) 162 | chunks.append(chunk.strip()) 163 | 164 | print(f"Created {len(chunks)} chunks with {chunk_size} sentences each and {overlap} sentence overlap") 165 | return chunks 166 | -------------------------------------------------------------------------------- /packages/python/src/mainframe_orchestra/utils/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /packages/python/src/mainframe_orchestra/utils/braintrust_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Mainframe-Orchestra Contributors. Licensed under Apache License 2.0. 2 | 3 | """ 4 | Utility module for handling optional Braintrust functionality. 5 | This module provides fallback decorators when Braintrust is not available. 6 | """ 7 | 8 | import os 9 | 10 | # Check if Braintrust integration is explicitly enabled or disabled via environment variable. 
11 | # Default to enabled if API key is present and no explicit setting 12 | BRAINTRUST_API_KEY_EXISTS = os.environ.get("BRAINTRUST_API_KEY", "") != "" 13 | BRAINTRUST_ENABLED = os.environ.get("BRAINTRUST_ORCHESTRA_ENABLED", "").lower() in ("true", "1", "yes") or ( 14 | BRAINTRUST_API_KEY_EXISTS and os.environ.get("BRAINTRUST_ORCHESTRA_ENABLED", None) is None 15 | ) 16 | 17 | # Default implementation of no-op decorators 18 | def traced(func=None, **kwargs): 19 | """No-op decorator when Braintrust is not available""" 20 | if func is None: 21 | def decorator(f): 22 | return f 23 | return decorator 24 | return func 25 | 26 | def wrap_openai(func): 27 | """No-op decorator when Braintrust is not available""" 28 | return func 29 | 30 | # Try to import Braintrust if enabled 31 | if BRAINTRUST_ENABLED: 32 | try: 33 | from braintrust import traced, wrap_openai 34 | except ImportError: 35 | # Keep the no-op implementations defined above 36 | pass 37 | -------------------------------------------------------------------------------- /packages/python/src/mainframe_orchestra/utils/logging_config.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Mainframe-Orchestra Contributors. Licensed under Apache License 2.0. 
2 | 3 | import os 4 | import logging 5 | 6 | # ANSI escape codes for colors 7 | class Colors: 8 | RESET = '\033[0m' 9 | BOLD = '\033[1m' 10 | DIM = '\033[2m' 11 | 12 | # Regular colors 13 | GRAY = '\033[38;5;240m' 14 | RED = '\033[31m' 15 | GREEN = '\033[32m' 16 | YELLOW = '\033[33m' 17 | BLUE = '\033[34m' 18 | MAGENTA = '\033[35m' 19 | CYAN = '\033[36m' 20 | WHITE = '\033[37m' 21 | 22 | # Bright colors 23 | BRIGHT_RED = '\033[91m' 24 | BRIGHT_GREEN = '\033[92m' 25 | BRIGHT_YELLOW = '\033[93m' 26 | BRIGHT_BLUE = '\033[94m' 27 | BRIGHT_MAGENTA = '\033[95m' 28 | BRIGHT_CYAN = '\033[96m' 29 | BRIGHT_WHITE = '\033[97m' 30 | 31 | 32 | class ColoredFormatter(logging.Formatter): 33 | """Custom formatter with colors""" 34 | 35 | FORMATS = { 36 | logging.DEBUG: Colors.GRAY + '%(asctime)s [%(levelname)s] Orchestra: %(message)s' + Colors.RESET, 37 | logging.INFO: Colors.GREEN + '%(asctime)s [%(levelname)s] Orchestra: %(message)s' + Colors.RESET, 38 | logging.WARNING: Colors.YELLOW + '%(asctime)s [%(levelname)s] Orchestra: %(message)s' + Colors.RESET, 39 | logging.ERROR: Colors.RED + '%(asctime)s [%(levelname)s] Orchestra: %(message)s' + Colors.RESET, 40 | logging.CRITICAL: Colors.BRIGHT_RED + Colors.BOLD + '%(asctime)s [%(levelname)s] Orchestra: %(message)s' + Colors.RESET 41 | } 42 | 43 | def format(self, record): 44 | log_fmt = self.FORMATS.get(record.levelno) 45 | formatter = logging.Formatter(log_fmt) 46 | return formatter.format(record) 47 | 48 | 49 | def configure_logger(): 50 | """Configure the main orchestra logger with colored output.""" 51 | logger = logging.getLogger("mainframe-orchestra") 52 | 53 | # Set log level from environment variable 54 | log_level = os.getenv("ORCHESTRA_LOG_LEVEL", "INFO").upper() 55 | logger.setLevel(getattr(logging, log_level, logging.INFO)) 56 | 57 | # Remove any existing handlers to avoid duplicates 58 | for handler in logger.handlers[:]: 59 | logger.removeHandler(handler) 60 | 61 | # Add console handler with colored formatter 62 | 
console_handler = logging.StreamHandler() 63 | console_handler.setFormatter(ColoredFormatter()) 64 | logger.addHandler(console_handler) 65 | 66 | # Setup file logging if ORCHESTRA_LOG_FILE is set 67 | log_file = os.getenv("ORCHESTRA_LOG_FILE") 68 | if log_file: 69 | file_handler = logging.FileHandler(log_file) 70 | file_handler.setFormatter(logging.Formatter( 71 | '%(asctime)s [%(levelname)s] %(name)s: %(message)s', 72 | datefmt='%Y-%m-%d %H:%M:%S' 73 | )) 74 | file_handler.setLevel(logging.DEBUG) # File logs capture everything 75 | logger.addHandler(file_handler) 76 | 77 | # Configure third-party loggers 78 | logging.getLogger("httpcore").setLevel(logging.WARNING) 79 | logging.getLogger("httpx").setLevel(logging.WARNING) 80 | logging.getLogger("openai").setLevel(logging.WARNING) 81 | logging.getLogger("asyncio").setLevel(logging.WARNING) 82 | logging.getLogger("anthropic").setLevel(logging.WARNING) 83 | logging.getLogger("groq").setLevel(logging.WARNING) 84 | logging.getLogger("groq._base_client").setLevel(logging.WARNING) 85 | 86 | return logger 87 | 88 | 89 | # Configure the logger when this module is imported 90 | logger = configure_logger() -------------------------------------------------------------------------------- /packages/python/src/mainframe_orchestra/utils/parse_json_response.py: -------------------------------------------------------------------------------- 1 | # Copyright 2025 Mainframe-Orchestra Contributors. Licensed under Apache License 2.0. 2 | 3 | def parse_json_response(response: str) -> dict: 4 | """ 5 | An improved JSON parser that better handles text before JSON content. 
6 | """ 7 | import json 8 | import re 9 | 10 | # First attempt: Try to parse the entire response 11 | try: 12 | return json.loads(response) 13 | except json.JSONDecodeError: 14 | # Second attempt: Find JSON by looking for the first { and matching closing } 15 | try: 16 | start_idx = response.find('{') 17 | if start_idx != -1: 18 | # Count opening and closing braces to find the complete JSON object 19 | open_count = 0 20 | for i in range(start_idx, len(response)): 21 | if response[i] == '{': 22 | open_count += 1 23 | elif response[i] == '}': 24 | open_count -= 1 25 | if open_count == 0: 26 | json_str = response[start_idx:i+1] 27 | # Try to remove comments before parsing 28 | try: 29 | # Remove both single-line and multi-line comments 30 | comment_pattern = r"//.*?(?:\n|$)|/\*.*?\*/" 31 | cleaned_json = re.sub(comment_pattern, "", json_str, flags=re.DOTALL) 32 | result = json.loads(cleaned_json) 33 | if isinstance(result, dict): 34 | return result 35 | except json.JSONDecodeError: 36 | # If comment removal didn't work, try the original string 37 | result = json.loads(json_str) 38 | if isinstance(result, dict): 39 | return result 40 | break 41 | except (json.JSONDecodeError, IndexError): 42 | pass 43 | 44 | # Third attempt: Use a more robust regex pattern 45 | json_pattern = r"(\{(?:[^{}]|(?:\{(?:[^{}]|(?:\{[^{}]*\}))*\}))*\})" 46 | json_matches = re.finditer(json_pattern, response, re.DOTALL) 47 | 48 | for match in json_matches: 49 | try: 50 | json_str = match.group(1) 51 | # Try to remove comments before parsing 52 | try: 53 | # Remove both single-line and multi-line comments 54 | comment_pattern = r"//.*?(?:\n|$)|/\*.*?\*/" 55 | cleaned_json = re.sub(comment_pattern, "", json_str, flags=re.DOTALL) 56 | result = json.loads(cleaned_json) 57 | if isinstance(result, dict): 58 | return result 59 | except json.JSONDecodeError: 60 | # If comment removal didn't work, try the original string 61 | result = json.loads(json_str) 62 | if isinstance(result, dict): 63 | return 
result 64 | except json.JSONDecodeError: 65 | continue 66 | 67 | # If all else fails, raise an error 68 | raise ValueError(f"Could not parse JSON from response: {response[:100]}...") 69 | -------------------------------------------------------------------------------- /pnpm-workspace.yaml: -------------------------------------------------------------------------------- 1 | packages: 2 | - "apps/*" 3 | - "packages/*" 4 | - "docs" 5 | -------------------------------------------------------------------------------- /scalar.config.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://cdn.scalar.com/schema/scalar-config.json", 3 | "subdomain": "orchestra", 4 | "siteMeta": { 5 | "favicon": "https://utfs.io/f/lKo6VaP8kaqVN0l5gFv4RxZaLwSkvEXPYuo98sp0Gtm5DjTH", 6 | "title": "Orchestra", 7 | "description":"Cognitive Architecture", 8 | "ogImage": "https://utfs.io/f/lKo6VaP8kaqV56n5xQJhNRfrE6Qe1FKVbdY9iOas0IwUDu42" 9 | }, 10 | "guides": [ 11 | { 12 | "name": "Orchestra", 13 | "sidebar": [ 14 | { 15 | "path": "docs/src/home.md", 16 | "type": "page" 17 | }, 18 | { 19 | "path": "docs/src/tasks.md", 20 | "type": "page" 21 | }, 22 | { 23 | "path": "docs/src/agents.md", 24 | "type": "page" 25 | }, 26 | { 27 | "path": "docs/src/multi-agent-teams.md", 28 | "type": "page" 29 | }, 30 | { 31 | "path": "docs/src/agentic-tool-use.md", 32 | "type": "page" 33 | }, 34 | { 35 | "path": "docs/src/orchestration.md", 36 | "type": "page" 37 | }, 38 | { 39 | "path": "docs/src/observability.md", 40 | "type": "page" 41 | }, 42 | { 43 | "path": "docs/src/mcp-integration.md", 44 | "type": "page" 45 | }, 46 | { 47 | "name": "Tools", 48 | "type": "folder", 49 | "children": [ 50 | { 51 | "path": "docs/src/tools/amadeus_tools.md", 52 | "type": "page" 53 | }, 54 | { 55 | "path": "docs/src/tools/audio_tools.md", 56 | "type": "page" 57 | }, 58 | { 59 | "path": "docs/src/tools/calculator_tools.md", 60 | "type": "page" 61 | }, 62 | { 63 | "path": 
"docs/src/tools/conversation_tools.md", 64 | "type": "page" 65 | }, 66 | { 67 | "path": "docs/src/tools/embeddings_tools.md", 68 | "type": "page" 69 | }, 70 | { 71 | "path": "docs/src/tools/faiss_tools.md", 72 | "type": "page" 73 | }, 74 | { 75 | "path": "docs/src/tools/file_tools.md", 76 | "type": "page" 77 | }, 78 | { 79 | "path": "docs/src/tools/fred_tools.md", 80 | "type": "page" 81 | }, 82 | { 83 | "path": "docs/src/tools/github_tools.md", 84 | "type": "page" 85 | }, 86 | { 87 | "path": "docs/src/tools/langchain_tools.md", 88 | "type": "page" 89 | }, 90 | { 91 | "path": "docs/src/tools/pinecone_tools.md", 92 | "type": "page" 93 | }, 94 | { 95 | "path": "docs/src/tools/text_splitters.md", 96 | "type": "page" 97 | }, 98 | { 99 | "path": "docs/src/tools/web_tools.md", 100 | "type": "page" 101 | }, 102 | { 103 | "path": "docs/src/tools/wikipedia_tools.md", 104 | "type": "page" 105 | }, 106 | { 107 | "path": "docs/src/tools/yahoo_finance_tools.md", 108 | "type": "page" 109 | }, 110 | { 111 | "path": "docs/src/custom_tools.md", 112 | "type": "page" 113 | } 114 | ] 115 | }, 116 | { 117 | "path": "docs/src/faq.md", 118 | "type": "page" 119 | }, 120 | { 121 | "path": "docs/src/llms.md", 122 | "type": "page" 123 | } 124 | ] 125 | } 126 | ], 127 | "references": [], 128 | "publishOnMerge": true, 129 | "siteConfig": { 130 | "logo": { 131 | "darkMode": "https://utfs.io/f/lKo6VaP8kaqVJPtdA2EzQjuIZeG5LtSRDsEcBb970lrfKyqF", 132 | "lightMode": "https://utfs.io/f/lKo6VaP8kaqVN0l5gFv4RxZaLwSkvEXPYuo98sp0Gtm5DjTH" 133 | }, 134 | "footer": "" 135 | }, 136 | "customDomain": "docs.orchestra.org" 137 | } 138 | -------------------------------------------------------------------------------- /turbo.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://turbo.build/schema.json", 3 | "globalDependencies": ["**/.env.*local"], 4 | "pipeline": { 5 | "build": { 6 | "dependsOn": ["^build"], 7 | "outputs": ["dist/**", ".next/**", 
"!.next/cache/**"] 8 | }, 9 | "lint": {}, 10 | "dev": { 11 | "cache": false, 12 | "persistent": true 13 | }, 14 | "clean": { 15 | "cache": false 16 | } 17 | } 18 | } 19 | --------------------------------------------------------------------------------