├── .env.example ├── .gitbook └── assets │ ├── Agents.png │ ├── image (1).png │ ├── image (10).png │ ├── image (11).png │ ├── image (12).png │ ├── image (13).png │ ├── image (14).png │ ├── image (15).png │ ├── image (16).png │ ├── image (17).png │ ├── image (18).png │ ├── image (19).png │ ├── image (2).png │ ├── image (20).png │ ├── image (21).png │ ├── image (22).png │ ├── image (23).png │ ├── image (24).png │ ├── image (25).png │ ├── image (26).png │ ├── image (27).png │ ├── image (28).png │ ├── image (29).png │ ├── image (3).png │ ├── image (30).png │ ├── image (31).png │ ├── image (32).png │ ├── image (33).png │ ├── image (34).png │ ├── image (35).png │ ├── image (36).png │ ├── image (37).png │ ├── image (38).png │ ├── image (39).png │ ├── image (4).png │ ├── image (5).png │ ├── image (6).png │ ├── image (7).png │ ├── image (8).png │ └── image (9).png ├── .github └── workflows │ └── pr_review_check.yml ├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── assets └── openagi.png ├── benchmark.py ├── cookbook ├── custom_tool_integration.ipynb ├── file_reading_agent_xai.py ├── human_intervention.ipynb ├── manual_already_task_planned.py ├── research_paper_analysis (1).ipynb ├── single_agent_execution_groq.ipynb └── tavily_and_gemini_use.py ├── docs ├── .gitbook │ └── assets │ │ ├── 1.png │ │ ├── 2.png │ │ ├── 3.png │ │ ├── Agents.png │ │ ├── Screenshot 2024-08-22 at 15.51.33.png │ │ ├── image (1) (1).png │ │ ├── image (1).png │ │ ├── image (10).png │ │ ├── image (11).png │ │ ├── image (12).png │ │ ├── image (13).png │ │ ├── image (14).png │ │ ├── image (15).png │ │ ├── image (16).png │ │ ├── image (17).png │ │ ├── image (18).png │ │ ├── image (19).png │ │ ├── image (2) (1).png │ │ ├── image (2).png │ │ ├── image (20).png │ │ ├── image (21).png │ │ ├── image (22).png │ │ ├── image (23).png │ │ ├── image (24).png │ │ ├── image (25).png │ │ ├── image (26).png │ │ ├── image (27).png │ │ ├── image (28).png │ │ ├── image (29).png │ │ ├── image (3) (1).png │ │ ├── image (3).png │ │ ├── image (30).png │ │ ├── image (31).png │ │ ├── image (32).png │ │ ├── image (33).png │ │ ├── image (34).png │ │ ├── image (35).png │ │ ├── image (36).png │ │ ├── image (37).png │ │ ├── image (38).png │ │ ├── image (39).png │ │ ├── image (4).png │ │ ├── image (40).png │ │ ├── image (41).png │ │ ├── image (42).png │ │ ├── image (43).png │ │ ├── image (44).png │ │ ├── image (45).png │ │ ├── image (5).png │ │ ├── image (6).png │ │ ├── image (7).png │ │ ├── image (8).png │ │ ├── image (9).png │ │ └── image.png ├── README.md ├── SUMMARY.md ├── acknowledgment │ └── special-mentions.md ├── components │ ├── action │ │ ├── README.md │ │ └── tools.md │ ├── admin.md │ ├── aiagent │ │ ├── README.md │ │ └── agent-configuration.md │ ├── llm.md │ ├── memory.md │ ├── planner.md │ ├── vectorstore │ │ ├── README.md │ │ └── chromastorage.md │ └── workers.md ├── contact-us.md ├── getting-started │ ├── installation.md │ └── quickstart.md └── use-cases │ ├── blog-writing-agent.md │ ├── github-agent.md │ ├── jobsearch-agent.md │ ├── market-agent.md │ └── movie-recommender-agent.md ├── example ├── Curriculum_builder.ipynb ├── blog_post.py ├── customer_feedback.py ├── hotel_map_agent.py ├── itinerary_planner.py ├── job_post.py ├── job_search.py ├── market_research.py ├── marketing_campaign_for_product.py ├── movie_recommendation.ipynb ├── news_updates.py ├── science_lab_learning_agent.ipynb └── youtube_study_plan.py ├── poetry.lock ├── pyproject.toml ├── requirements.txt └── src ├── Readme.md └── openagi ├── __init__.py ├── actions ├── __init__.py ├── base.py ├── compressor.py ├── console.py ├── files.py ├── formatter.py ├── human_input.py ├── obs_rag.py ├── tools │ ├── __init__.py │ ├── arxiv_search.py │ ├── dalle_tool.py │ ├── ddg_search.py │ ├── document_loader.py │ ├── exasearch.py │ ├── github_search_tool.py │ ├── google_search_tool.py │ ├── luma_ai.py │ ├── pubmed_tool.py │ ├── reddit.py │ ├── searchapi_search.py │ ├── serp_search.py │ ├── serper_search.py │ ├── speech_tool.py │ ├── tavilyqasearch.py │ ├── unstructured_io.py │ ├── webloader.py │ ├── wikipedia_search.py │ ├── yahoo_finance.py │ └── youtubesearch.py └── utils.py ├── agent.py ├── cli.py ├── exception.py ├── llms ├── __init__.py ├── azure.py ├── base.py ├── cerebras.py ├── claude.py ├── cohere.py ├── gemini.py ├── groq.py ├── hf.py ├── mistral.py ├── ollama.py ├── openai.py ├── sambanova.py └── xai.py ├── memory ├── __init__.py ├── base.py ├── memory.py └── sessiondict.py ├── planner ├── LATS.py ├── __init__.py ├── base.py ├── reflexion.py └── task_decomposer.py ├── prompts ├── __init__.py ├── base.py ├── constants.py ├── execution.py ├── ltm.py ├── summarizer.py ├── task_clarification.py ├── task_creator.py └── worker_task_execution.py ├── storage ├── __init__.py ├── base.py └── chroma.py ├── tasks ├── __init__.py ├── lists.py └── task.py ├── utils ├── extraction.py ├── helper.py ├── llmTasks.py ├── tool_list.py └── yamlParse.py └── worker.py /.env.example: -------------------------------------------------------------------------------- 1 | # for azure: 2 | 3 | AZURE_BASE_URL = 4 | AZURE_DEPLOYMENT_NAME = 5 | AZURE_AZURE_DEPLOYMENT = 6 | AZURE_MODEL_NAME = 7 | AZURE_OPENAI_API_VERSION = 8 | 9 | # for openai 10 | 11 | OPENAI_API_KEY = 12 | -------------------------------------------------------------------------------- /.gitbook/assets/Agents.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/Agents.png -------------------------------------------------------------------------------- /.gitbook/assets/image (1).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (1).png -------------------------------------------------------------------------------- /.gitbook/assets/image (10).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (10).png -------------------------------------------------------------------------------- /.gitbook/assets/image (11).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (11).png -------------------------------------------------------------------------------- /.gitbook/assets/image (12).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (12).png -------------------------------------------------------------------------------- /.gitbook/assets/image (13).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (13).png -------------------------------------------------------------------------------- /.gitbook/assets/image (14).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (14).png -------------------------------------------------------------------------------- /.gitbook/assets/image (15).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (15).png -------------------------------------------------------------------------------- /.gitbook/assets/image (16).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (16).png -------------------------------------------------------------------------------- /.gitbook/assets/image (17).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (17).png -------------------------------------------------------------------------------- /.gitbook/assets/image (18).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (18).png -------------------------------------------------------------------------------- /.gitbook/assets/image (19).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (19).png -------------------------------------------------------------------------------- /.gitbook/assets/image (2).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (2).png -------------------------------------------------------------------------------- /.gitbook/assets/image (20).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (20).png -------------------------------------------------------------------------------- /.gitbook/assets/image (21).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (21).png -------------------------------------------------------------------------------- /.gitbook/assets/image (22).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (22).png -------------------------------------------------------------------------------- /.gitbook/assets/image (23).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (23).png -------------------------------------------------------------------------------- /.gitbook/assets/image (24).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (24).png -------------------------------------------------------------------------------- /.gitbook/assets/image (25).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (25).png -------------------------------------------------------------------------------- /.gitbook/assets/image (26).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (26).png -------------------------------------------------------------------------------- /.gitbook/assets/image (27).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (27).png -------------------------------------------------------------------------------- /.gitbook/assets/image (28).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (28).png -------------------------------------------------------------------------------- /.gitbook/assets/image (29).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (29).png -------------------------------------------------------------------------------- /.gitbook/assets/image (3).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (3).png -------------------------------------------------------------------------------- /.gitbook/assets/image (30).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (30).png -------------------------------------------------------------------------------- /.gitbook/assets/image (31).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (31).png -------------------------------------------------------------------------------- /.gitbook/assets/image (32).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (32).png -------------------------------------------------------------------------------- /.gitbook/assets/image (33).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (33).png -------------------------------------------------------------------------------- /.gitbook/assets/image (34).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (34).png -------------------------------------------------------------------------------- /.gitbook/assets/image (35).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (35).png -------------------------------------------------------------------------------- /.gitbook/assets/image (36).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (36).png -------------------------------------------------------------------------------- /.gitbook/assets/image (37).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (37).png -------------------------------------------------------------------------------- /.gitbook/assets/image (38).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (38).png -------------------------------------------------------------------------------- /.gitbook/assets/image (39).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (39).png -------------------------------------------------------------------------------- /.gitbook/assets/image (4).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (4).png -------------------------------------------------------------------------------- /.gitbook/assets/image (5).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (5).png -------------------------------------------------------------------------------- /.gitbook/assets/image (6).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (6).png -------------------------------------------------------------------------------- /.gitbook/assets/image (7).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (7).png -------------------------------------------------------------------------------- /.gitbook/assets/image (8).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (8).png -------------------------------------------------------------------------------- /.gitbook/assets/image (9).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/.gitbook/assets/image (9).png -------------------------------------------------------------------------------- /.github/workflows/pr_review_check.yml: -------------------------------------------------------------------------------- 1 | name: PR Review Check 2 | 3 | on: 4 | pull_request: 5 | types: [opened, synchronize, reopened, ready_for_review] 6 | 7 | jobs: 8 | check-review: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - name: Check out code 12 | uses: actions/checkout@v3 13 | 14 | - name: Check for PR review 15 | uses: actions/github-script@v6 16 | with: 17 | github-token: ${{secrets.GITHUB_TOKEN}} 18 | script: | 19 | const { data: reviews } = await github.rest.pulls.listReviews({ 20 | owner: context.repo.owner, 21 | repo: context.repo.repo, 22 | pull_number: context.issue.number 23 | }); 24 | 25 | const approvedReviews = reviews.filter(review => review.state === 'APPROVED'); 26 | 27 | if (approvedReviews.length === 0) { 28 | core.setFailed('At least one approved review is required before merging.'); 29 | } else { 30 | console.log(`${approvedReviews.length} approved reviews found.`); 31 | } 32 | 33 | - name: Retry on failure 34 | if: failure() 35 | uses: actions/github-script@v6 36 | with: 37 | github-token: ${{secrets.GITHUB_TOKEN}} 38 | script: | 39 | console.log('Retrying check for approved reviews...'); 40 | const { data: reviews } = await github.rest.pulls.listReviews({ 41 | owner: context.repo.owner, 42 | repo: context.repo.repo, 43 | pull_number: context.issue.number 44 | }); 45 | 46 | const approvedReviews = reviews.filter(review => review.state === 'APPROVED'); 47 | 48 | if (approvedReviews.length === 0) { 49 | core.setFailed('PR cannot be merged as no approved reviews are found.'); 50 | } else { 51 | console.log(`${approvedReviews.length} approved reviews found on retry.`); 52 | } 53 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Python 2 | __pycache__/ 3 | *.pyc 4 | *.pyo 5 | *.pyd 6 | 7 | # Virtual Environment 8 | venv/ 9 | env/ 10 | env.bak/ 11 | env1/ 12 | env2/ 13 | .env 14 | 15 | # IDE 16 | .vscode/ 17 | .idea/ 18 | 19 | # Logs 20 | *.log 21 | 22 | # Build 23 | build/ 24 | dist/ 25 | *.egg-info/ 26 | *.egg 27 | 28 | # Compiled Python files 29 | *.pyc 30 | *.pyo 31 | 32 | # Test coverage 33 | htmlcov/ 34 | 35 | # Dependency directories 36 | lib/ 37 | libs/ 38 | lib64/ 39 | include/ 40 | includes/ 41 | bin/ 42 | 43 | # Directories 44 | documents/ 45 | 46 | # credentials 47 | credentials.json 48 | token.json 49 | *.pem 50 | *.yaml 51 | 52 | # Files 53 | .DS_Store 54 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to OpenAGI 2 | 3 | Thank you for your interest in contributing to OpenAGI! We appreciate your efforts in helping us make human-like agents accessible to everyone. This guide outlines the steps to start contributing effectively. 4 | 5 | ## Forking the Repository 6 | To contribute to OpenAGI, you need to fork the repository and clone it locally. Follow these steps: 7 | 8 | 1. Navigate to the [OpenAGI repository](https://github.com/aiplanethub/openagi.git). 9 | 2. Click the **Fork** button in the top-right corner to create a personal copy of the repository. 10 | 3. Clone the repository to your local machine: 11 | 12 | ```bash 13 | git clone https://github.com/your-username/openagi.git 14 | cd openagi 15 | ``` 16 | 17 | ## Setting up Your Environment 18 | Once you have cloned the repository, set up a development environment to work on the code. Follow the instructions below to create and activate a virtual environment. 19 | 20 | ## Installation 21 | 22 | 1. Setup a virtual environment. 23 | 24 | ```bash 25 | # For Mac users 26 | python3 -m venv venv 27 | source venv/bin/activate 28 | 29 | # For Windows users 30 | python -m venv venv 31 | venv/scripts/activate 32 | ``` 33 | 34 | 2. Install the openagi 35 | 36 | ```bash 37 | pip install openagi 38 | ``` 39 | or 40 | ```bash 41 | git clone https://github.com/your-username/openagi.git 42 | pip install -e . 43 | ``` 44 | 45 | ## Making Changes 46 | Before making any changes to the codebase, follow these steps: 47 | 48 | ```bash 49 | git checkout main 50 | git checkout -b feature-branch-name 51 | ``` 52 | 53 | Make your changes in the relevant directories (e.g., src, docs, cookbook, etc.). Be sure to follow the coding guidelines and maintain consistency with the project’s code style. 54 | 55 | ## Testing Your Changes 56 | 57 | Before submitting your changes, it is crucial to ensure that your modifications work as expected. Follow these steps to test your changes locally: 58 | 1. Run the necessary tests or manually check the functionality you have worked on. 59 | 2. Ensure that no other features are broken due to your changes. 60 | 61 | ## Submitting Your Pull Request 62 | Once you have tested your changes and everything is working correctly, submit your contribution by following these steps: 63 | 64 | ``` 65 | # Stage your changes: 66 | git add . 67 | 68 | # Commit your changes with a meaningful commit message: 69 | git commit -m "Brief description of the changes made" 70 | 71 | # Push your changes to your forked repository: 72 | git push origin feature-branch-name 73 | ``` 74 | 75 | ## Open a Pull Request (PR): 76 | 77 | 1. Navigate to the OpenAGI repository on GitHub. 78 | 2. Click the Pull Requests tab, then click New Pull Request. 79 | 3. Select the branch you pushed from the dropdown menu. 80 | 4. Add a title and a detailed description of your changes. 81 | 5. Click Submit Pull Request. 82 | 83 | -------------------------------------------------------------------------------- /assets/openagi.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/assets/openagi.png -------------------------------------------------------------------------------- /cookbook/file_reading_agent_xai.py: -------------------------------------------------------------------------------- 1 | # Import required modules from the OpenAGI framework 2 | from openagi.planner.task_decomposer import TaskPlanner # For autonomous task planning 3 | from openagi.agent import Admin # Main agent class that orchestrates the process 4 | from openagi.actions.tools.document_loader import TextLoaderTool # Tool for reading text files 5 | from openagi.llms.xai import XAIModel # XAI language model integration 6 | import os 7 | 8 | # Configure the text loader tool to read from a specific file 9 | # This tool will look for answers in the specified file path 10 | TextLoaderTool.set_config({ 11 | "filename": "src/answer.txt" # Path to the file containing the answer 12 | }) 13 | 14 | # Set up the API key for the XAI model 15 | # This should be replaced with your actual API key 16 | os.environ['XAI_API_KEY'] = "" 17 | 18 | # Load XAI model configuration from environment variables 19 | # This typically includes settings like model type, temperature, etc. 20 | grok_config = XAIModel.load_from_env_config() 21 | 22 | # Initialize the XAI language model with the loaded configuration 23 | llm = XAIModel(config=grok_config) 24 | 25 | # Create a task planner instance 26 | # autonomous=True: The planner will execute tasks without waiting for approval 27 | # human_intervene=False: No human intervention will be requested during execution 28 | plan = TaskPlanner( 29 | autonomous=True, 30 | human_intervene=False 31 | ) 32 | 33 | # Initialize the Admin agent 34 | # This is the main orchestrator that combines: 35 | # - actions: List of available tools (only TextLoaderTool in this case) 36 | # - planner: The task planner that decides what actions to take 37 | # - llm: The language model that will process the text and generate responses 38 | admin = Admin( 39 | actions=[TextLoaderTool], 40 | planner=plan, 41 | llm=llm, 42 | ) 43 | 44 | # Execute the agent with a specific query 45 | # query: The question we want to answer 46 | # description: Brief description of what the agent should do 47 | res = admin.run( 48 | query="who is Virat kohli friend", # The question about Virat Kohli's friends 49 | description="read from file", # Instructs the agent to read the answer from the file 50 | ) 51 | 52 | # Print the result obtained from the agent 53 | print(res) 54 | -------------------------------------------------------------------------------- /cookbook/manual_already_task_planned.py: -------------------------------------------------------------------------------- 1 | # Manual Task Planned by User 2 | 3 | ## Using Mistral and Tavily 4 | 5 | from openagi.planner.task_decomposer import TaskPlanner 6 | from openagi.actions.tools.tavilyqasearch import TavilyWebSearchQA 7 | from openagi.agent import Admin 8 | from openagi.llms.mistral import MistralModel 9 | 10 | import os 11 | from getpass import getpass 12 | 13 | # setup Gemini and Tavily API Key 14 | os.environ['TAVILY_API_KEY'] = getpass("Tavily API Key: ") 15 | os.environ['MISTRAL_API_KEY'] = getpass("Mistral API Key: ") 16 | 17 | gemini_config = MistralModel.load_from_env_config() 18 | llm = MistralModel(config=gemini_config) 19 | 20 | # define the planner 21 | plan = TaskPlanner(autonomous=True,human_intervene=True) 22 | 23 | admin = Admin( 24 | actions = [TavilyWebSearchQA], 25 | planner = plan, 26 | llm = llm, 27 | ) 28 | 29 | already_planned = [{'worker_name': 'CricketNewsScraper', 'role': 'Cricket Data Extractor', 'instruction': 'Retrieve the latest cricket updates for the India vs Sri Lanka ODI series in 2024 from reliable sources.', 'task_id': '1', 'task_name': 'FetchCricketUpdates', 'description': "Use TavilyWebSearchQA to search for 'India vs Sri Lanka ODI series 2024 results' and extract the relevant information. Focus on finding the match scores, Man of the Match, and other key details. Handle any potential errors by retrying the search or providing a fallback message.", 'supported_actions': ['TavilyWebSearchQA']}, {'worker_name': 'CricketResultSummarizer', 'role': 'Data Processor', 'instruction': 'Analyze the retrieved cricket data and summarize the results for the user.', 'task_id': '2', 'task_name': 'SummarizeCricketResults', 'description': 'Use MemoryRagAction to access the results from the previous task. Extract the match scores, Man of the Match, and other key details. Format the information in a clear and concise manner for the user.', 'supported_actions': ['MemoryRagAction']}] 30 | 31 | res = admin.run( 32 | query="I need cricket updates from India vs Sri lanka 2024 ODI match in Sri Lanka", 33 | description=f"give me the results of India vs Sri Lanka ODI and respective Man of the Match", 34 | planned_tasks = already_planned 35 | ) 36 | print(res) -------------------------------------------------------------------------------- /cookbook/tavily_and_gemini_use.py: -------------------------------------------------------------------------------- 1 | """ 2 | !pip install openagi 3 | !pip install tavily-python 4 | !pip install langchain-google-genai 5 | !pip install yt-dlp youtube-search 6 | """ 7 | 8 | from openagi.planner.task_decomposer import TaskPlanner 9 | from openagi.actions.tools.tavilyqasearch import TavilyWebSearchQA 10 | from openagi.agent import Admin 11 | from openagi.llms.gemini import GeminiModel 12 | 13 | import os 14 | from getpass import getpass 15 | 16 | # setup Gemini and Tavily API Key 17 | os.environ['TAVILY_API_KEY'] = getpass("Enter Tavily API key:") 18 | 19 | os.environ['GOOGLE_API_KEY'] = getpass("Enter your Gemini API key:") 20 | os.environ['Gemini_MODEL'] = "gemini-1.5-flash" 21 | os.environ['Gemini_TEMP'] = "0.7" 22 | 23 | gemini_config = GeminiModel.load_from_env_config() 24 | llm = GeminiModel(config=gemini_config) 25 | 26 | # define the planner 27 | plan = TaskPlanner(autonomous=True,human_intervene=True) 28 | 29 | admin = Admin( 30 | actions = [TavilyWebSearchQA], 31 | planner = plan, 32 | llm = llm, 33 | ) 34 | 35 | res = admin.run( 36 | query="I need cricket updates from India vs Sri lanka 2024 ODI match in Sri Lanka", 37 | description=f"give me the results of India vs Sri Lanka ODI and respective Man of the Match", 38 | ) 39 | print(res) 40 | -------------------------------------------------------------------------------- /docs/.gitbook/assets/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/1.png -------------------------------------------------------------------------------- /docs/.gitbook/assets/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/2.png -------------------------------------------------------------------------------- /docs/.gitbook/assets/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/3.png -------------------------------------------------------------------------------- /docs/.gitbook/assets/Agents.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/Agents.png -------------------------------------------------------------------------------- /docs/.gitbook/assets/Screenshot 2024-08-22 at 15.51.33.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/Screenshot 2024-08-22 at 15.51.33.png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (1) (1).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (1) (1).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (1).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (1).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (10).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (10).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (11).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (11).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (12).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (12).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (13).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (13).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (14).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (14).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (15).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (15).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (16).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (16).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (17).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (17).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (18).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (18).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (19).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (19).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (2) (1).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (2) (1).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (2).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (2).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (20).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (20).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (21).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (21).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (22).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (22).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (23).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (23).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (24).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (24).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (25).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (25).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (26).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (26).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (27).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (27).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (28).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (28).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (29).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (29).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (3) (1).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (3) (1).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (3).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (3).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (30).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (30).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (31).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (31).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (32).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (32).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (33).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (33).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (34).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (34).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (35).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (35).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (36).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (36).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (37).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (37).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (38).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (38).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (39).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (39).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (4).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (4).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (40).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (40).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (41).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (41).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (42).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (42).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (43).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (43).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (44).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (44).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (45).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (45).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (5).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (5).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (6).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (6).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (7).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (7).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (8).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (8).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image (9).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image (9).png -------------------------------------------------------------------------------- /docs/.gitbook/assets/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/docs/.gitbook/assets/image.png -------------------------------------------------------------------------------- /docs/SUMMARY.md: -------------------------------------------------------------------------------- 1 | # Table of contents 2 | 3 | ## Getting started 4 | 5 | * [📝Introduction](README.md) 6 | * [🔧 Installation](getting-started/installation.md) 7 | * [🚀 Quickstart](getting-started/quickstart.md) 8 | 9 | ## Components 10 | 11 | * [👨‍💼 Admin](components/admin.md) 12 | * [👷 Workers](components/workers.md) 13 | * [🗂 Planner](components/planner.md) 14 | * [🧠 LLM](components/llm.md) 15 | * [🔧 Action](components/action/README.md) 16 | * [🛠️ Tools](components/action/tools.md) 17 | * [🧠 Memory](components/memory.md) 18 | * [📦 VectorStore](components/vectorstore/README.md) 19 | * [💾 ChromaStorage](components/vectorstore/chromastorage.md) 20 | 21 | ## 🛠️ USE CASES 22 | 23 | * [🎬 Movie Recommender Agent](use-cases/movie-recommender-agent.md) 24 | * [🔍 JobSearch Agent](use-cases/jobsearch-agent.md) 25 | * [✍️ Blog Writing Agent](use-cases/blog-writing-agent.md) 26 | * [📰 News Agent](use-cases/github-agent.md) 27 | * [📅 Itinerary Planner](use-cases/market-agent.md) 28 | 29 | ## 💬 ACKNOWLEDGMENT 30 | 31 | * [🏅 Special Mentions](acknowledgment/special-mentions.md) 32 | 33 | *** 34 | 35 | * [📞 Contact Us](contact-us.md) 36 | -------------------------------------------------------------------------------- /docs/acknowledgment/special-mentions.md: -------------------------------------------------------------------------------- 1 | # 🏅 Special Mentions 2 | 3 | This work would not have been possible without the incredible support from various open source and other open integrations. We especially thank the following open-source tools for their inspiration. 4 | 5 | * Langchain 6 | * CrewAI 7 | * AutoGen 8 | 9 | Our heartfelt gratitude goes to all the team members at AI Planet for putting this together. 10 | -------------------------------------------------------------------------------- /docs/components/action/README.md: -------------------------------------------------------------------------------- 1 | # 🔧 Action 2 | 3 | ### What is Action? 4 | 5 | Actions provide predefined functionalities that the Agent can invoke to accomplish various tasks. These tasks include fetching data from external sources, processing the data to extract meaningful insights, and storing the results for subsequent use. 6 | 7 | The Agent invokes actions during its runtime to execute specific tasks. For example, when a user queries the agent, the agent might use a search action to gather information and then a processing action to analyze it 8 | 9 | ### Attributes 10 | 11 | The parameter attributes for Actions is dynamic and it varies based on the different use cases. One can directly pass the supported tools, files as list for defining the actions. 12 | 13 | ### Code Snippet 14 | 15 | ```python 16 | from openagi.actions.files import WriteFileAction 17 | from openagi.actions.tools.ddg_search import DuckDuckGoSearch 18 | 19 | actions = [ 20 | DuckDuckGoSearch, 21 | WriteFileAction, 22 | ] 23 | ``` 24 | -------------------------------------------------------------------------------- /docs/components/admin.md: -------------------------------------------------------------------------------- 1 | # 👨‍💼 Admin 2 | 3 | ## What is an Admin? 4 | 5 | Imagine Admin as the master task executor who is responsible for all the major configurations for the execution. From planning of tasks to execution, and defining the brain which is what LLM to use and whether or not to use memory. 6 | 7 | Admin is the decision-maker that understand the specifications of tasks and execute them in a more human-like manner. 8 | 9 | ## Attributes 10 | 11 | The `Admin` class in the `openagi` library is a central component designed to manage and orchestrate various functionalities within the framework. Below is a detailed explanation of its components, attributes, and usage. 12 | 13 | The `Admin` class in the OpenAGI framework can be considered an Agent. 14 | 15 | 16 | 17 |
AttributeOptional ParameterDescription
plannerHelp us define the type of planner we can use to decompose the given task into sub tasks.
llmUsers can provide an LLM of their choosing, or use the default one.
memoryYesUsers can initiate Admin memory, to recall and remember the task and it's response
actionsAdmin can be given access to various actions to perform it's task, such as SearchAction, Github Action, etc.
output_formatYesUsers can define the output format as either "markdown" or "raw_text"
max_stepsYesThe number of iterations admin can perform to obtain appropriate output.
18 | 19 | ### Code Snippet 20 | 21 |
from openagi.agent import Admin
22 | 
23 | admin = Admin(
24 |     llm=llm,
25 |     actions=actions,
26 |     planner=planner,
27 | )
28 | 
29 | 30 | Below we have shown how one can initiate and run a simple admin query. 31 | 32 | ```python 33 | # imports 34 | from openagi.agent import Admin 35 | from openagi.llms.openai import OpenAIModel 36 | from openagi.planner.task_decomposer import TaskPlanner 37 | from openagi.actions.tools.ddg_search import DuckDuckGoSearch 38 | from openagi.memory import Memory 39 | 40 | # Define LLM 41 | config = OpenAIModel.load_from_env_config() 42 | llm = OpenAIModel(config=config) 43 | 44 | # declare the Admin 45 | admin = Admin( 46 | llm=llm, 47 | actions=[DuckDuckGoSearch], 48 | planner=TaskPlanner(human_intervene=False), 49 | memory=Memory(), 50 | output_type=OutputFormat.markdown, # Defaults to markdown 51 | ) 52 | 53 | # execute the task 54 | res = admin.run( 55 | query="sample query", 56 | description="sample description", 57 | ) 58 | ``` 59 | -------------------------------------------------------------------------------- /docs/components/aiagent/README.md: -------------------------------------------------------------------------------- 1 | # AIAgent 2 | 3 | Within this system, an agent is described as an entity that engages with its surroundings by utilizing sensors to observe and actuators to interact. This engagement is a continual cycle of sensing, thinking, and acting. The agents in this system are equipped with a broad spectrum of tools and a logic engine (LLM) that enables them to execute tasks based on set parameters. 4 | 5 | The agents have the capability to communicate with one another and make use of the tools provided to them. After completing a task, an agent forwards the outcome to another agent for subsequent processing. Each agent has required and optional characteristics, which are detailed in subsequent sections. Communication among agents is managed through priority queues, supporting different execution methods like parallel, sequential, aggregate, or dynamic execution, details of which are further discussed in later sections. 6 | 7 | A human user initiates the agent's execution, and the final output is generally presented to the user as a printed message in the current release. 8 | 9 | For more details on the specific attributes of agents, refer to the Agent Configuration section. 10 | 11 |
from openagi.agent import AIAgent
12 | 
13 | AIAgent(
14 |         agentName=agent_list[2],
15 |         aggregator=2,
16 |         onAggregationAction=onAggregationAction,
17 |         creator=None,
18 |         role="SUMMARISER",
19 |         feedback=False,
20 |         goal="summarize input into presentable points",
21 |         backstory="backstory",
22 |         capability="llm_task_executor",
23 |         agent_type="STATIC",
24 |         multiplicity=0,
25 |         task="summarize points to present to health care professionals and general public separately",
26 |         output_consumer_agent=agent_list[3],
27 |         HGI_Intf=onResultHGI,
28 |         llm_api=llm,
29 |         llm_resp_timer_value=130,
30 |         tools_list = [WikipediaTool, GoogleFinanceSearchTool, exaSearchTool, SerperSpecificSearchTool],
31 |     )
32 | 
33 | 34 | -------------------------------------------------------------------------------- /docs/components/aiagent/agent-configuration.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | The GenAI Agents platform enables developers to create solutions with 4 | autonomous agents by setting up the agents using the specified parameters. 5 | --- 6 | 7 | # Agent Configuration 8 | 9 | 10 | 11 |
AttributeParamter typeDescription
Agent Attribute and default valueParameter typeDescriptionRemarks
agentNameMandatoryIndicates the name of the agent
roleMandatoryIndicates the role of the agent
goalMandatoryGoal that the agent needs to achieve
backstoryMandatoryBackground of the agent
capabilityMandatoryThe capabilities of the agentOnly limited capabilities are supported. In future they will be extended. At present it supports search_executor and llm_task_executor.
taskMandatoryTask that needs to be accomplished by the agent
output_consumer_agentMandatoryName of the agent which consumes the output produced by the agentThe value “HGI” indicates human agent which is the final receiver of the output from the agent
llm_apiOptionalLLM that need to be used for generating responsesTo be used for local LLM as mentioned in section
llmOptionalLLM that need to be used for generating responsesused for passing LLM name to the agent. Please refer to section
tools_list=[]OptionalOrdered list of tools that the agent need to execute to accomplish the taskThe agent will execute the tools in the given order with relevant parameters after generating using AI
feedback=FalseOptionalIf the agent needs to generate feedback and pass the response to the sender agentRefer to example "usecases/ProfAgentFeedback_Review.py"
agent_type=”STATIC”OptionalWhether the agent is created at the initialization of the program.Will also be used in future for indicating “special agents types”. Specific example for dynamic usage can be found "usecases/ProfAgentDynamic.py"
multiplicity=0OptionalNumber of concurrent tasks that the agent executeFor future usage
aggregator=0OptionalIf the agent is an aggregator, the field indicates number of input messages that the agent needs to receive to accomplish the taskEx: aggregator=2 indicates that the agent needs to wait for 2 messages from the other agents to execute its task. Example "usecases/ProfAgentAggr.py"
onAggregationAction=NoneOptionalCallback function provided to perform the aggregation of input messagesThe developer needs to pass the function to handle the aggregation of input generated by various agents. Refer to above example.
creator=NoneOptionalRefers to the object of agent, which gets created dynamicallyA thread will be created based on the object description to handle the task. Example: "usecases/ProfAgentDynamic.py"
HGI_Intf=NoneOptionalHuman tool to get the control after the execution of the taskRefer to example: "usecases/ProfAgentHumanToolInteration.py"
llm_resp_timer_value=2000OptionalMaximum time that the agent can wait for the response from LLM/toolFor future use
12 | 13 | Example Configurations 14 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /docs/components/memory.md: -------------------------------------------------------------------------------- 1 | # 🧠 Memory 2 | 3 | ## What is Memory? 4 | 5 | Memory is one of the important components of the Agentic framework, which gives the agents their own memory to recall and remember the tasks executed and feedback received. It helps the agent make "informed decisions" by recalling previous actions and their observations. It can also store the current execution. Memory helps the agent to avoid repeating mistakes for similar tasks and improves the overall user experience by providing results based on recalled memory. 6 | 7 | The new update introduces Long-Term Memory (LTM), a breakthrough feature that enhances the way agents interact, adapt, and grow. LTM equips AI agents with the capability to store and recall information from previous interactions over extended periods, much like human memory. 8 | 9 | ### Long Term Memory 10 | 11 |
from openagi.memory import Memory
12 | 
13 | # Basic memory initialization
14 | memory = Memory()
15 | 
16 | # Long-Term Memory initialization with custom settings
17 | ltm_memory = Memory(
18 |     long_term=True,
19 |     ltm_threshold=0.8,
20 |     long_term_dir="/path/to/custom/memory/storage"
21 | )
22 | 
23 | 24 | Key Features of Long-Term Memory: 25 | 26 | 1. Seamless Integration: Enabling LTM within OpenAGI requires just a simple configuration update. 27 | 2. Customizable Memory Storage: Users have control over how and where their agent's memory is stored. 28 | 3. Smart Retrieval: LTM employs semantic similarity to retrieve and apply relevant information from past experiences. 29 | 4. Feedback-Driven Learning: Agents can incorporate user feedback to continuously enhance their performance. 30 | 5. Privacy Controls: Memory management is user-friendly, allowing easy deletion or modification of stored information. 31 | 32 | ## Parameters: 33 | 34 | The Memory class accepts several parameters that allow you to customize its behavior, particularly for Long-Term Memory: 35 | 36 | | Parameter | Type | Default | Description | 37 | | --------------- | ----- | ------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | 38 | | long\_term | bool | False | Enables or disables Long-Term Memory functionality. When set to True, the agent will store and retrieve information from past interactions. | 39 | | ltm\_threshold | float | 0.7 | Sets the semantic similarity threshold for memory retrieval. Higher values make the memory more selective, only retrieving highly similar past experiences. | 40 | | long\_term\_dir | str | None | Specifies the directory for storing long-term memories. If not provided, a default location will be used. | 41 | 42 | 43 | 44 | Below we have shown how one can initiate and run using query with Long-Term Memory enabled: 45 | 46 | ```python 47 | # imports 48 | from openagi.agent import Admin 49 | from openagi.llms.openai import OpenAIModel 50 | from openagi.planner.task_decomposer import TaskPlanner 51 | from openagi.actions.tools.ddg_search import DuckDuckGoSearch 52 | from openagi.memory import Memory 53 | 54 | # Define LLM 55 | config = OpenAIModel.load_from_env_config() 56 | llm = OpenAIModel(config=config) 57 | 58 | # Memory Usage with Long-Term Memory enabled 59 | admin = Admin( 60 | llm=llm, 61 | actions=[DuckDuckGoSearch], 62 | planner=TaskPlanner(human_intervene=False), 63 | memory=Memory(long_term=True), 64 | ) 65 | 66 | # Run Admin 67 | res = admin.run( 68 | query="sample query", 69 | description="sample description", 70 | ) 71 | ``` 72 | 73 | With LTM activated, your agent will now retain knowledge from previous interactions and use that information to provide more relevant and intelligent responses. This enhancement allows for the creation of more sophisticated AI systems that can learn and improve over time, offering a new level of continuity and context-awareness in AI-driven applications. 74 | 75 | ``` 76 | ``` 77 | -------------------------------------------------------------------------------- /docs/components/planner.md: -------------------------------------------------------------------------------- 1 | # 🗂 Planner 2 | 3 | ## What is Planner? 4 | 5 | Planner is one of the important component of any Agent framework, which enables the agent to divide a task into multiple subtasks based on the requirement. We call this step as **Task Decomposition.** 6 | 7 | The `Planner` in the `OpenAGI` contains essential modules and components that handle task planning and decomposition. These components are designed to work together to break down complex tasks into manageable sub-tasks, which are then executed by Admin. 8 | 9 | Below is a detailed explanation of the attributes and functionality of the modules within the `Planner`. 10 | 11 | ## Attributes 12 | 13 | 14 | 15 |
ParameterOptional ParameterDescription
human_interveneNoIt indicates the framework that after generating output, it should ask human for feedback and make changes to output based on that.
autonomousNoAutonomous will self assign role and instructions and divide it among the workers. The default is `False`
input_actionYesIt shows how user can provide feedback to the Admin during execution.
promptYesAn optional prompt to be used for task planning.
workersYesWorkers can represent different agents or processes that handle specific subtasks, enabling parallel execution and improving efficiency. If no workers are specified, the planner will operate without additional parallel processing capabilities.
llmYesThis parameter allows the user to specify the Large Language Model (LLM) that will be used for generating responses and planning tasks.
retry_thresholdYesThis parameter defines the maximum number of times the planner will attempt to retry a task if it fails to execute successfully. The default value is 3.
16 | 17 | 18 | 19 | ### Code Snippet 20 | 21 | The primary component, `TaskPlanner`, allows for the decomposition of tasks into smaller sub-tasks and the planning of their execution. This modular approach facilitates efficient task management and execution within the OpenAGI framework. 22 | 23 | ```python 24 | from openagi.planner.task_decomposer import TaskPlanner 25 | 26 | planner = TaskPlanner(human_intervene=False) 27 | # make TaskPlanner autonomous = True for auto creating workers 28 | # Autonomous Multi Agent Architecture 29 | # plan = TaskPlanner(autonomous=True,human_intervene=True) 30 | ``` 31 | 32 | Below we have shown how one can initiate and run using query. 33 | 34 | ```python 35 | # imports 36 | from openagi.agent import Admin 37 | from openagi.llms.openai import OpenAIModel 38 | from openagi.planner.task_decomposer import TaskPlanner 39 | from openagi.actions.tools.ddg_search import DuckDuckGoSearch 40 | 41 | # Define LLM 42 | config = OpenAIModel.load_from_env_config() 43 | llm = OpenAIModel(config=config) 44 | 45 | # Planner Usage 46 | admin = Admin( 47 | llm=llm, 48 | actions=[DuckDuckGoSearch], 49 | planner=TaskPlanner(human_intervene=False), 50 | ) 51 | 52 | # Run Admin 53 | res = admin.run( 54 | query="sample query", 55 | description="sample description", 56 | ) 57 | ``` 58 | -------------------------------------------------------------------------------- /docs/components/vectorstore/README.md: -------------------------------------------------------------------------------- 1 | # 📦 VectorStore 2 | 3 | The Vector Store provides a structured way to store, update, delete, and query documents using various storage backends. It is designed to be inherited by specific storage implementations that define the actual methods for handling data. 4 | 5 | The storage can serve as a backend for Memory to retain the activities of the Agent Execution. The storage class will be instantiated together with the memory class. 6 | 7 | OpenAGI Uses ChromaDB as default storage backend for Memory. 8 | 9 | When the Base Storage Class is inherited, it will have basic methods implemented as below: 10 | 11 | ```python 12 | from pydantic import BaseModel, ConfigDict, Field 13 | 14 | from openagi.storage.base import BaseStorage 15 | 16 | class NewStorage(BaseModel): 17 | 18 | name: str = Field(title="", description=".") 19 | 20 | def save_document(self): 21 | """Save documents to the with metadata.""" 22 | ... 23 | 24 | def update_document(self): 25 | ... 26 | 27 | def delete_document(self): 28 | ... 29 | 30 | def query_documents(self): 31 | ... 32 | 33 | @classmethod 34 | def from_kwargs(cls, **kwargs): 35 | raise NotImplementedError("Subclasses must implement this method.") 36 | ``` 37 | 38 | -------------------------------------------------------------------------------- /docs/components/vectorstore/chromastorage.md: -------------------------------------------------------------------------------- 1 | # 💾 ChromaStorage 2 | 3 | The `ChromaStorage` class is a specific implementation of the `Storage` class using `ChromaDB`. Here is how you can use it: 4 | 5 | ```python 6 | class ChromaMemory(BaseModel): 7 | storage: BaseStorage = Field( 8 | default=ChromaStorage, 9 | description="Storage to be used for the Memory.", 10 | exclude=True, 11 | ) 12 | 13 | def model_post_init(self, __context: Any) -> None: 14 | instance = super().model_post_init(__context) 15 | self.storage = ChromaStorage.from_kwargs(collection_name=self.sessiond_id) 16 | return instance 17 | ``` 18 | 19 | ```python 20 | class ChromaMemory(BaseModel): 21 | sessiond_id: str = Field(default=uuid4().hex) 22 | storage: BaseStorage = Field( 23 | default=ChromaStorage, 24 | description="Storage to be used for the Memory.", 25 | exclude=True, 26 | ) 27 | 28 | def model_post_init(self, __context: Any) -> None: 29 | inst = super().model_post_init(__context) 30 | logging.info(f"{self.sessiond_id=}") 31 | self.storage = ChromaStorage.from_kwargs(collection_name=self.sessiond_id) 32 | return inst 33 | 34 | def search(self, query: str, n_results: int = 10, **kwargs) -> Dict[str, Any]: 35 | """Search for similar tasks based on a query.""" 36 | query_data = { 37 | "query_texts": query, 38 | "n_results": n_results, 39 | "where": {"$contains": self.sessiond_id}, 40 | **kwargs, 41 | } 42 | return self.storage.query_documents(**query_data) 43 | 44 | def display_memory(self) -> Dict[str, Any]: 45 | """Retrieve and display the current memory state from the database.""" 46 | result = self.storage.query_documents(self.session_id, n_results=2) 47 | if result: 48 | return result 49 | return {} 50 | 51 | def save_task(self, task: Task) -> None: 52 | """Save execution details into Memory.""" 53 | document = task.result 54 | metadata = { 55 | "task_id": task.id, 56 | "session_id": self.sessiond_id, 57 | "task_name": task.name, 58 | "task_description": task.description, 59 | "task_result": task.result, 60 | "task_actions": task.actions, 61 | } 62 | 63 | return self.storage.save_document( 64 | id=task.id, 65 | document=document, 66 | metadata=metadata, 67 | ) 68 | 69 | def save_planned_tasks(self, tasks: TaskLists): 70 | for task in tasks: 71 | self.save_task(task=task) 72 | ``` 73 | -------------------------------------------------------------------------------- /docs/components/workers.md: -------------------------------------------------------------------------------- 1 | # 👷 Workers 2 | 3 | ### What is a Worker? 4 | 5 | Workers are special type of classes, responsible for carrying out the tasks assigned by the class "Admin". They utilize tools such as internet search engines, LLMs, and document writers to perform their tasks. Additionally, they can determine which tools to use from a predefined set. 6 | 7 | Similarly to how a large task like writing a blog is decomposed into smaller steps such as researching, drafting, and publishing, the admin can define a large task and split it into smaller tasks that are then assigned to the workers. 8 | 9 | ### Attributes 10 | 11 | Workers possess attributes that facilitate the execution and completion of smaller, independent tasks. 12 | 13 |
AttributeOptional ParameterDescription
roleIt is a string input that defines the Functionality or Responsibility of the worker.
instructionsA paragraph about how the LLM should behave related to its role can also include the backstory and other relevant details that might aid in generating the output.
actionsYesThis configurable parameter takes a list that lets us specify the set of tools available to the worker. The worker may or may not use these tools. If no tools are specified, or if the action list is empty, the worker defaults to the actions set by the admin.
llmYesThis parameter is configurable, allowing the worker to either use a specified LLM or default to the LLM designated by the admin.
max_iterationsYesThis parameter specifies the maximum number of iterations, as an integer, allowed to achieve the objective of the given task.
force_outputYesThis boolean parameter determines whether to force an output or answer after reaching the maximum iteration limit.
14 | 15 | ### Code Snippet 16 | 17 | The primary components,`TaskWorker`, provide a structured way to define and execute tasks. The `TaskWorker` class specializes in executing specific tasks assigned by the planner. 18 | 19 | ```python 20 | from openagi.worker import Worker 21 | 22 | worker = Worker( 23 | role=role, 24 | instructions=instructions, 25 | actions=actions, 26 | llm=llm, 27 | max_iterations=max_iterations, 28 | force_output=force_output 29 | ) 30 | ``` 31 | 32 | Below we have shown how one can initiate and run a simple admin-worker query. 33 | 34 | ```python 35 | # import the required packages 36 | from openagi.actions.files import WriteFileAction 37 | from openagi.actions.tools.ddg_search import DuckDuckGoNewsSearch 38 | from openagi.actions.tools.webloader import WebBaseContextTool 39 | from openagi.agent import Admin 40 | from openagi.llms.azure import AzureChatOpenAIModel 41 | from openagi.memory import Memory 42 | from openagi.planner.task_decomposer import TaskPlanner 43 | from openagi.worker import Worker 44 | 45 | # configure the LLM 46 | config = AzureChatOpenAIModel.load_from_env_config() 47 | llm = AzureChatOpenAIModel(config=config) 48 | 49 | # Declare the Worker objects 50 | 51 | # Initialize the researcher who uses DuckDuckGo to search a topic and extract information from the web pages. 52 | researcher = Worker( 53 | role="Researcher", 54 | instructions="sample instruction.", 55 | actions=[ 56 | DuckDuckGoNewsSearch, 57 | WebBaseContextTool, 58 | ], 59 | ) 60 | # initialize the writer who writes the content of the topic using the tools provided 61 | writer = Worker( 62 | role="Writer", 63 | instructions="sample instruction.", 64 | actions=[ 65 | DuckDuckGoNewsSearch, 66 | WebBaseContextTool, 67 | ], 68 | ) 69 | # initialize the reviewer who reviews the content written by the writer and saves the content into a file using the write file action tool. 70 | reviewer = Worker( 71 | role="Reviewer", 72 | instructions="sample instruction.", 73 | actions=[ 74 | DuckDuckGoNewsSearch, 75 | WebBaseContextTool, 76 | WriteFileAction, 77 | ], 78 | ) 79 | 80 | # declare the Admin object with Task Planner, Memory, and LLM 81 | admin = Admin( 82 | planner=TaskPlanner(human_intervene=False), 83 | memory=Memory(), 84 | llm=llm, 85 | ) 86 | 87 | # Assign sub-tasks to workers 88 | admin.assign_workers([researcher, writer, reviewer]) 89 | 90 | # run the admin object 91 | res = admin.run( 92 | query="Write a blog post.", 93 | description="sample description.", 94 | ) 95 | ``` 96 | -------------------------------------------------------------------------------- /docs/contact-us.md: -------------------------------------------------------------------------------- 1 | # 📞 Contact Us 2 | 3 | Please email us at openagi@aiplanet.com for any feedback/issues. 4 | -------------------------------------------------------------------------------- /docs/getting-started/installation.md: -------------------------------------------------------------------------------- 1 | # 🔧 Installation 2 | 3 | To install OpenAGI, lets practice some best practice by creating a virtual environment and installing the package. 4 | 5 | #### Setup a virtual environment 6 | 7 | ```bash 8 | # For Mac users 9 | python3 -m venv venv 10 | source venv/bin/activate 11 | 12 | # For Windows users 13 | python -m venv venv 14 | venv/scripts/activate 15 | 16 | # to create virtual env using particular python version (in Windows) 17 | py -3.11 -m venv venv 18 | ``` 19 | 20 | #### Install the Package 21 | 22 | ```bash 23 | pip install openagi 24 | ``` 25 | -------------------------------------------------------------------------------- /docs/getting-started/quickstart.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: Lets build our first Agent use cases. 3 | --- 4 | 5 | # 🚀 Quickstart 6 | 7 | In this quickstart, we'll explore one of the use cases to demonstrate the execution of the Agent. We'll focus on querying Search Engines tools like `DuckDuckGoSearchTool` to gather the information on 3 Days Trip to San Francisco and Bay area based on recent days. 8 | 9 | Agents excel at autonomously performing multiple tasks, making decisions on the fly, and communicating with other agents. For this use case, we will let `Admin` Agent to auto-decompose or `Plan` the task and use tools as the supported `Actions`. 10 | 11 | ### 1. Import required modules 12 | 13 | To get started, we need to initialize a few methods from the modules. 14 | 15 | * Admin 16 | * Worker 17 | * Action 18 | * Large Language Model 19 | * Memory 20 | * Planner 21 | 22 | ```python 23 | from openagi.agent import Admin 24 | from openagi.worker import Worker 25 | from openagi.actions.files import WriteFileAction 26 | from openagi.actions.tools.ddg_search import DuckDuckGoNewsSearch 27 | from openagi.actions.tools.webloader import WebBaseContextTool 28 | from openagi.llms.openai import OpenAIModel 29 | from openagi.memory import Memory 30 | from openagi.planner.task_decomposer import TaskPlanner 31 | ``` 32 | 33 | ### 2. Setting LLM configuration 34 | 35 | To authenticate your requests to the OpenAI API (by default OpenAI Model will be used), you need to set your API key as an environment variable. This is essential for ensuring secure and authorised access to the API services. 36 | 37 | ```python 38 | import os 39 | os.environ["OPENAI_API_KEY"] = "sk-proj-xxxxxxxxxxxxxxxxxx" 40 | 41 | config = OpenAIModel.load_from_env_config() 42 | llm = OpenAIModel(config=config) 43 | ``` 44 | 45 | Replace `sk-proj-xxxxxxxxxxxxxxxxxx` with your actual OpenAI API key. 46 | 47 | ### 3. Setup Workers with Tools and Action 48 | 49 | Workers are specialized classes tasked with executing the assignments given by the "Admin" class. They use tools such as internet news search engines, LLMs, and document writers to complete their tasks, individually and in cohesion (for complex tasks like writing blog articles). 50 | 51 | An action is a functionality that enables the Agent to fetch, process, and store data for further analysis and decision-making. 52 | 53 | * `DuckDuckGoNewsSearch`: This tool fetches real-time news data using the DuckDuckGo search engine, providing up-to-date information. 54 | * `WebBaseContextTool`: This tool is used to extract information from Web Pages. It also provides a way to load and optionally summarize the content of a webpage. 55 | * `WriteFileAction`: This action saves the written file to the specified location, ensuring data persistence. 56 | 57 | ```python 58 | # Declare the Worker objects 59 | 60 | # Initialize the researcher who uses DuckDuckGo to search a topic and extract information from the web pages. 61 | researcher = Worker( 62 | role="Researcher", 63 | instructions="sample instruction.", 64 | actions=[ 65 | DuckDuckGoNewsSearch, 66 | WebBaseContextTool, 67 | ], 68 | ) 69 | # initialize the writer who writes the content of the topic using the tools provided 70 | writer = Worker( 71 | role="Writer", 72 | instructions="sample instruction.", 73 | actions=[ 74 | DuckDuckGoNewsSearch, 75 | WebBaseContextTool, 76 | ], 77 | ) 78 | # initialize the reviewer who reviews the content written by the writer and saves the content into a file using the write file action tool. 79 | reviewer = Worker( 80 | role="Reviewer", 81 | instructions="sample instruction.", 82 | actions=[ 83 | DuckDuckGoNewsSearch, 84 | WebBaseContextTool, 85 | WriteFileAction, 86 | ], 87 | ) 88 | ``` 89 | 90 | ### 4. Execute the Admin Agent 91 | 92 | The Admin Agent serves as the central part for decision-maker, comprehending task specifications in form of supported actions and executing them in a human-like manner. 93 | 94 | In order to execute the agent, user needs to specify their query and description to get the response from the Admin agent. 95 | 96 | ```python 97 | # define the Admin with Planner, Memory and LLM. Further assign the workers in order 98 | admin = Admin( 99 | planner=TaskPlanner(human_intervene=False), 100 | memory=Memory(), 101 | llm=llm, 102 | ) 103 | 104 | # Assign sub-tasks to workers 105 | admin.assign_workers([researcher, writer, reviewer]) 106 | 107 | result = admin.run( 108 | query="Write an article on places to visit in Spain.", 109 | description="You are a knowledgeable local guide with extensive information about Spain, its attractions and customs.", 110 | ) 111 | 112 | print(result) 113 | ``` 114 | -------------------------------------------------------------------------------- /docs/use-cases/github-agent.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | Staying current with the latest developments is crucial, especially in the 4 | fast-paced world of technology and artificial intelligence. A News Agent can 5 | help you stay informed by gathering the latest n 6 | --- 7 | 8 | # 📰 News Agent 9 | 10 | Be upto date on what's happening using News Agent 11 | 12 | **Import Required Libraries** 13 | 14 | First, import the necessary libraries and modules. These modules will enable the agent to perform web searches, handle task planning, and display results in a readable format. 15 | 16 | ```python 17 | from openagi.actions.tools.ddg_search import DuckDuckGoSearch 18 | from openagi.agent import Admin 19 | from openagi.llms.azure import AzureChatOpenAIModel 20 | from openagi.planner.task_decomposer import TaskPlanner 21 | from rich.console import Console 22 | from rich.markdown import Markdown 23 | import os 24 | ``` 25 | 26 | **Setup LLM** 27 | 28 | Set up the environment variables required for Azure OpenAI configuration. These environment variables include the base URL, deployment name, model name, API version, and API key. This configuration is essential for the Large Language Model (LLM) to function correctly. 29 | 30 | ```python 31 | os.environ["AZURE_BASE_URL"] = "https://.openai.azure.com/" 32 | os.environ["AZURE_DEPLOYMENT_NAME"] = "" 33 | os.environ["AZURE_MODEL_NAME"] = "gpt4-32k" 34 | os.environ["AZURE_OPENAI_API_VERSION"] = "2023-05-15" 35 | os.environ["AZURE_OPENAI_API_KEY"] = "" 36 | 37 | config = AzureChatOpenAIModel.load_from_env_config() 38 | llm = AzureChatOpenAIModel(config=config) 39 | ``` 40 | 41 | **Define Admin** 42 | 43 | Create an Admin instance to manage actions and execute tasks. The Admin will use the DuckDuckGoSearch tool to perform web searches and the TaskPlanner to manage task execution without human intervention. 44 | 45 | ```python 46 | admin = Admin( 47 | llm=llm, 48 | actions=[DuckDuckGoSearch], 49 | planner=TaskPlanner(human_intervene=False), 50 | ) 51 | ``` 52 | 53 | **Execute Agent LLM** 54 | 55 | Run the Admin with a specific query to fetch the latest news about AI from the web. In this case, the query is set to find recent news related to "Recent AI News Microsoft." The Admin will process this query and return the relevant news articles. 56 | 57 | ```python 58 | res = admin.run( 59 | query="Recent AI News Microsoft", 60 | description="", 61 | ) 62 | ``` 63 | 64 | **Print the Results** 65 | 66 | Finally, use the rich library to output the results in a readable format. The Markdown class helps in rendering the news content neatly in the console. 67 | 68 | ```python 69 | Console().print(Markdown(res)) 70 | ``` 71 | 72 | By following these steps, you can set up a News Agent that keeps you updated with the latest news in the field of artificial intelligence. This example uses the power of Azure's GPT-4 model and OpenAGI to perform efficient web searches and present the information in an easily digestible format. 73 | 74 | ### Sample Output 75 | 76 | When the above code is executed, the output in the console might look like this: 77 | 78 | ``` 79 | # Recent AI News from Microsoft 80 | 81 | ## 1. Microsoft Unveils New AI Features in Office Suite 82 | *Date: August 8, 2024* 83 | Microsoft has announced the integration of advanced AI features in its Office suite, aiming to enhance productivity and collaboration among users. 84 | 85 | ## 2. Microsoft AI Research Breakthroughs 86 | *Date: August 7, 2024* 87 | Recent research from Microsoft AI has shown significant improvements in natural language processing, potentially revolutionizing how machines understand human language. 88 | 89 | ## 3. Microsoft Partners with OpenAI for New Developments 90 | *Date: August 6, 2024* 91 | In a strategic partnership, Microsoft and OpenAI are set to collaborate on new AI technologies that promise to push the boundaries of artificial intelligence applications. 92 | ``` 93 | 94 | This output showcases the latest news articles related to Microsoft's developments in artificial intelligence, formatted neatly for readability. 95 | -------------------------------------------------------------------------------- /docs/use-cases/market-agent.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | This example uses OpenAGI to create trip itineraries by leveraging an OpenAI 4 | model through an Admin agent, with results displayed using Markdown via the 5 | rich library. 6 | --- 7 | 8 | # 📅 Itinerary Planner 9 | 10 | **Import Required Libraries** 11 | 12 | First, import the necessary libraries and modules. These modules will enable the agent to perform web searches, handle task planning, write files, and display results in a readable format. 13 | 14 | ```python 15 | from openagi.actions.files import WriteFileAction 16 | from openagi.actions.tools.ddg_search import DuckDuckGoSearch 17 | from openagi.agent import Admin 18 | from openagi.llms.openai import OpenAIModel 19 | from openagi.planner.task_decomposer import TaskPlanner 20 | from rich.console import Console 21 | from rich.markdown import Markdown 22 | import os 23 | ``` 24 | 25 | **Setup LLM** 26 | 27 | Set up the environment variables required for the OpenAI configuration. These environment variables include the API key necessary for accessing the OpenAI services. This configuration is essential for the Large Language Model (LLM) to function correctly. 28 | 29 | ```python 30 | # Set up the environment variables for OpenAI 31 | os.environ["OPENAI_API_KEY"] = "sk-proj-xxxxxxxxxxxxxxxxxx" 32 | 33 | # Initialize the OpenAI Model 34 | config = OpenAIModel.load_from_env_config() 35 | llm = OpenAIModel(config=config) 36 | ``` 37 | 38 | **Define Admin** 39 | 40 | Create an Admin instance to manage actions and execute tasks. The Admin will use the DuckDuckGoSearch tool to perform web searches, the WriteFileAction to save results, and the TaskPlanner to manage task execution without human intervention. 41 | 42 | ```python 43 | # Set up the Admin Agent 44 | admin = Admin( 45 | llm=llm, 46 | actions=[ 47 | DuckDuckGoSearch, 48 | WriteFileAction, 49 | ], 50 | planner=TaskPlanner( 51 | human_intervene=False, 52 | ), 53 | ) 54 | ``` 55 | 56 | **Execute Agent LLM** 57 | 58 | Run the Admin with a specific query to create an itinerary for a trip to the San Francisco Bay Area. The Admin will process this query and return a detailed itinerary based on the latest information available. 59 | 60 | ```python 61 | # Execute the Agent to create an itinerary 62 | response = Admin(actions=[DuckDuckGoSearch]).run( 63 | query="3 Days Trip to san francisco bay area", 64 | description="You are a knowledgeable local guide with extensive information about the city, it's attractions and customs", 65 | ) 66 | ``` 67 | 68 | **Print the Results** 69 | 70 | Finally, use the rich library to output the results in a readable format. The Markdown class helps in rendering the itinerary content neatly in the console. 71 | 72 | ```python 73 | # Print the results from OpenAGI using rich library 74 | Console().print(Markdown(res)) 75 | ``` 76 | 77 | By following these steps, you can set up a News Agent that helps you plan activities or trips effectively. This example uses the power of the OpenAI model and OpenAGI to perform efficient web searches and present the information in an easily digestible format, ensuring you stay informed and well-prepared. 78 | 79 | ### Sample Output 80 | 81 | When this code is executed, the output in the console might resemble the following itinerary: 82 | 83 | ``` 84 | # Itinerary for a 3-Day Trip to San Francisco Bay Area 85 | 86 | ## Day 1: Explore San Francisco 87 | 88 | - **Morning**: Visit the iconic Golden Gate Bridge. Enjoy a walk or rent a bike to cross the bridge for stunning views. 89 | 90 | - **Afternoon**: Head to Fisherman’s Wharf for lunch. Try the famous clam chowder in a sourdough bread bowl. 91 | 92 | - **Evening**: Explore Pier 39, watch the sea lions, and enjoy street performances. Consider dining at one of the waterfront restaurants. 93 | 94 | ## Day 2: Culture and History 95 | 96 | - **Morning**: Visit Alcatraz Island. Book your tickets in advance to explore the historic prison. 97 | 98 | - **Afternoon**: Discover the San Francisco Museum of Modern Art (SFMOMA). Enjoy lunch at a nearby café. 99 | 100 | - **Evening**: Stroll through the Mission District and enjoy the vibrant murals. Dine at a local taqueria for authentic Mexican food. 101 | 102 | ## Day 3: Nature and Surroundings 103 | 104 | - **Morning**: Take a trip to Muir Woods National Monument. Enjoy a hike among the towering redwoods. 105 | 106 | - **Afternoon**: Visit Sausalito for lunch and explore the charming waterfront town. 107 | 108 | - **Evening**: Return to San Francisco and enjoy a sunset view from Twin Peaks. Consider a farewell dinner in the city. 109 | ``` 110 | 111 | This output provides a structured and detailed itinerary for a three-day trip to the San Francisco Bay Area, formatted for easy reading. 112 | -------------------------------------------------------------------------------- /example/hotel_map_agent.py: -------------------------------------------------------------------------------- 1 | # Import required modules from the OpenAGI framework 2 | from openagi.planner.task_decomposer import TaskPlanner # For autonomous task planning 3 | from openagi.agent import Admin # Main agent class that orchestrates the process 4 | from openagi.actions.tools.searchapi_search import SearchApiSearch # Maps tool from SearchAPI Search 5 | from openagi.llms.xai import XAIModel # XAI language model integration 6 | import os 7 | 8 | # Set up the API key for the XAI model and Searchapi.io 9 | # This should be replaced with your actual API key 10 | os.environ['XAI_API_KEY'] = "" # get your key: https://console.x.ai/ 11 | 12 | SearchApiSearch.set_config({ 13 | "api_key": "", # get your key: https://www.searchapi.io/ 14 | "engine": "google_maps" 15 | }) 16 | 17 | # Load XAI model configuration from environment variables 18 | # This typically includes settings like model type, temperature, etc. 19 | grok_config = XAIModel.load_from_env_config() 20 | 21 | # Initialize the XAI language model with the loaded configuration 22 | llm = XAIModel(config=grok_config) 23 | 24 | # Create a task planner instance 25 | # autonomous=True: The planner will execute tasks without waiting for approval 26 | # human_intervene=False: No human intervention will be requested during execution 27 | plan = TaskPlanner( 28 | autonomous=True, 29 | human_intervene=False 30 | ) 31 | 32 | # Initialize the Admin agent 33 | # This is the main orchestrator that combines: 34 | # - actions: List of available tools (only SearchApiSearch in this case) 35 | # - planner: The task planner that decides what actions to take 36 | # - llm: The language model that will process the text and generate responses 37 | admin = Admin( 38 | actions=[SearchApiSearch], 39 | planner=plan, 40 | llm=llm, 41 | ) 42 | 43 | # Execute the agent with a specific query 44 | # query: The question we want to answer 45 | # description: Brief description of what the agent should do 46 | res = admin.run( 47 | query="list down the hotels in the Budapest. I need 10 hotel names", 48 | description="make sure to look for those hotel that are nearby Gellart in Budapest", 49 | ) 50 | 51 | # Print the result obtained from the agent 52 | print(res) 53 | -------------------------------------------------------------------------------- /example/itinerary_planner.py: -------------------------------------------------------------------------------- 1 | from openagi.actions.tools.ddg_search import DuckDuckGoSearch 2 | from openagi.agent import Admin 3 | from openagi.agent import Admin 4 | from openagi.llms.azure import AzureChatOpenAIModel 5 | from openagi.planner.task_decomposer import TaskPlanner 6 | from rich.console import Console 7 | from rich.markdown import Markdown 8 | 9 | config = AzureChatOpenAIModel.load_from_env_config() 10 | llm = AzureChatOpenAIModel(config=config) 11 | 12 | plan = TaskPlanner(autonomous=True) 13 | 14 | admin = Admin( 15 | actions = [DuckDuckGoSearch], 16 | planner = plan, 17 | llm=llm, 18 | 19 | ) 20 | res = admin.run( 21 | query="3 Days Trip to san francisco bay area", 22 | description= 23 | "You are a knowledgeable local guide with extensive information about the city, it's attractions and customs. Do not use a quality assurance agent and output it without writing in the file.", 24 | ) 25 | print(res) 26 | 27 | 28 | 29 | # Print the results from the OpenAGI 30 | print("-" * 100) # Separator 31 | Console().print(Markdown(res)) 32 | 33 | # The Agent did some research using the given actions and returned the below itinerary. 34 | """ 35 | 36 | Day 1: 37 | 38 | - Breakfast at Mama's on Washington Square 39 | - Explore Golden Gate Park 40 | - Lunch at The Cliff House 41 | - Walk across the Golden Gate Bridge 42 | - Dinner at Kokkari Estiatorio 43 | 44 | Day 2: 45 | 46 | - Breakfast at Brenda's French Soul Food 47 | - Take a ferry to Alcatraz Island 48 | - Lunch at The Buena Vista Cafe (try their famous Irish Coffee) 49 | - Explore Fisherman's Wharf and Pier 39 50 | - Ride a cable car to Nob Hill 51 | - Dinner at House of Prime Rib 52 | 53 | Day 3: 54 | 55 | - Breakfast at Tartine Bakery 56 | - Visit the Palace of Fine Arts 57 | - Lunch at Tony's Pizza Napoletana in North Beach 58 | - Explore Chinatown 59 | - Dinner at Nopa 60 | 61 | Note: Don't forget to make reservations for popular restaurants in advance! 62 | """ 63 | -------------------------------------------------------------------------------- /example/job_search.py: -------------------------------------------------------------------------------- 1 | 2 | from openagi.actions.tools.ddg_search import DuckDuckGoSearch 3 | from openagi.agent import Admin 4 | from openagi.planner.task_decomposer import TaskPlanner 5 | from rich.console import Console 6 | from rich.markdown import Markdown 7 | from openagi.llms.azure import AzureChatOpenAIModel 8 | from openagi.planner.task_decomposer import TaskPlanner 9 | from openagi.actions.tools.ddg_search import DuckDuckGoSearch 10 | from openagi.actions.files import WriteFileAction 11 | from openagi.agent import Admin 12 | 13 | 14 | 15 | 16 | import os 17 | from getpass import getpass 18 | 19 | 20 | config = AzureChatOpenAIModel.load_from_env_config() 21 | llm = AzureChatOpenAIModel(config=config) 22 | 23 | plan = TaskPlanner(autonomous=True) 24 | 25 | admin = Admin( 26 | actions = [DuckDuckGoSearch], 27 | planner = plan, 28 | llm=llm, 29 | 30 | ) 31 | res = admin.run( 32 | query="Create a job posting for an SDE 2 Full Stack Developer at AI Planet", 33 | description=""" 34 | AI Planet (https://aiplanet.com/), a cutting-edge company at the forefront of technology, artificial intelligence, 35 | and machine learning, is seeking a talented SDE 2 Full Stack Developer to join our innovative team. The ideal candidate will possess strong proficiency in Python, 36 | ReactJS, Golang, NodeJS, and SQL. Experience with Machine Learning and Deep Learning is highly preferred, as it aligns with our company's core focus. At AI Planet, 37 | we foster a culture of innovation, collaboration, and continuous learning. We're looking for a passionate developer who can contribute to our dynamic environment and 38 | help drive our mission to advance AI technology. If you're excited about pushing the boundaries of what's possible in the world of AI and want to work with a team of 39 | like-minded professionals, we encourage you to apply and become part of our journey in shaping the future of technology. 40 | """, 41 | ) 42 | print(res) 43 | 44 | 45 | 46 | # Job Posting 47 | 48 | ## Company: AI Planet 49 | 50 | ### Role: SDE 2 Full Stack Developer 51 | 52 | # **Description:** 53 | 54 | # AI Planet is seeking a passionate and talented SDE 2 Full Stack Developer to design, develop, and maintain web applications. The ideal candidate will collaborate with cross-functional teams, write clean and efficient code, conduct code reviews, troubleshoot issues, and optimize application performance. 55 | 56 | # **Qualifications:** 57 | 58 | # - Bachelor's degree in a relevant field 59 | # - Experience with front-end technologies (HTML, CSS, JavaScript, frameworks like React) 60 | # - Experience with back-end technologies (Node.js, Python, Java) 61 | # - Proficiency with database systems and version control (Git) 62 | # - Strong communication skills 63 | 64 | # **Preferred Qualifications:** 65 | 66 | # - Experience with cloud platforms 67 | # - Familiarity with DevOps practices 68 | # - Experience with containerization (Docker, Kubernetes) 69 | 70 | # **Benefits:** 71 | 72 | # - Competitive salary 73 | # - Health insurance 74 | # - Flexible work hours 75 | # - Professional development opportunities 76 | # - Inclusive culture 77 | 78 | # **Company Culture:** 79 | 80 | # AI Planet fosters a culture of innovation, collaboration, and continuous learning. We emphasize adaptability and provide a supportive environment where leveraging generative AI is encouraged. 81 | -------------------------------------------------------------------------------- /example/market_research.py: -------------------------------------------------------------------------------- 1 | from openagi.actions.files import WriteFileAction, ReadFileAction 2 | from openagi.actions.tools.ddg_search import DuckDuckGoSearch 3 | from openagi.actions.tools.webloader import WebBaseContextTool 4 | from openagi.agent import Admin 5 | from openagi.llms.azure import AzureChatOpenAIModel 6 | from openagi.memory import Memory 7 | from openagi.planner.task_decomposer import TaskPlanner 8 | from openagi.worker import Worker 9 | from rich.console import Console 10 | from rich.markdown import Markdown 11 | 12 | if __name__ == "__main__": 13 | config = AzureChatOpenAIModel.load_from_env_config() 14 | llm = AzureChatOpenAIModel(config=config) 15 | 16 | 17 | plan = TaskPlanner(autonomous=True) 18 | 19 | admin = Admin( 20 | actions = [DuckDuckGoSearch], 21 | planner = plan, 22 | llm=llm, 23 | ) 24 | 25 | 26 | res = admin.run( 27 | query=""" 28 | Create a comprehensive market research report on renewable energy trends. The report should cover major sectors within renewable energy, assess current market size, and provide growth projections. Include an analysis of technological advancements and profiles of key industry players. Examine relevant policies, explore emerging markets, and discuss challenges facing the industry. Provide valuable insights for potential investors considering entering the renewable energy market. 29 | """, 30 | description=""" 31 | Lead the team in producing this high-quality renewable energy market report. Oversee the research and writing process, ensure effective collaboration among team members, and manage timely completion of all sections. Verify the report's accuracy, clarity, and overall value to readers. The final deliverable should be a strategic resource that guides decision-making in the renewable energy sector. 32 | """ 33 | ) 34 | 35 | print("-" * 100) # Separator 36 | Console().print(Markdown(res)) 37 | 38 | 39 | 40 | ### Output 41 | 42 | # The comprehensive market research report on renewable energy includes sections on trends, technological innovations, major players, regulatory impacts, and future potential. Key highlights are: 43 | 44 | # 1. **Major Players**: Leading companies like NextEra Energy and Brookfield Renewable Partners. 45 | # 2. **Technological Innovations**: Advancements in high-efficiency solar technologies, AI, big data, and distributed energy storage systems. 46 | # 3. **Trends and Projections**: Renewables are expected to surpass coal by 2025. 47 | # 4. **Regulatory and Investment Impacts**: Historic investments and the importance of reskilling the workforce. 48 | # 5. **Future Potential**: Innovations in solar, wind, bioenergy, and green hydrogen technologies. -------------------------------------------------------------------------------- /example/marketing_campaign_for_product.py: -------------------------------------------------------------------------------- 1 | from openagi.actions.files import WriteFileAction, ReadFileAction 2 | from openagi.actions.tools.ddg_search import DuckDuckGoSearch 3 | from openagi.actions.tools.webloader import WebBaseContextTool 4 | from openagi.agent import Admin 5 | from openagi.llms.azure import AzureChatOpenAIModel 6 | from openagi.memory import Memory 7 | from openagi.planner.task_decomposer import TaskPlanner 8 | from openagi.worker import Worker 9 | from rich.console import Console 10 | from rich.markdown import Markdown 11 | 12 | from dotenv import load_dotenv 13 | load_dotenv() 14 | 15 | if __name__ == "__main__": 16 | config = AzureChatOpenAIModel.load_from_env_config() 17 | llm = AzureChatOpenAIModel(config=config) 18 | plan = TaskPlanner(autonomous=True) 19 | 20 | 21 | 22 | admin = Admin( 23 | actions = [DuckDuckGoSearch], 24 | memory=Memory(), 25 | llm=llm, 26 | ) 27 | 28 | 29 | res = admin.run( 30 | query="Create a comprehensive marketing campaign for the launch of Pineapple X smartphone.", 31 | description=""" 32 | 1. Conduct market research to identify target audiences and analyze the competitive landscape. 33 | 2. Develop engaging marketing content across multiple channels (social media, blog, email, ads). 34 | 3. Review and refine all materials to ensure a cohesive, high-quality campaign that highlights Pineapple X's unique selling points. 35 | """, 36 | ) 37 | 38 | print("-" * 100) # Separator 39 | Console().print(Markdown(res)) 40 | 41 | 42 | 43 | ### Output 44 | 45 | # To refine and ensure cohesion in the Pineapple X marketing campaign, the following strategies should be considered: 46 | 47 | # 1. **Social Media**: Utilize top strategies such as engaging content, influencer partnerships, and interactive posts. 48 | # 2. **Blog**: Create informative and engaging blog posts that highlight unique features and benefits. 49 | # 3. **Email**: Implement best practices for email marketing, including personalized content and clear calls to action. 50 | # 4. **Advertising**: Leverage successful advertising strategies, such as targeted ads and compelling visuals. 51 | 52 | # By integrating these strategies, the Pineapple X marketing campaign can achieve greater coherence and effectiveness. 53 | 54 | # Join us in embracing the future of mobile technology. Order your Pineapple X today! -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "openagi" 3 | version = "0.3.0" 4 | description = "Making the development of autonomous human-like agents accessible to all" 5 | authors = ["AI Planet "] 6 | readme = "README.md" 7 | include = ["src/*"] 8 | packages = [{ include = "openagi", from = "src" }] # This tells Poetry where to find the package 9 | 10 | [tool.poetry.dependencies] 11 | python = "^3.9, <3.12" 12 | protobuf = "^3.20.3" 13 | langchain-core = "^0.1.27,<0.2.0" 14 | langchain = "^0.1.9" 15 | langchain-community = "^0.0.21" 16 | langchain-openai = "^0.0.6" 17 | langchain-text-splitters = "^0.0.1" 18 | langchain-experimental = "^0.0.53" 19 | duckduckgo-search = "^6.1.0" 20 | spacy = "3.7.4" 21 | xorbits = "^0.7.2" 22 | azure-identity = "^1.15.0" 23 | python-dotenv = "^1.0.1" 24 | spacytextblob = "^4.0.0" 25 | google-auth-oauthlib = "^1.2.0" 26 | google-auth-httplib2 = "^0.2.0" 27 | google-api-python-client = "^2.121.0" 28 | langchain-exa = "^0.0.1" 29 | ollama = "^0.1.7" 30 | wikipedia = "^1.4.0" 31 | yfinance = "^0.2.37" 32 | google-search-results = "^2.4.2" 33 | fastapi = "^0.110.0" 34 | uvicorn = "^0.29.0" 35 | pygithub = "^2.3.0" 36 | langchainhub = "^0.1.15" 37 | python-multipart = "^0.0.9" 38 | pandas = "^2.2.2" 39 | numpy = "^1.26.4" 40 | transformers = "^4.40.0" 41 | pypdf = "^4.2.0" 42 | faiss-cpu = "^1.8.0" 43 | pytest = "^8.2.0" 44 | chromadb = "^0.5.0" 45 | sumy = "^0.11.0" 46 | fake-useragent = "^1.5.1" 47 | 48 | yt-dlp = "^2024.8.6" 49 | youtube-search = "^2.1.2" 50 | tavily-python = "^0.4.0" 51 | [tool.poetry.group.dev.dependencies] 52 | ruff = "^0.1.11" 53 | 54 | [build-system] 55 | requires = ["poetry-core"] 56 | build-backend = "poetry.core.masonry.api" 57 | 58 | [tool.poetry.scripts] 59 | openagi = "openagi.cli:main" 60 | 61 | [tool.ruff] 62 | ignore-init-module-imports = true 63 | line-length = 98 64 | output-format = "grouped" 65 | -------------------------------------------------------------------------------- /src/Readme.md: -------------------------------------------------------------------------------- 1 | ## Run Agentic workflow without using OpenAI API keys 2 | 3 | ### Installation 4 | 5 | ```bash 6 | !pip install openagi 7 | !pip install langchain_groq 8 | ``` 9 | 10 | ### Import Statements 11 | 12 | ```py 13 | from openagi.actions.tools.ddg_search import DuckDuckGoSearch 14 | from openagi.agent import Admin 15 | from openagi.llms.groq import GroqModel 16 | from openagi.memory import Memory 17 | from openagi.planner.task_decomposer import TaskPlanner 18 | ``` 19 | 20 | ### Setup Groq LLM and save variables in environment 21 | 22 | ```py 23 | import os 24 | 25 | os.environ['GROQ_API_KEY'] = "" 26 | os.environ['GROQ_MODEL'] = "mixtral-8x7b-32768" 27 | os.environ['GROQ_TEMP'] = "0.1" 28 | 29 | config = GroqModel.load_from_env_config() 30 | llm = GroqModel(config=config) 31 | ``` 32 | 33 | ### Define Task Planner and Action 34 | 35 | ```py 36 | planner = TaskPlanner(human_intervene=False) 37 | action = DuckDuckGoSearch 38 | ``` 39 | 40 | ### Define Admin 41 | 42 | ```py 43 | admin = Admin( 44 | actions= [action], 45 | planner = planner, 46 | memory=Memory(), 47 | llm = llm, 48 | max_iterations = 5 49 | ) 50 | 51 | res = admin.run( 52 | query="3 Days Trip to San francisco Bay area", 53 | description="You are a knowledgeable local guide with extensive information about the city, it's attractions and customs", 54 | ) 55 | print(res) 56 | ``` 57 | 58 | ### Output 59 | 60 | Here is the requested 3-day itinerary for San Francisco in markdown format: 61 | 62 | ```markdown 63 | Here is a 3-day itinerary for San Francisco that takes into account local customs and etiquette: 64 | 65 | ## Day 1 66 | - Morning: Visit the Golden Gate Bridge and Park. Remember to respect the natural surroundings and other visitors. 67 | - Afternoon: Explore Fisherman's Wharf and Pier 39. Be mindful of the sea lions and their space. 68 | - Evening: Have dinner in Chinatown. Follow local dining etiquette and be respectful of the culture. 69 | 70 | ## Day 2 71 | - Morning: Take a stroll through the Presidio and visit the Walt Disney Family Museum. Dress appropriately for the weather and be respectful of the museum's rules. 72 | - Afternoon: Visit the Exploratorium or California Academy of Sciences. Follow the institutions' guidelines and be considerate of other visitors. 73 | - Evening: Enjoy a meal in the Mission District. Familiarize yourself with local customs and try some authentic Mexican cuisine. 74 | 75 | ## Day 3 76 | - Morning: Ride a cable car and visit Lombard Street. Follow traffic rules and be respectful of residents and other tourists. 77 | - Afternoon: Explore the Haight-Ashbury neighborhood and the Golden Gate Park. Respect the local culture and the environment. 78 | - Evening: Have dinner in North Beach, also known as Little Italy. Be aware of local dining etiquette and try some Italian-American dishes. 79 | 80 | I hope you find this itinerary helpful and enjoy your trip to San Francisco! 81 | ``` 82 | -------------------------------------------------------------------------------- /src/openagi/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | from datetime import datetime 4 | from pathlib import Path 5 | 6 | # Define custom formatter with ANSI escape codes for colors 7 | class ColoredFormatter(logging.Formatter): 8 | COLORS = { 9 | logging.DEBUG: "\033[94m", # Blue 10 | logging.INFO: "\033[92m", # Green 11 | logging.WARNING: "\033[93m", # Yellow 12 | logging.ERROR: "\033[91m", # Red 13 | logging.CRITICAL: "\033[95m", # Magenta 14 | } 15 | RESET = "\033[0m" 16 | 17 | def format(self, record): 18 | log_color = self.COLORS.get(record.levelno, self.RESET) 19 | log_fmt = f"{log_color}%(asctime)s :%(funcName)s: %(message)s{self.RESET}" 20 | formatter = logging.Formatter(log_fmt, datefmt="%Y-%m-%d %H:%M:%S") 21 | return formatter.format(record) 22 | 23 | 24 | BASE_PATH = "logs" 25 | pth = Path(BASE_PATH) 26 | pth.mkdir(parents=True, exist_ok=True) 27 | filename = f'{pth.absolute()}/application_{datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}.log' 28 | 29 | LOG_LEVEL = getattr( 30 | logging, 31 | os.environ.get("OPENAGI_LOG_LEVEL", "INFO").upper(), 32 | logging.INFO, 33 | ) 34 | 35 | console_handler = logging.StreamHandler() 36 | console_handler.setLevel(LOG_LEVEL) 37 | console_handler.setFormatter(ColoredFormatter()) 38 | # Setup basic logging configuration 39 | logging.basicConfig( 40 | level=LOG_LEVEL, 41 | datefmt="%Y-%m-%d %H:%M:%S", 42 | handlers=[ 43 | logging.FileHandler(filename=filename), 44 | console_handler 45 | ] 46 | ) 47 | -------------------------------------------------------------------------------- /src/openagi/actions/__init__.py: -------------------------------------------------------------------------------- 1 | from openagi.actions.base import BaseAction 2 | from openagi.actions.human_input import HumanCLIInput 3 | from openagi.actions.console import ConsolePrint 4 | from openagi.actions.tools import * 5 | -------------------------------------------------------------------------------- /src/openagi/actions/base.py: -------------------------------------------------------------------------------- 1 | from textwrap import dedent 2 | from pydantic import BaseModel, Field 3 | from typing import Any, Optional 4 | 5 | from openagi.llms.base import LLMBaseModel 6 | from openagi.memory.memory import Memory 7 | from typing import ClassVar, Dict, Any 8 | 9 | class BaseAction(BaseModel): 10 | """Base Actions class to be inherited by other actions, providing basic functionality and structure.""" 11 | 12 | session_id: int = Field(default_factory=str, description="SessionID of the current run.") 13 | previous_action: Optional[Any] = Field( 14 | default=None, 15 | description="Observation or Result of the previous action that might needed to run the current action.", 16 | ) 17 | llm: Optional[LLMBaseModel] = Field( 18 | description="LLM Model to be used.", default=None, exclude=True 19 | ) 20 | memory: Optional[Memory] = Field( 21 | description="Memory that stores the results of the earlier tasks executed for the current objective.", 22 | exclude=True, 23 | default=None, 24 | ) 25 | 26 | def execute(self): 27 | """Executes the action""" 28 | raise NotImplementedError("Subclasses must implement this method.") 29 | 30 | @classmethod 31 | def cls_doc(cls): 32 | default_exclude_doc_fields = ["llm", "memory", "session_id", "name", "description"] 33 | return { 34 | "cls": { 35 | "kls": cls.__name__, 36 | "module": cls.__module__, 37 | "doc": dedent(cls.__doc__).strip() if cls.__doc__ else "", 38 | }, 39 | "params": { 40 | field_name: field.description 41 | for field_name, field in cls.model_fields.items() 42 | if field_name not in default_exclude_doc_fields 43 | }, 44 | } 45 | 46 | class ConfigurableAction(BaseAction): 47 | config: ClassVar[Dict[str, Any]] = {} 48 | 49 | @classmethod 50 | def set_config(cls, *args, **kwargs): 51 | if args: 52 | if len(args) == 1 and isinstance(args[0], dict): 53 | cls.config.update(args[0]) 54 | else: 55 | raise ValueError("If using positional arguments, a single dictionary must be provided.") 56 | cls.config.update(kwargs) 57 | 58 | @classmethod 59 | def get_config(cls, key: str, default: Any = None) -> Any: 60 | return cls.config.get(key, default) -------------------------------------------------------------------------------- /src/openagi/actions/compressor.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | from pydantic import Field 3 | from openagi.actions.base import BaseAction 4 | from openagi.prompts.summarizer import SummarizerPrompt 5 | 6 | 7 | class SummarizerAction(BaseAction): 8 | """Summarizer Action""" 9 | 10 | past_messages: Any = Field( 11 | ..., 12 | description="Messages/Data to be summarized", 13 | ) 14 | 15 | def execute(self): 16 | summarizer: str = SummarizerPrompt.from_template({"past_messages": self.past_messages}) 17 | return self.llm.run(summarizer) 18 | -------------------------------------------------------------------------------- /src/openagi/actions/console.py: -------------------------------------------------------------------------------- 1 | from pprint import pprint 2 | 3 | from pydantic import Field 4 | 5 | from openagi.actions.base import BaseAction 6 | 7 | 8 | class ConsolePrint(BaseAction): 9 | content: str = Field( 10 | ..., 11 | description="The content/data passed will be logged into the console using pprint.pprint() module.", 12 | ) 13 | 14 | def execute(self): 15 | pprint(self.content) 16 | return self.content 17 | -------------------------------------------------------------------------------- /src/openagi/actions/files.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from pathlib import Path 3 | from typing import Dict, Optional 4 | from pydantic import Field 5 | 6 | from openagi.actions.base import BaseAction 7 | 8 | 9 | class CreateFileAction(BaseAction): 10 | """ 11 | Creates a new file with the specified content and directory structure. 12 | """ 13 | 14 | filename: str = Field(..., description="Name of the file along with the directory.") 15 | parent_mkdir: bool = Field( 16 | default=True, description="Create parent directories of the file if not exist." 17 | ) 18 | exist_ok: bool = Field( 19 | default=True, 20 | description="Do not raise error if any of the parent directories exists.", 21 | ) 22 | file_content: str = Field(default="", description="String content of the file to insert") 23 | write_text_kargs: Optional[Dict] = Field( 24 | default=None, description="Kwargs to be passed to pathlib's write_text method" 25 | ) 26 | 27 | def execute(self): 28 | output_file = Path(self.filename) 29 | print(f"Created file - {output_file.absolute()}") 30 | output_file.parent.mkdir( 31 | parents=self.parent_mkdir, 32 | exist_ok=self.exist_ok, 33 | ) 34 | 35 | write_kwargs = {} 36 | if self.write_text_kargs: 37 | write_kwargs = {**write_kwargs} 38 | 39 | output_file.write_text(data=self.file_content, **write_kwargs) 40 | return self.file_content 41 | 42 | 43 | class WriteFileAction(BaseAction): 44 | """ 45 | Executes the action to write the provided content to a file at the specified path. 46 | """ 47 | 48 | filename: str = Field(..., description="Name of the file along with the directory.") 49 | file_content: str = Field(default="", description="String content of the file to insert") 50 | file_mode: str = Field( 51 | default="w", 52 | description="File mode to open the file with while using python's open() func. Defaults to 'w'", 53 | ) 54 | 55 | def execute(self): 56 | output_file = Path(self.filename) 57 | logging.info(f"Writing file - {output_file.absolute()}") 58 | with open(output_file.absolute(), self.file_mode) as f: 59 | f.write(self.file_content) 60 | return self.file_content 61 | 62 | 63 | class ReadFileAction(BaseAction): 64 | """ 65 | Reads the contents of a file specified by the `file_path` parameter. 66 | """ 67 | 68 | file_path: str = Field(..., description="Name of the file along with the directory.") 69 | 70 | def execute(self): 71 | output_file = Path(self.file_path) 72 | logging.info(f"Reading file - {output_file.absolute()}") 73 | with open(output_file.absolute(), "r") as f: 74 | return f.read() 75 | -------------------------------------------------------------------------------- /src/openagi/actions/formatter.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | from pydantic import Field 4 | 5 | from openagi.actions.base import BaseAction 6 | 7 | 8 | class FormatterAction(BaseAction): 9 | """Content Formatter Action""" 10 | 11 | content: Any = Field(..., description="Data/Content to be formatted.") 12 | format_type: str = Field( 13 | default="markdown", 14 | description="Type to which the content will be formatted to. It will be modified to the supported formats and returned. Supported Formats - markdown/plan-text", 15 | ) 16 | 17 | def execute(self): 18 | return self.llm.run( 19 | f"Format and return the below response in {self.format_type} format without removing any content. You can rephrase if required.\n{self.content}" 20 | ) 21 | -------------------------------------------------------------------------------- /src/openagi/actions/human_input.py: -------------------------------------------------------------------------------- 1 | from pydantic import Field 2 | 3 | from openagi.actions.base import BaseAction 4 | 5 | 6 | class HumanCLIInput(BaseAction): 7 | ques_prompt: str = Field( 8 | default="Do you think this task is progressing as expected [y/n] or [yes/no]: ", 9 | description="question to be asked to human", 10 | ) 11 | 12 | def execute(self, prompt=ques_prompt): 13 | response = input(f"Agent: {prompt}\nYou: ") 14 | return response 15 | -------------------------------------------------------------------------------- /src/openagi/actions/obs_rag.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from pydantic import Field 4 | 5 | from openagi.actions.base import BaseAction 6 | 7 | 8 | class MemoryRagAction(BaseAction): 9 | """Action class to get all the results from the previous tasks for the current objetive. 10 | This action is responsible to reading and not writing. Writing is done by default for every task. 11 | """ 12 | 13 | query: str = Field( 14 | ..., 15 | description="Query, a string, to run to retrieve the data from the results of previous tasks. Returns an Array of the results.", 16 | ) 17 | max_results: int = Field( 18 | default=10, 19 | description="Max results to be used by querying the memory Defaults to integer 10.", 20 | ) 21 | 22 | def execute(self): 23 | resp = self.memory.search(query=self.query, n_results=self.max_results or 10) 24 | logging.debug(f"Retreived MEMORY DATA - {resp}") 25 | return resp 26 | -------------------------------------------------------------------------------- /src/openagi/actions/tools/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /src/openagi/actions/tools/arxiv_search.py: -------------------------------------------------------------------------------- 1 | from openagi.actions.base import ConfigurableAction 2 | from pydantic import Field 3 | from openagi.exception import OpenAGIException 4 | from typing import ClassVar, Dict, Any 5 | 6 | try: 7 | import arxiv 8 | except ImportError: 9 | raise OpenAGIException("Install arxiv with cmd `pip install arxiv`") 10 | 11 | 12 | class ArxivSearch(ConfigurableAction): 13 | """ 14 | Arxiv Search is a tool used to search articles in Physics, Mathematics, Computer Science, Quantitative Biology, Quantitative Finance, and Statistics 15 | """ 16 | query: str = Field(..., description="User query or question") 17 | max_results: int = Field(10, description="Total results, in int, to be executed from the search. Defaults to 10.") 18 | 19 | def execute(self): 20 | search = arxiv.Search( 21 | query = self.query, 22 | max_results = self.max_results, 23 | ) 24 | client = arxiv.Client() 25 | results = client.results(search) 26 | meta_data = "" 27 | for result in results: 28 | meta_data += f"title : {result.title}\n " 29 | meta_data += f"summary : {result.summary}\n " 30 | meta_data += f"published : {result.published}\n " 31 | meta_data += f"authors : {result.authors}\n " 32 | meta_data += f"pdf_url : {result.pdf_url}\n " 33 | meta_data += f"entry_id : {result.entry_id}\n\n " 34 | return meta_data.strip() 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | -------------------------------------------------------------------------------- /src/openagi/actions/tools/dalle_tool.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import os 4 | import warnings 5 | from typing import Any 6 | 7 | from openagi.actions.base import ConfigurableAction 8 | from pydantic import Field 9 | from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper 10 | 11 | class DallEImageGenerator(ConfigurableAction): 12 | """Use this Action to generate images using DALL·E.""" 13 | 14 | name: str = Field( 15 | default_factory=str, 16 | description="DallEImageGenerator Action to generate an image using OpenAI's DALL·E model.", 17 | ) 18 | description: str = Field( 19 | default_factory=str, 20 | description="This action is used to create images based on textual descriptions using the DALL·E model.", 21 | ) 22 | 23 | query: Any = Field( 24 | default_factory=str, 25 | description="User query, a string, describing the image to be generated.", 26 | ) 27 | 28 | def execute(self): 29 | logging.info(f"Generating image for prompt: {self.query}") 30 | if 'OPENAI_API_KEY' not in os.environ: 31 | warnings.warn( 32 | "Dall-E expects an OPENAI_API_KEY. Please add it to your environment variables.", 33 | UserWarning, 34 | stacklevel=2 35 | ) 36 | return json.dumps({"error": "Dall-E requires an OPENAI_API_KEY. Please add it to your environment variables."}) 37 | 38 | try: 39 | # Use the query directly without the LLM chain 40 | dalle_wrapper = DallEAPIWrapper() 41 | result = dalle_wrapper.run(self.query) 42 | return json.dumps(result) 43 | 44 | except Exception as e: 45 | logging.error(f"Error generating image: {str(e)}") 46 | return json.dumps({"error": f"Failed to generate image: {str(e)}"}) -------------------------------------------------------------------------------- /src/openagi/actions/tools/ddg_search.py: -------------------------------------------------------------------------------- 1 | import json 2 | from typing import Any 3 | from openagi.actions.base import ConfigurableAction 4 | from pydantic import Field 5 | from duckduckgo_search import DDGS 6 | import logging 7 | 8 | class DuckDuckGoSearch(ConfigurableAction): 9 | """Use this Action to search DuckDuckGo for a query.""" 10 | 11 | name: str = Field( 12 | default_factory=str, 13 | description="DuckDuckGoSearch Action to search over duckduckgo using the query.", 14 | ) 15 | description: str = Field( 16 | default_factory=str, 17 | description="This action is used to search for words, documents, images, videos, news, maps and text translation using the DuckDuckGo.com search engine.", 18 | ) 19 | 20 | query: Any = Field( 21 | default_factory=str, 22 | description="User query, a string, to fetch web search results from DuckDuckGo", 23 | ) 24 | 25 | max_results: int = Field( 26 | default=10, 27 | description="Total results, in int, to be executed from the search. Defaults to 10.", 28 | ) 29 | 30 | def _get_ddgs(self): 31 | return DDGS() 32 | 33 | def execute(self): 34 | if self.max_results > 15: 35 | logging.info("Over threshold value... Limiting the Max results to 15") 36 | self.max_results = 15 37 | 38 | result = self._get_ddgs().text( 39 | self.query, 40 | max_results=self.max_results, 41 | ) 42 | return json.dumps(result) -------------------------------------------------------------------------------- /src/openagi/actions/tools/document_loader.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | from openagi.actions.base import ConfigurableAction 3 | from langchain_community.document_loaders import TextLoader 4 | from langchain_community.document_loaders.csv_loader import CSVLoader 5 | from langchain_community.document_loaders.pdf import PyPDFLoader 6 | from pydantic import Field 7 | 8 | class TextLoaderTool(ConfigurableAction): 9 | """Load content from a text file. 10 | 11 | This action loads and processes content from .txt files, combining 12 | metadata and content into a single context string. 13 | """ 14 | 15 | def execute(self) -> str: 16 | file_path: str = self.get_config('filename') 17 | loader = TextLoader(file_path=file_path) 18 | documents = loader.load() 19 | 20 | if not documents: 21 | return "" 22 | 23 | page_content = documents[0].page_content 24 | source = documents[0].metadata["source"] 25 | return f"{source} {page_content}" 26 | 27 | class PDFLoaderTool(ConfigurableAction): 28 | """Load content from a PDF file. 29 | 30 | This action loads and processes content from .pdf files, combining 31 | metadata and content into a single context string. 32 | """ 33 | 34 | def execute(self) -> str: 35 | file_path: str = self.get_config('filename') 36 | loader = PyPDFLoader(file_path=file_path) 37 | documents = loader.load() 38 | 39 | if not documents: 40 | return "" 41 | 42 | page_content = documents[0].page_content 43 | source = documents[0].metadata["source"] 44 | return f"{source} {page_content}" 45 | 46 | class CSVLoaderTool(ConfigurableAction): 47 | """Load content from a CSV file. 48 | 49 | This action loads and processes content from .csv files, combining 50 | row numbers and content into a formatted string representation. 51 | """ 52 | 53 | def execute(self) -> str: 54 | file_path: str = self.get_config('filename') 55 | loader = CSVLoader(file_path=file_path) 56 | documents = loader.load() 57 | 58 | content_parts = [] 59 | for idx, doc in enumerate(documents): 60 | row_content = doc.page_content 61 | row_number = doc.metadata["row"] 62 | content_parts.append(f"row_no {row_number}: {row_content}") 63 | 64 | return "".join(content_parts) -------------------------------------------------------------------------------- /src/openagi/actions/tools/exasearch.py: -------------------------------------------------------------------------------- 1 | from openagi.actions.base import ConfigurableAction 2 | from pydantic import Field 3 | from openagi.exception import OpenAGIException 4 | import os 5 | import warnings 6 | 7 | try: 8 | from exa_py import Exa 9 | except ImportError: 10 | raise OpenAGIException("Install Exa Py with cmd `pip install exa_py`") 11 | 12 | class ExaSearch(ConfigurableAction): 13 | """Exa Search tool for querying and retrieving information. 14 | 15 | This action uses the Exa API to perform searches and retrieve relevant content 16 | based on user queries. Requires an API key to be configured before use. 17 | """ 18 | query: str = Field(..., description="User query or question") 19 | 20 | def __init__(self, **data): 21 | super().__init__(**data) 22 | self._check_deprecated_usage() 23 | 24 | def _check_deprecated_usage(self): 25 | if 'EXA_API_KEY' in os.environ and not self.get_config('api_key'): 26 | warnings.warn( 27 | "Using environment variables for API keys is deprecated and will be removed in a future version. " 28 | "Please use ExaSearch.set_config(api_key='your_key') instead of setting environment variables.", 29 | DeprecationWarning, 30 | stacklevel=2 31 | ) 32 | self.set_config(api_key=os.environ['EXA_API_KEY']) 33 | 34 | 35 | def execute(self) -> str: 36 | api_key: str = self.get_config('api_key') 37 | if not api_key: 38 | if 'EXA_API_KEY' in os.environ: 39 | api_key = os.environ['EXA_API_KEY'] 40 | warnings.warn( 41 | "Using environment variables for API keys is deprecated and will be removed in a future version. " 42 | "Please use ExaSearch.set_config(api_key='your_key') instead of setting environment variables.", 43 | DeprecationWarning, 44 | stacklevel=2 45 | ) 46 | else: 47 | raise OpenAGIException("API KEY NOT FOUND. Use ExaSearch.set_config(api_key='your_key') to set the API key.") 48 | 49 | exa = Exa(api_key=api_key) 50 | results = exa.search_and_contents( 51 | self.query, 52 | text={"max_characters": 512}, 53 | ) 54 | 55 | content_parts = [] 56 | for result in results.results: 57 | content_parts.append(result.text.strip()) 58 | 59 | content = "".join(content_parts) 60 | return ( 61 | content.replace("<|endoftext|>", "") 62 | .replace("NaN", "") 63 | ) 64 | -------------------------------------------------------------------------------- /src/openagi/actions/tools/github_search_tool.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import os 3 | from typing import Dict, List 4 | from openagi.exception import OpenAGIException 5 | import requests 6 | from langchain_community.document_loaders.github import GithubFileLoader 7 | from pydantic import Field 8 | from openagi.actions.base import BaseAction 9 | 10 | import warnings 11 | warnings.filterwarnings("ignore") 12 | 13 | class OpenAGIGithubFileLoader(GithubFileLoader): 14 | def get_file_paths(self) -> List[Dict]: 15 | base_url = ( 16 | f"{self.github_api_url}/repos/{self.repo}/git/trees/" f"{self.branch}?recursive=1" 17 | ) 18 | response = requests.get(base_url, headers=self.headers) 19 | response.raise_for_status() 20 | all_files = response.json()["tree"] 21 | 22 | """ one element in all_files 23 | { 24 | 'path': '.github', 25 | 'mode': '040000', 26 | 'type': 'tree', 27 | 'sha': '89a2ae046e8b59eb96531f123c0c6d4913885df1', 28 | 'url': 'https://github.com/api/v3/repos/shufanhao/langchain/git/trees/89a2ae046e8b59eb96531f123c0c6d4913885dxxx' 29 | } 30 | """ 31 | required_files = [ 32 | f for f in all_files if not (self.file_filter and not self.file_filter(f["path"])) 33 | ] 34 | return required_files 35 | 36 | def get_file_content_by_path(self, path: str) -> str: 37 | base_url = f"{self.github_api_url}/repos/{self.repo}/contents/{path}" 38 | #print(base_url) 39 | response = requests.get(base_url, headers=self.headers) 40 | response.raise_for_status() 41 | 42 | content_encoded = response.json()["content"] 43 | return base64.b64decode(content_encoded).decode("utf-8") 44 | 45 | 46 | class GitHubFileLoadAction(BaseAction): 47 | """ 48 | #Use this Action to extract specific extension files from GitHub. 49 | """ 50 | 51 | repo: str = Field( 52 | default_factory=str, 53 | description="Repository name- Format: username/repo e.g., aiplanethub/openagi", 54 | ) 55 | directory:str = Field( 56 | default_factory=str, 57 | description="File directory that contains the supporting files i.e., src/openagi/llms", 58 | ) 59 | extension: str = Field( 60 | default_factory = ".txt", 61 | description="File extension to extract the data from. eg: `.py`, `.md`", 62 | ) 63 | 64 | 65 | def execute(self): 66 | access_token = os.environ.get("GITHUB_ACCESS_TOKEN") 67 | 68 | loader = OpenAGIGithubFileLoader( 69 | repo=self.repo, 70 | access_token=access_token, 71 | github_api_url="https://api.github.com", 72 | branch="main", 73 | file_filter=lambda files: files.startswith(self.directory) and files.endswith(self.extension), 74 | ) 75 | 76 | data = loader.load() 77 | response = [] 78 | for doc in data: 79 | response.append(f"{doc.page_content}\nMetadata{doc.metadata}") 80 | 81 | return "\n\n".join(response) 82 | -------------------------------------------------------------------------------- /src/openagi/actions/tools/google_search_tool.py: -------------------------------------------------------------------------------- 1 | from openagi.actions.base import ConfigurableAction 2 | from pydantic import Field 3 | from openagi.exception import OpenAGIException 4 | import logging 5 | 6 | try: 7 | from googlesearch import search 8 | except ImportError: 9 | raise OpenAGIException("Install googlesearch-python with cmd `pip install googlesearch-python`") 10 | 11 | class GoogleSearchTool(ConfigurableAction): 12 | """ 13 | Google Search is a tool used for scraping the Google search engine. Extract information from Google search results. 14 | """ 15 | query: str = Field(..., description="User query or question ") 16 | 17 | max_results: int = Field( 18 | default=10, 19 | description="Total results, in int, to be executed from the search. Defaults to 10. The limit should be 10 and not execeed more than 10", 20 | ) 21 | 22 | lang: str = Field( 23 | default="en", 24 | description = "specify the langauge for your search results." 25 | ) 26 | 27 | def execute(self): 28 | if self.max_results > 15: 29 | logging.info("Over threshold value... Limiting the Max results to 15") 30 | self.max_results = 15 31 | 32 | context = "" 33 | search_results = search(self.query,num_results=self.max_results,lang=self.lang,advanced=True) 34 | for info in search_results: 35 | context += f"Title: {info.title}. Description: {info.description}. URL: {info.url}" 36 | 37 | return context 38 | -------------------------------------------------------------------------------- /src/openagi/actions/tools/luma_ai.py: -------------------------------------------------------------------------------- 1 | from openagi.actions.base import ConfigurableAction 2 | from pydantic import Field 3 | from openagi.exception import OpenAGIException 4 | import warnings 5 | import os 6 | import time 7 | 8 | try: 9 | from lumaai import LumaAI 10 | import requests 11 | except ImportError: 12 | raise OpenAGIException("Install required packages with cmd `pip install lumaai requests`") 13 | 14 | class LumaLabsTool(ConfigurableAction): 15 | """Luma Labs Tool for generating AI images and videos. 16 | 17 | This action uses the Luma Labs API to generate images or videos based on text prompts. 18 | Supports various features including image generation, video generation, and camera motions. 19 | Requires an API key to be configured before use. 20 | """ 21 | 22 | prompt: str = Field(..., description="Text prompt to generate image or video content") 23 | mode: str = Field( 24 | default="image", 25 | description="Mode of operation: 'image' or 'video'" 26 | ) 27 | aspect_ratio: str = Field( 28 | default="16:9", 29 | description="Aspect ratio (1:1, 3:4, 4:3, 9:16, 16:9, 9:21, 21:9)" 30 | ) 31 | model: str = Field( 32 | default="photon-1", 33 | description="Model to use (photon-1, photon-flash-1, ray-2 for video)" 34 | ) 35 | 36 | def __init__(self, **data): 37 | super().__init__(**data) 38 | self._check_deprecated_usage() 39 | 40 | def _check_deprecated_usage(self): 41 | if 'LUMAAI_API_KEY' in os.environ and not self.get_config('api_key'): 42 | warnings.warn( 43 | "Using environment variables for API keys is deprecated. " 44 | "Please use LumaLabsTool.set_config(api_key='your_key') instead.", 45 | DeprecationWarning, 46 | stacklevel=2 47 | ) 48 | self.set_config(api_key=os.environ['LUMAAI_API_KEY']) 49 | 50 | def execute(self) -> str: 51 | api_key: str = self.get_config('api_key') 52 | if not api_key: 53 | if 'LUMAAI_API_KEY' in os.environ: 54 | api_key = os.environ['LUMAAI_API_KEY'] 55 | warnings.warn( 56 | "Using environment variables for API keys is deprecated. " 57 | "Please use LumaLabsTool.set_config(api_key='your_key') instead.", 58 | DeprecationWarning, 59 | stacklevel=2 60 | ) 61 | else: 62 | raise OpenAGIException("API KEY NOT FOUND. Use LumaLabsTool.set_config(api_key='your_key') to set the API key.") 63 | 64 | client = LumaAI(auth_token=api_key) 65 | 66 | try: 67 | if self.mode == "image": 68 | generation = client.generations.image.create( 69 | prompt=self.prompt, 70 | aspect_ratio=self.aspect_ratio, 71 | model=self.model 72 | ) 73 | else: 74 | generation = client.generations.create( 75 | prompt=self.prompt, 76 | aspect_ratio=self.aspect_ratio, 77 | model="ray-2" if self.model == "ray-2" else "photon-1" 78 | ) 79 | 80 | completed = False 81 | while not completed: 82 | generation = client.generations.get(id=generation.id) 83 | if generation.state == "completed": 84 | completed = True 85 | elif generation.state == "failed": 86 | raise OpenAGIException(f"Generation failed: {generation.failure_reason}") 87 | time.sleep(2) 88 | 89 | if self.mode == "image": 90 | result_url = generation.assets.image 91 | file_extension = "jpg" 92 | else: 93 | result_url = generation.assets.video 94 | file_extension = "mp4" 95 | 96 | response = requests.get(result_url, stream=True) 97 | file_name = f'{generation.id}.{file_extension}' 98 | with open(file_name, 'wb') as file: 99 | file.write(response.content) 100 | 101 | return f"""Generation completed successfully! 102 | Mode: {self.mode} 103 | File saved as: {file_name} 104 | Prompt: {self.prompt} 105 | URL: {result_url}""" 106 | 107 | except Exception as e: 108 | raise OpenAGIException(f"Error in Luma Labs generation: {str(e)}") 109 | -------------------------------------------------------------------------------- /src/openagi/actions/tools/pubmed_tool.py: -------------------------------------------------------------------------------- 1 | from openagi.actions.base import ConfigurableAction 2 | from openagi.exception import OpenAGIException 3 | from pydantic import Field 4 | 5 | try: 6 | from Bio import Entrez 7 | except ImportError: 8 | raise OpenAGIException("Install Biopython with cmd `pip install biopython`") 9 | 10 | class PubMedSearch(ConfigurableAction): 11 | """PubMed Search tool for querying biomedical literature. 12 | 13 | This action uses the Bio.Entrez module to search PubMed and retrieve 14 | scientific articles based on user queries. Requires an email address 15 | to be configured for NCBI's tracking purposes. 16 | """ 17 | 18 | query: str = Field(..., description="Search query for PubMed") 19 | max_results: int = Field( 20 | default=5, 21 | description="Maximum number of results to return (default: 5)" 22 | ) 23 | sort: str = Field( 24 | default="relevance", 25 | description="Sort order: 'relevance', 'pub_date', or 'first_author'" 26 | ) 27 | 28 | def execute(self) -> str: 29 | email: str = self.get_config('email') 30 | if not email: 31 | raise OpenAGIException( 32 | "Email not configured. Use PubMedSearch.set_config(email='your_email@example.com')" 33 | ) 34 | 35 | Entrez.email = email 36 | 37 | try: 38 | # Search PubMed 39 | search_handle = Entrez.esearch( 40 | db="pubmed", 41 | term=self.query, 42 | retmax=self.max_results, 43 | sort=self.sort 44 | ) 45 | search_results = Entrez.read(search_handle) 46 | search_handle.close() 47 | 48 | if not search_results["IdList"]: 49 | return "No results found for the given query." 50 | 51 | # Fetch details for found articles 52 | ids = ",".join(search_results["IdList"]) 53 | fetch_handle = Entrez.efetch( 54 | db="pubmed", 55 | id=ids, 56 | rettype="medline", 57 | retmode="text" 58 | ) 59 | 60 | results = fetch_handle.read() 61 | fetch_handle.close() 62 | 63 | # Process and format results 64 | formatted_results = ( 65 | f"Found {len(search_results['IdList'])} results for query: {self.query}\n\n" 66 | f"{results}" 67 | ) 68 | 69 | return formatted_results 70 | 71 | except Exception as e: 72 | return f"Error searching PubMed: {str(e)}" -------------------------------------------------------------------------------- /src/openagi/actions/tools/searchapi_search.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import requests 4 | from urllib.parse import urlencode 5 | from typing import Any 6 | 7 | from pydantic import Field 8 | from openagi.actions.base import ConfigurableAction 9 | import warnings 10 | 11 | 12 | class SearchApiSearch(ConfigurableAction): 13 | """SearchApi.io provides a real-time API to access search results from Google (default), Google Scholar, Bing, Baidu, and other search engines.""" 14 | query: str = Field( 15 | ..., description="User query of type string used to fetch web search results from a search engine." 16 | ) 17 | 18 | def __init__(self, **data): 19 | super().__init__(**data) 20 | self._check_deprecated_usage() 21 | 22 | def _check_deprecated_usage(self): 23 | if 'SEARCHAPI_API_KEY' in os.environ and not self.get_config('api_key'): 24 | warnings.warn( 25 | "Using environment variables for API keys is deprecated and will be removed in a future version. " 26 | "Please use SearchApiSearch.set_config(api_key='your_key', engine='google') instead of setting environment variables.", 27 | DeprecationWarning, 28 | stacklevel=2 29 | ) 30 | self.set_config(api_key=os.environ['SEARCHAPI_API_KEY'], engine='google') 31 | 32 | def execute(self): 33 | base_url = "https://www.searchapi.io/api/v1/search" 34 | api_key = self.get_config('api_key') 35 | engine = self.get_config('engine', 'google') # Default to google if not set 36 | 37 | search_dict = { 38 | "q": self.query, 39 | "engine": engine, 40 | "api_key": api_key 41 | } 42 | 43 | logging.debug(f"{search_dict=}") 44 | 45 | url = f"{base_url}?{urlencode(search_dict)}" 46 | response = requests.request("GET", url) 47 | json_response = response.json() 48 | 49 | organic_results = json_response.get("organic_results", []) 50 | 51 | meta_data = "" 52 | for organic_result in organic_results: 53 | meta_data += f"CONTEXT: {organic_result['title']} \ {organic_result['snippet']}" 54 | meta_data += f"Reference URL: {organic_result['link']}\n" 55 | return meta_data 56 | -------------------------------------------------------------------------------- /src/openagi/actions/tools/serp_search.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import warnings 4 | from typing import Any, ClassVar, Dict 5 | from pydantic import Field, field_validator 6 | from serpapi import GoogleSearch 7 | from openagi.actions.base import ConfigurableAction 8 | from openagi.exception import OpenAGIException 9 | 10 | class GoogleSerpAPISearch(ConfigurableAction): 11 | """Google Serp API Search Tool""" 12 | query: str = Field( 13 | ..., description="User query of type string used to fetch web search results from Google." 14 | ) 15 | max_results: Any = Field( 16 | default=10, 17 | description="Total results, an integer, to be executed from the search. Defaults to 10", 18 | ) 19 | 20 | def __init__(self, **data): 21 | super().__init__(**data) 22 | self._check_deprecated_usage() 23 | 24 | def _check_deprecated_usage(self): 25 | if 'GOOGLE_SERP_API_KEY' in os.environ and not self.get_config('api_key'): 26 | warnings.warn( 27 | "Using environment variables for API keys is deprecated and will be removed in a future version. " 28 | "Please use GoogleSerpAPISearch.set_config(api_key='your_key') instead of setting environment variables.", 29 | DeprecationWarning, 30 | stacklevel=2 31 | ) 32 | # Automatically migrate the environment variable to config 33 | self.set_config(api_key=os.environ['GOOGLE_SERP_API_KEY']) 34 | 35 | def execute(self): 36 | api_key = self.get_config('api_key') 37 | 38 | if not api_key: 39 | if 'GOOGLE_SERP_API_KEY' in os.environ: 40 | api_key = os.environ['GOOGLE_SERP_API_KEY'] 41 | warnings.warn( 42 | "Using environment variables for API keys is deprecated and will be removed in a future version. " 43 | "Please use GoogleSerpAPISearch.set_config(api_key='your_key') instead of setting environment variables.", 44 | DeprecationWarning, 45 | stacklevel=2 46 | ) 47 | else: 48 | raise OpenAGIException("API KEY NOT FOUND. Use GoogleSerpAPISearch.set_config(api_key='your_key') to set the API key.") 49 | 50 | search_dict = { 51 | "q": self.query, 52 | "hl": "en", 53 | "gl": "us", 54 | "num": self.max_results, 55 | "api_key": api_key, 56 | } 57 | logging.debug(f"{search_dict=}") 58 | search = GoogleSearch(search_dict) 59 | 60 | max_retries = 3 61 | retries = 1 62 | result = None 63 | 64 | while retries < max_retries and not result: 65 | try: 66 | result = search.get_dict() 67 | except TypeError: 68 | logging.error("Error during GoogleSearch.", exc_info=True) 69 | continue 70 | retries += 1 71 | 72 | if not result: 73 | raise OpenAGIException(f"Unable to generate result for the query {self.query}") 74 | 75 | logging.debug(result) 76 | logging.info(f"NOTE: REMOVE THIS BEFORE RELEASE:\n{result}\n") 77 | 78 | if error := result.get("error", NotImplemented): 79 | raise OpenAGIException( 80 | f"Error while running action {self.__class__.__name__}: {error}" 81 | ) 82 | 83 | meta_data = "" 84 | for info in result.get("organic_results", []): 85 | meta_data += f"CONTEXT: {info.get('title', '')} \ {info.get('snippet', '')}\n" 86 | meta_data += f"Reference URL: {info.get('link', '')}\n\n" 87 | 88 | return meta_data.strip() -------------------------------------------------------------------------------- /src/openagi/actions/tools/serper_search.py: -------------------------------------------------------------------------------- 1 | import http.client 2 | import json 3 | import os 4 | import warnings 5 | from pydantic import Field 6 | from openagi.actions.base import ConfigurableAction 7 | from typing import ClassVar, Dict, Any 8 | from openagi.exception import OpenAGIException 9 | 10 | class SerperSearch(ConfigurableAction): 11 | """Google Serper.dev Search Tool""" 12 | query: str = Field(..., description="User query to fetch web search results from Google") 13 | 14 | def __init__(self, **data): 15 | super().__init__(**data) 16 | self._check_deprecated_usage() 17 | 18 | def _check_deprecated_usage(self): 19 | if 'SERPER_API_KEY' in os.environ and not self.get_config('api_key'): 20 | warnings.warn( 21 | "Using environment variables for API keys is deprecated and will be removed in a future version. " 22 | "Please use SerperSearch.set_config(api_key='your_key') instead of setting environment variables.", 23 | DeprecationWarning, 24 | stacklevel=2 25 | ) 26 | self.set_config(api_key=os.environ['SERPER_API_KEY']) 27 | 28 | def execute(self): 29 | api_key = self.get_config('api_key') 30 | 31 | if not api_key: 32 | if 'SERPER_API_KEY' in os.environ: 33 | api_key = os.environ['SERPER_API_KEY'] 34 | warnings.warn( 35 | "Using environment variables for API keys is deprecated and will be removed in a future version. " 36 | "Please use SerperSearch.set_config(api_key='your_key') instead of setting environment variables.", 37 | DeprecationWarning, 38 | stacklevel=2 39 | ) 40 | else: 41 | raise OpenAGIException("API KEY NOT FOUND. Use SerperSearch.set_config(api_key='your_key') to set the API key.") 42 | 43 | conn = http.client.HTTPSConnection("google.serper.dev") 44 | payload = json.dumps({"q": self.query}) 45 | headers = {"X-API-KEY": api_key, "Content-Type": "application/json"} 46 | conn.request("POST", "/search", payload, headers) 47 | res = conn.getresponse() 48 | data = res.read().decode("utf-8") 49 | result = json.loads(data) 50 | 51 | meta_data = "" 52 | for info in result.get("organic", []): 53 | meta_data += f"CONTEXT: {info.get('title', '')} \ {info.get('snippet', '')}\n" 54 | meta_data += f"Reference URL: {info.get('link', '')}\n\n" 55 | 56 | return meta_data.strip() -------------------------------------------------------------------------------- /src/openagi/actions/tools/speech_tool.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import os 4 | import warnings 5 | from typing import Any 6 | from openagi.exception import OpenAGIException 7 | from pydantic import Field 8 | try: 9 | from elevenlabs.client import ElevenLabs 10 | from elevenlabs import play 11 | except ImportError: 12 | raise OpenAGIException("Please install the required dependencies by running 'pip install -r requirements.txt'.") 13 | 14 | from openagi.actions.base import ConfigurableAction 15 | 16 | 17 | 18 | class ElevenLabsTTS(ConfigurableAction): 19 | """Use this Action to generate lifelike speech using ElevenLabs' text-to-speech API.""" 20 | 21 | text: Any = Field( 22 | default_factory=str, 23 | description="Text input that needs to be converted to speech.", 24 | ) 25 | voice_id: str = Field( 26 | default="JBFqnCBsd6RMkjVDRZzb", 27 | description="The ID of the voice to be used for speech synthesis.", 28 | ) 29 | model_id: str = Field( 30 | default="eleven_multilingual_v2", 31 | description="The model ID used for text-to-speech conversion.", 32 | ) 33 | output_format: str = Field( 34 | default="mp3_44100_128", 35 | description="The output format of the generated audio.", 36 | ) 37 | api_key: str = Field( 38 | default_factory=lambda: os.getenv("ELEVENLABS_API_KEY", ""), 39 | description="API key for ElevenLabs' authentication.", 40 | ) 41 | 42 | def execute(self): 43 | logging.info(f"Generating speech for text: {self.text}") 44 | 45 | if not self.api_key: 46 | warnings.warn( 47 | "ElevenLabs API key is missing. Please provide it as a parameter or set it in the .env file.", 48 | DeprecationWarning, 49 | stacklevel=2 50 | ) 51 | return json.dumps({"error": "ElevenLabs API key is missing. Please provide it as a parameter or set it in the .env file."}) 52 | 53 | client = ElevenLabs(api_key=self.api_key) 54 | try: 55 | audio = client.text_to_speech.convert( 56 | text=self.text, 57 | voice_id=self.voice_id, 58 | model_id=self.model_id, 59 | output_format=self.output_format, 60 | ) 61 | play(audio) 62 | return json.dumps({"success": "Audio played successfully."}) 63 | 64 | except Exception as e: 65 | logging.error(f"Error generating speech: {str(e)}") 66 | return json.dumps({"error": f"Failed to generate speech: {str(e)}"}) 67 | -------------------------------------------------------------------------------- /src/openagi/actions/tools/tavilyqasearch.py: -------------------------------------------------------------------------------- 1 | from openagi.actions.base import ConfigurableAction 2 | from pydantic import Field 3 | from openagi.exception import OpenAGIException 4 | import os 5 | import warnings 6 | 7 | try: 8 | from tavily import TavilyClient 9 | except ImportError: 10 | raise OpenAGIException("Install Tavily with cmd `pip install tavily-python`") 11 | 12 | class TavilyWebSearchQA(ConfigurableAction): 13 | """ 14 | Tavily Web Search QA is a tool used when user needs to ask the question in terms of query to get response 15 | """ 16 | query: str = Field(..., description="User query or question") 17 | 18 | def __init__(self, **data): 19 | super().__init__(**data) 20 | self._check_deprecated_usage() 21 | 22 | def _check_deprecated_usage(self): 23 | if 'TAVILY_API_KEY' in os.environ and not self.get_config('api_key'): 24 | warnings.warn( 25 | "Using environment variables for API keys is deprecated and will be removed in a future version. " 26 | "Please use TavilyWebSearchQA.set_config(api_key='your_key') instead of setting environment variables.", 27 | DeprecationWarning, 28 | stacklevel=2 29 | ) 30 | self.set_config(api_key=os.environ['TAVILY_API_KEY']) 31 | 32 | def execute(self): 33 | api_key = self.get_config('api_key') 34 | 35 | if not api_key: 36 | if 'TAVILY_API_KEY' in os.environ: 37 | api_key = os.environ['TAVILY_API_KEY'] 38 | warnings.warn( 39 | "Using environment variables for API keys is deprecated and will be removed in a future version. " 40 | "Please use TavilyWebSearchQA.set_config(api_key='your_key') instead of setting environment variables.", 41 | DeprecationWarning, 42 | stacklevel=2 43 | ) 44 | else: 45 | raise OpenAGIException("API KEY NOT FOUND. Use TavilyWebSearchQA.set_config(api_key='your_key') to set the API key.") 46 | 47 | client = TavilyClient(api_key=api_key) 48 | response = client.qna_search(query=self.query) 49 | return response -------------------------------------------------------------------------------- /src/openagi/actions/tools/unstructured_io.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from pydantic import Field 3 | from openagi.exception import OpenAGIException 4 | from openagi.actions.base import ConfigurableAction 5 | from typing import ClassVar, Dict, Any 6 | 7 | try: 8 | from unstructured.partition.pdf import partition_pdf 9 | from unstructured.chunking.title import chunk_by_title 10 | except ImportError: 11 | raise OpenAGIException("Install Unstructured with cmd `pip install 'unstructured[all-docs]'`") 12 | 13 | class UnstructuredPdfLoaderAction(ConfigurableAction): 14 | """ 15 | Use this Action to extract content from PDFs including metadata. 16 | Returns a list of dictionary with keys 'type', 'element_id', 'text', 'metadata'. 17 | """ 18 | 19 | def execute(self): 20 | file_path = self.get_config('filename') 21 | logging.info(f"Reading file {file_path}") 22 | 23 | elements = partition_pdf(file_path, extract_images_in_pdf=True) 24 | 25 | chunks = chunk_by_title(elements) 26 | 27 | dict_elements = [] 28 | for element in chunks: 29 | dict_elements.append(element.to_dict()) 30 | 31 | with open("ele.txt", "w") as f: 32 | f.write(str(dict_elements)) 33 | 34 | return str(dict_elements) 35 | -------------------------------------------------------------------------------- /src/openagi/actions/tools/webloader.py: -------------------------------------------------------------------------------- 1 | import re 2 | from collections import Counter 3 | from langchain_community.document_loaders import WebBaseLoader 4 | from pydantic import Field 5 | from openagi.actions.base import ConfigurableAction 6 | import logging 7 | 8 | class WebBaseContextTool(ConfigurableAction): 9 | """ 10 | Use this Action to extract actual context from a Webpage. The WebBaseContextTool class provides a way to load and optionally summarize the content of a webpage, returning the metadata and page content as a context string. 11 | If a url seems to be failing for more than once, ignore it and move forward. 12 | """ 13 | 14 | link: str = Field( 15 | default_factory=str, 16 | description="Extract context for the Agents from the Web Search through web page", 17 | ) 18 | can_summarize: bool = Field( 19 | default=True, 20 | description="Indicates whether the action can summarize the content before returning. Uses lightweight summarization. Defaults to true.", 21 | ) 22 | 23 | def _split_into_sentences(self, text): 24 | """Split text into sentences using simple regex""" 25 | text = re.sub(r'\s+', ' ', text) 26 | sentences = re.split(r'[.!?]+', text) 27 | return [s.strip() for s in sentences if len(s.strip()) > 10] 28 | 29 | def _calculate_word_freq(self, sentences): 30 | """Calculate word frequency across all sentences""" 31 | words = ' '.join(sentences).lower().split() 32 | return Counter(words) 33 | 34 | def _score_sentence(self, sentence, word_freq): 35 | """Score a sentence based on word frequency and length""" 36 | words = sentence.lower().split() 37 | score = sum(word_freq[word] for word in words) 38 | return score / (len(words) + 1) 39 | 40 | def _get_summary(self, text, num_sentences=6): 41 | """Create a simple summary by selecting top scoring sentences""" 42 | sentences = self._split_into_sentences(text) 43 | if not sentences: 44 | return text 45 | 46 | word_freq = self._calculate_word_freq(sentences) 47 | 48 | scored_sentences = [ 49 | (self._score_sentence(sentence, word_freq), i, sentence) 50 | for i, sentence in enumerate(sentences) 51 | ] 52 | 53 | top_sentences = sorted(scored_sentences, reverse=True)[:num_sentences] 54 | ordered_sentences = sorted(top_sentences, key=lambda x: x[1]) 55 | 56 | return ' '.join(sentence for _, _, sentence in ordered_sentences) 57 | 58 | def execute(self): 59 | loader = WebBaseLoader(self.link) 60 | data = loader.load() 61 | metadata = data[0].metadata["title"] 62 | page_content = data[0].page_content 63 | if page_content: 64 | page_content = page_content.strip() 65 | if self.can_summarize: 66 | logging.info(f"Summarizing the page {self.link}...") 67 | page_content = self._get_summary(page_content) 68 | context = metadata + page_content 69 | return context 70 | -------------------------------------------------------------------------------- /src/openagi/actions/tools/wikipedia_search.py: -------------------------------------------------------------------------------- 1 | import json 2 | from typing import Any 3 | from openagi.actions.base import ConfigurableAction 4 | from pydantic import Field 5 | import wikipedia 6 | import logging 7 | 8 | class WikipediaSearch(ConfigurableAction): 9 | """Use this Action to search Wikipedia for a query.""" 10 | 11 | name: str = Field( 12 | default_factory=str, 13 | description="WikipediaSearch Action to search Wikipedia using the query.", 14 | ) 15 | description: str = Field( 16 | default_factory=str, 17 | description="This action is used to search and retrieve information from Wikipedia articles.", 18 | ) 19 | 20 | query: str = Field( 21 | ..., 22 | description="User query to fetch information from Wikipedia", 23 | ) 24 | 25 | max_results: int = Field( 26 | default=3, 27 | description="Maximum number of sentences to return from the Wikipedia article. Defaults to 3.", 28 | ) 29 | 30 | def execute(self): 31 | try: 32 | # Search Wikipedia 33 | search_results = wikipedia.search(self.query) 34 | 35 | if not search_results: 36 | return json.dumps({"error": "No results found"}) 37 | 38 | # Get the first (most relevant) page 39 | try: 40 | page = wikipedia.page(search_results[0]) 41 | summary = wikipedia.summary(search_results[0], sentences=self.max_results) 42 | 43 | result = { 44 | "title": page.title, 45 | "summary": summary, 46 | "url": page.url 47 | } 48 | 49 | return json.dumps(result) 50 | 51 | except wikipedia.DisambiguationError as e: 52 | # Handle disambiguation pages 53 | return json.dumps({ 54 | "error": "Disambiguation error", 55 | "options": e.options[:5] # Return first 5 options 56 | }) 57 | 58 | except Exception as e: 59 | logging.error(f"Error in Wikipedia search: {str(e)}") 60 | return json.dumps({"error": str(e)}) 61 | -------------------------------------------------------------------------------- /src/openagi/actions/tools/yahoo_finance.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Optional 2 | from openagi.actions.base import ConfigurableAction 3 | from openagi.exception import OpenAGIException 4 | from pydantic import Field 5 | 6 | try: 7 | import yfinance as yf 8 | except ImportError: 9 | raise OpenAGIException("Install yfinance with cmd `pip install yfinance`") 10 | 11 | class YahooFinanceTool(ConfigurableAction): 12 | """Yahoo Finance tool for fetching stock market data. 13 | 14 | This action uses the yfinance library to retrieve financial information 15 | about stocks, including current price, historical data, and company info. 16 | """ 17 | 18 | symbol: str = Field(..., description="Stock symbol to look up (e.g., 'AAPL' for Apple)") 19 | info_type: str = Field( 20 | default="summary", 21 | description="Type of information to retrieve: 'summary', 'price', 'history', or 'info'" 22 | ) 23 | period: Optional[str] = Field( 24 | default="1d", 25 | description="Time period for historical data (e.g., '1d', '5d', '1mo', '1y')" 26 | ) 27 | 28 | def execute(self) -> str: 29 | try: 30 | stock = yf.Ticker(self.symbol) 31 | 32 | if self.info_type == "summary": 33 | info = stock.info 34 | return ( 35 | f"Company: {info.get('longName', 'N/A')}\n" 36 | f"Current Price: ${info.get('currentPrice', 'N/A')}\n" 37 | f"Market Cap: ${info.get('marketCap', 'N/A')}\n" 38 | f"52 Week High: ${info.get('fiftyTwoWeekHigh', 'N/A')}\n" 39 | f"52 Week Low: ${info.get('fiftyTwoWeekLow', 'N/A')}" 40 | ) 41 | 42 | elif self.info_type == "price": 43 | return f"Current price of {self.symbol}: ${stock.info.get('currentPrice', 'N/A')}" 44 | 45 | elif self.info_type == "history": 46 | history = stock.history(period=self.period) 47 | if history.empty: 48 | return f"No historical data available for {self.symbol}" 49 | 50 | latest = history.iloc[-1] 51 | return ( 52 | f"Historical data for {self.symbol} (last entry):\n" 53 | f"Date: {latest.name.date()}\n" 54 | f"Open: ${latest['Open']:.2f}\n" 55 | f"High: ${latest['High']:.2f}\n" 56 | f"Low: ${latest['Low']:.2f}\n" 57 | f"Close: ${latest['Close']:.2f}\n" 58 | f"Volume: {latest['Volume']}" 59 | ) 60 | 61 | elif self.info_type == "info": 62 | info = stock.info 63 | return ( 64 | f"Company Information for {self.symbol}:\n" 65 | f"Industry: {info.get('industry', 'N/A')}\n" 66 | f"Sector: {info.get('sector', 'N/A')}\n" 67 | f"Website: {info.get('website', 'N/A')}\n" 68 | f"Description: {info.get('longBusinessSummary', 'N/A')}" 69 | ) 70 | 71 | else: 72 | return f"Invalid info_type: {self.info_type}. Supported types are: summary, price, history, info" 73 | 74 | except Exception as e: 75 | return f"Error fetching data for {self.symbol}: {str(e)}" -------------------------------------------------------------------------------- /src/openagi/actions/tools/youtubesearch.py: -------------------------------------------------------------------------------- 1 | from openagi.actions.base import ConfigurableAction 2 | from pydantic import Field 3 | from typing import Any 4 | from openagi.exception import OpenAGIException 5 | 6 | try: 7 | import yt_dlp 8 | from youtube_search import YoutubeSearch 9 | except ImportError: 10 | raise OpenAGIException("Install YouTube transcript with cmd `pip install yt-dlp` and `pip install youtube-search`") 11 | 12 | class YouTubeSearchTool(ConfigurableAction): 13 | """Youtube Search Tool""" 14 | 15 | query: str = Field( 16 | ..., description="Keyword required to search the video content on YouTube" 17 | ) 18 | max_results: Any = Field( 19 | default=5, 20 | description="Total results, an integer, to be executed from the search. Defaults to 5", 21 | ) 22 | 23 | def execute(self): 24 | ydl_opts = { 25 | 'quiet': True, 26 | 'skip_download': True, 27 | 'force_generic_extractor': True, 28 | 'format': 'best' 29 | } 30 | results = YoutubeSearch(self.query, max_results=self.max_results) 31 | response = results.to_dict() 32 | context = "" 33 | for ids in response: 34 | url = "https://youtube.com/watch?v="+ids['id'] 35 | context += f"Title: {ids['title']}" 36 | with yt_dlp.YoutubeDL(ydl_opts) as ydl: 37 | info_dict = ydl.extract_info(url, download=False) 38 | description = info_dict.get('description', None) 39 | context += f"Description: {description} \n\n" 40 | return context -------------------------------------------------------------------------------- /src/openagi/actions/utils.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from openagi.actions.base import BaseAction 3 | 4 | 5 | def run_action(action_cls: str, memory, llm, **kwargs): 6 | """ 7 | Runs the specified action with the provided keyword arguments. 8 | 9 | Args: 10 | action_cls (str): The class name of the action to be executed. 11 | **kwargs: Keyword arguments to be passed to the action class constructor. 12 | 13 | Returns: 14 | The result of executing the action. 15 | """ 16 | logging.info(f"Running Action - {str(action_cls)}") 17 | kwargs["memory"] = memory 18 | kwargs["llm"] = llm 19 | action: BaseAction = action_cls(**kwargs) # Create an instance with provided kwargs 20 | res = action.execute() 21 | return res 22 | -------------------------------------------------------------------------------- /src/openagi/cli.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | import os 4 | from openagi.memory.base import BaseMemory 5 | 6 | 7 | def clear_long_term_memory(): 8 | """Clears the long-term memory directory using environment variables.""" 9 | long_term_dir = os.getenv("LONG_TERM_DIR", ".long_term_dir") 10 | BaseMemory.clear_long_term_memory(long_term_dir) 11 | 12 | 13 | def main(): 14 | parser = argparse.ArgumentParser(description="OpenAGI CLI for various commands.") 15 | 16 | parser.add_argument( 17 | "--clear-ltm", 18 | action="store_true", 19 | help="Clear the long-term memory directory." 20 | ) 21 | 22 | args = parser.parse_args() 23 | 24 | if args.clear_ltm: 25 | clear_long_term_memory() 26 | else: 27 | parser.print_help() 28 | 29 | 30 | if __name__ == "__main__": 31 | main() 32 | -------------------------------------------------------------------------------- /src/openagi/exception.py: -------------------------------------------------------------------------------- 1 | class OpenAGIException(Exception): 2 | ... 3 | 4 | 5 | class ExecutionFailureException(Exception): 6 | """Task Execution Failed""" 7 | 8 | 9 | class LLMResponseError(OpenAGIException): 10 | """No useful Response found""" 11 | -------------------------------------------------------------------------------- /src/openagi/llms/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /src/openagi/llms/azure.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | from langchain_core.messages import HumanMessage 3 | from langchain_openai import AzureChatOpenAI # Assuming this import is correct 4 | 5 | from openagi.llms.base import LLMBaseModel, LLMConfigModel 6 | from openagi.utils.yamlParse import read_from_env 7 | 8 | 9 | class AzureChatConfigModel(LLMConfigModel): 10 | """Configuration model for Azure Chat OpenAI.""" 11 | 12 | base_url: str 13 | deployment_name: str 14 | model_name: str 15 | openai_api_version: str 16 | api_key: str 17 | 18 | 19 | class AzureChatOpenAIModel(LLMBaseModel): 20 | """Azure's OpenAI service implementation of the LLMBaseModel. 21 | 22 | This class implements the specific logic required to work with Azure's OpenAI service. 23 | """ 24 | 25 | config: Any 26 | 27 | def load(self): 28 | """Initializes the AzureChatOpenAI instance with configurations.""" 29 | self.llm = AzureChatOpenAI( 30 | azure_deployment=self.config.deployment_name, 31 | model_name=self.config.model_name, 32 | openai_api_version=self.config.openai_api_version, 33 | openai_api_key=self.config.api_key, 34 | azure_endpoint=self.config.base_url, 35 | ) 36 | return self.llm 37 | 38 | def run(self, input_data: str): 39 | """Runs the Azure Chat OpenAI model with the provided input text. 40 | 41 | Args: 42 | input_data: The input text to process. 43 | 44 | Returns: 45 | The response from Azure's OpenAI service. 46 | """ 47 | if not self.llm: 48 | self.load() 49 | if not self.llm: 50 | raise ValueError("`llm` attribute not set.") 51 | message = HumanMessage(content=input_data) 52 | resp = self.llm([message]) 53 | return resp.content 54 | 55 | @staticmethod 56 | def load_from_env_config() -> AzureChatConfigModel: 57 | """Loads the AzureChatOpenAI configurations from a YAML file. 58 | 59 | Returns: 60 | An instance of AzureChatConfigModel with loaded configurations. 61 | """ 62 | return AzureChatConfigModel( 63 | base_url=read_from_env("AZURE_BASE_URL", raise_exception=True), 64 | deployment_name=read_from_env("AZURE_DEPLOYMENT_NAME", raise_exception=True), 65 | model_name=read_from_env("AZURE_MODEL_NAME", raise_exception=True), 66 | openai_api_version=read_from_env("AZURE_OPENAI_API_VERSION", raise_exception=True), 67 | api_key=read_from_env("AZURE_OPENAI_API_KEY", raise_exception=True), 68 | ) 69 | -------------------------------------------------------------------------------- /src/openagi/llms/base.py: -------------------------------------------------------------------------------- 1 | from abc import abstractmethod 2 | from typing import Any 3 | 4 | from pydantic import BaseModel 5 | 6 | 7 | class LLMConfigModel(BaseModel): 8 | """Base configuration model for all LLMs. 9 | 10 | This class can be extended to include more fields specific to certain LLMs. 11 | """ 12 | 13 | class Config: 14 | protected_namespaces = () 15 | 16 | pass # Common fields could be defined here, if any. 17 | 18 | 19 | class LLMBaseModel(BaseModel): 20 | """Abstract base class for language learning models. 21 | 22 | Attributes: 23 | config: An instance of LLMConfigModel containing configuration. 24 | llm: Placeholder for the actual LLM instance, to be defined in subclasses. 25 | """ 26 | 27 | config: Any 28 | llm: Any = None 29 | 30 | @abstractmethod 31 | def load(self): 32 | """Initializes the LLM instance with configurations.""" 33 | pass 34 | 35 | @abstractmethod 36 | def run(self, input_data: Any): 37 | """Interacts with the LLM service using the provided input. 38 | 39 | Args: 40 | input_data: The input to process by the LLM. The format can vary. 41 | 42 | Returns: 43 | The result from processing the input data through the LLM. 44 | """ 45 | pass 46 | 47 | @staticmethod 48 | @abstractmethod 49 | def load_from_env_config(): 50 | """Loads configuration values from a YAML file.""" 51 | pass 52 | -------------------------------------------------------------------------------- /src/openagi/llms/cerebras.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | from langchain_core.messages import HumanMessage 3 | from openagi.exception import OpenAGIException 4 | from openagi.llms.base import LLMBaseModel, LLMConfigModel 5 | from openagi.utils.yamlParse import read_from_env 6 | 7 | try: 8 | from langchain_cerebras import ChatCerebras 9 | except ImportError: 10 | raise OpenAGIException("Install langchain-cerebras with cmd `pip install langchain-cerebras`") 11 | 12 | class CerebrasConfigModel(LLMConfigModel): 13 | 14 | """ 15 | Configuration model for Cerebras. 16 | Reference: https://cloud.cerebras.ai 17 | 18 | Attributes: 19 | cerebras_api_key (str): API key for Cerebras. 20 | model_name (str): Name of the model to use. Default is 'llama3.1-8b'. 21 | temperature (float): Sampling temperature. Default is 0.7. 22 | 23 | Note: 24 | Available models as of December 2024: llama-3.3-70b, llama-3.1-70b, llama-3.1-8b 25 | """ 26 | 27 | cerebras_api_key: str 28 | model_name: str = "llama3.1-8b" 29 | temperature: float = 0.7 30 | 31 | class CerebrasModel(LLMBaseModel): 32 | """Cerebras LLM implementation of the LLMBaseModel.""" 33 | 34 | config: Any 35 | 36 | def load(self): 37 | """Initializes the Cerebras LLM instance with configurations.""" 38 | self.llm = ChatCerebras( 39 | api_key=self.config.cerebras_api_key, 40 | model_name=self.config.model_name, 41 | temperature=self.config.temperature 42 | ) 43 | return self.llm 44 | 45 | def run(self, input_data: str): 46 | """Runs the Cerebras model with the provided input text.""" 47 | if not self.llm: 48 | self.load() 49 | if not self.llm: 50 | raise ValueError("`llm` attribute not set.") 51 | message = HumanMessage(content=input_data) 52 | response = self.llm([message]) 53 | return response.content 54 | 55 | @staticmethod 56 | def load_from_env_config() -> CerebrasConfigModel: 57 | """Loads the Cerebras configurations from environment variables.""" 58 | return CerebrasConfigModel( 59 | cerebras_api_key=read_from_env("CEREBRAS_API_KEY", raise_exception=True), 60 | model_name=read_from_env("Cerebras_MODEL", raise_exception=False), 61 | temperature=read_from_env("Cerebras_TEMP", raise_exception=False) 62 | ) 63 | -------------------------------------------------------------------------------- /src/openagi/llms/claude.py: -------------------------------------------------------------------------------- 1 | from openagi.llms.base import LLMBaseModel, LLMConfigModel 2 | from openagi.utils.yamlParse import read_from_env 3 | from typing import Any 4 | 5 | from langchain_core.messages import HumanMessage 6 | 7 | from openagi.exception import OpenAGIException 8 | try: 9 | from langchain_anthropic import ChatAnthropic 10 | except ImportError: 11 | raise OpenAGIException("Install Langchain Anthropic to use Claude LLM `pip install langchain-anthropic`") 12 | 13 | class ChatAnthropicConfigModel(LLMConfigModel): 14 | """ 15 | Configuration model Anthropic model. This provides opus, sonnet SOTA models 16 | """ 17 | anthropic_api_key: str 18 | temperature: float = 0.5 19 | model_name: str = "claude-3-5-sonnet-20240620" 20 | 21 | class ChatAnthropicModel(LLMBaseModel): 22 | """ 23 | Define the Claude LLM from Anthropic using Langchain LLM integration 24 | """ 25 | config: Any 26 | 27 | def load(self): 28 | """Initializes the ChatAnthropic instance with configurations.""" 29 | self.llm = ChatAnthropic( 30 | model_name = self.config.model_name, 31 | api_key = self.config.anthropic_api_key, 32 | temperature = self.config.temperature 33 | ) 34 | return self.llm 35 | 36 | def run(self, input_data: str): 37 | """ 38 | Runs the Chat Anthropic model with the provided input text. 39 | Args: 40 | input_data: The input text to process. 41 | Returns: 42 | The response from Anthropic - Claude LLM. 43 | """ 44 | 45 | if not self.llm: 46 | self.load() 47 | if not self.llm: 48 | raise ValueError("`llm` attribute not set.") 49 | 50 | message = HumanMessage(content=input_data) 51 | response = self.llm([message]) 52 | return response.content 53 | 54 | @staticmethod 55 | def load_from_env_config() -> ChatAnthropicConfigModel: 56 | """Loads the ChatAnthropic configurations from a env file. 57 | 58 | Returns: 59 | An instance of ChatAnthropicConfigModel with loaded configurations. 60 | """ 61 | return ChatAnthropicConfigModel( 62 | anthropic_api_key = read_from_env("ANTHROPIC_API_KEY", raise_exception=True), 63 | model_name = read_from_env("CLAUDE_MODEL_NAME",raise_exception=False), 64 | temperature = read_from_env("TEMPERATURE",raise_exception=False) 65 | ) 66 | -------------------------------------------------------------------------------- /src/openagi/llms/cohere.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | from langchain_core.messages import HumanMessage 3 | from openagi.exception import OpenAGIException 4 | from openagi.llms.base import LLMBaseModel, LLMConfigModel 5 | from openagi.utils.yamlParse import read_from_env 6 | 7 | try: 8 | from langchain_cohere import ChatCohere 9 | except ImportError: 10 | raise OpenAGIException("Install langchain groq with cmd `pip install langchain-cohere`") 11 | 12 | class CohereConfigModel(LLMConfigModel): 13 | """Configuration model for Cohere model""" 14 | 15 | cohere_api_key: str 16 | model_name:str = "command" 17 | 18 | class CohereModel(LLMBaseModel): 19 | """Cohere LLM implementation of the LLMBaseModel. 20 | 21 | This class implements the specific logic required to work with Cohere LLM that runs model locally on CPU. 22 | """ 23 | 24 | config: Any 25 | 26 | def load(self): 27 | """Initializes the Cohere instance with configurations.""" 28 | self.llm = ChatCohere( 29 | model = self.config.model_name, 30 | cohere_api_key = self.config.cohere_api_key, 31 | temperature = 0.1 32 | ) 33 | return self.llm 34 | 35 | def run(self, input_data: str): 36 | """Runs the Cohere model with the provided input text. 37 | 38 | Args: 39 | input_data: The input text to process. 40 | 41 | Returns: 42 | The response from Cohere LLM service. 43 | """ 44 | if not self.llm: 45 | self.load() 46 | if not self.llm: 47 | raise ValueError("`llm` attribute not set.") 48 | message = HumanMessage(content=input_data) 49 | resp = self.llm([message]) 50 | return resp.content 51 | 52 | @staticmethod 53 | def load_from_env_config() -> CohereConfigModel: 54 | """Loads the Cohere configurations from a YAML file. 55 | 56 | Returns: 57 | An instance of CohereConfigModel with loaded configurations. 58 | """ 59 | return CohereConfigModel( 60 | model_name = read_from_env("COHERE_MODEL",raise_exception=True), 61 | cohere_api_key = read_from_env("COHERE_API_KEY",raise_exception=True) 62 | ) 63 | -------------------------------------------------------------------------------- /src/openagi/llms/gemini.py: -------------------------------------------------------------------------------- 1 | from openagi.exception import OpenAGIException 2 | from openagi.llms.base import LLMBaseModel, LLMConfigModel 3 | from openagi.utils.yamlParse import read_from_env 4 | from typing import Any 5 | from langchain_core.messages import HumanMessage 6 | 7 | try: 8 | from langchain_google_genai import ChatGoogleGenerativeAI 9 | except ImportError: 10 | raise OpenAGIException("Install langchain Google Gemini with cmd `pip install langchain-google-genai==0.0.3`") 11 | 12 | class GeminiConfigModel(LLMConfigModel): 13 | """Configuration model for Gemini Chat model.""" 14 | 15 | google_api_key: str 16 | model_name: str = "gemini-pro" 17 | temperature: float = 0.1 18 | 19 | class GeminiModel(LLMBaseModel): 20 | """Chat Gemini Model implementation of the LLMBaseModel. 21 | 22 | This class implements the specific logic required to work with Chat Google Generative - Gemini Model. 23 | """ 24 | 25 | config: Any 26 | 27 | def load(self): 28 | """Initializes the GeminiModel instance with configurations.""" 29 | self.llm = ChatGoogleGenerativeAI( 30 | google_api_key = self.config.google_api_key, 31 | model = self.config.model_name, 32 | temperature= self.config.temperature 33 | ) 34 | return self.llm 35 | 36 | def run(self, input_data: str): 37 | """Runs the Chat Gemini model with the provided input text. 38 | 39 | Args: 40 | input_data: The input text to process. 41 | 42 | Returns: 43 | The response from Gemini model with low inference latency. 44 | """ 45 | if not self.llm: 46 | self.load() 47 | if not self.llm: 48 | raise ValueError("`llm` attribute not set.") 49 | message = HumanMessage(content=input_data) 50 | resp = self.llm([message]) 51 | return resp.content 52 | 53 | @staticmethod 54 | def load_from_env_config() -> GeminiConfigModel: 55 | """Loads the GeminiModel configurations from a env file. 56 | 57 | Returns: 58 | An instance of GeminiConfigModel with loaded configurations. 59 | """ 60 | return GeminiConfigModel( 61 | google_api_key = read_from_env("GOOGLE_API_KEY", raise_exception=True), 62 | model_name = read_from_env("Gemini_MODEL",raise_exception=False), 63 | temperature=read_from_env("Gemini_TEMP",raise_exception=False) 64 | ) 65 | -------------------------------------------------------------------------------- /src/openagi/llms/groq.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | from langchain_core.messages import HumanMessage 3 | from openagi.exception import OpenAGIException 4 | from openagi.llms.base import LLMBaseModel, LLMConfigModel 5 | from openagi.utils.yamlParse import read_from_env 6 | 7 | try: 8 | from langchain_groq import ChatGroq 9 | except ImportError: 10 | raise OpenAGIException("Install langchain groq with cmd `pip install langchain-groq`") 11 | 12 | class GroqConfigModel(LLMConfigModel): 13 | """Configuration model for Groq Chat model.""" 14 | 15 | groq_api_key: str 16 | model_name: str = "mixtral-8x7b-32768" 17 | temperature: float = 0.1 18 | 19 | class GroqModel(LLMBaseModel): 20 | """Chat Groq Model implementation of the LLMBaseModel. 21 | 22 | This class implements the specific logic required to work with Chat Groq Model. 23 | """ 24 | 25 | config: Any 26 | 27 | def load(self): 28 | """Initializes the GroqModel instance with configurations.""" 29 | self.llm = ChatGroq( 30 | model_name = self.config.model_name, 31 | groq_api_key = self.config.groq_api_key, 32 | temperature = self.config.temperature 33 | ) 34 | return self.llm 35 | 36 | def run(self, input_data: str): 37 | """Runs the Chat Groq model with the provided input text. 38 | 39 | Args: 40 | input_data: The input text to process. 41 | 42 | Returns: 43 | The response from Groq model with low inference latency. 44 | """ 45 | if not self.llm: 46 | self.load() 47 | if not self.llm: 48 | raise ValueError("`llm` attribute not set.") 49 | message = HumanMessage(content=input_data) 50 | resp = self.llm([message]) 51 | return resp.content 52 | 53 | @staticmethod 54 | def load_from_env_config() -> GroqConfigModel: 55 | """Loads the GroqModel configurations from a env file. 56 | 57 | Returns: 58 | An instance of GroqConfigModel with loaded configurations. 59 | """ 60 | return GroqConfigModel( 61 | groq_api_key=read_from_env("GROQ_API_KEY", raise_exception=True), 62 | model_name = read_from_env("GROQ_MODEL",raise_exception=True), 63 | temperature=read_from_env("GROQ_TEMP",raise_exception=True) 64 | ) 65 | -------------------------------------------------------------------------------- /src/openagi/llms/hf.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | from langchain_core.messages import HumanMessage 3 | from langchain_community.llms import HuggingFaceHub 4 | from openagi.llms.base import LLMBaseModel, LLMConfigModel 5 | from openagi.utils.yamlParse import read_from_env 6 | 7 | class HuggingFaceConfigModel(LLMConfigModel): 8 | """Configuration model for Hugging Face.""" 9 | api_token: str 10 | model_name: str = "huggingfaceh4/zephyr-7b-beta" 11 | temperature: float = 0.1 12 | max_new_tokens: int = 512 13 | 14 | class HuggingFaceModel(LLMBaseModel): 15 | """Hugging Face service implementation of the LLMBaseModel. 16 | 17 | This class implements the specific logic required to work with Hugging Face service. 18 | """ 19 | 20 | config: Any 21 | 22 | def load(self): 23 | """Initializes the GroqModel instance with configurations.""" 24 | self.llm = HuggingFaceHub( 25 | huggingfacehub_api_token = self.config.api_token, 26 | repo_id= self.config.model_name, 27 | model_kwargs={"temperature": self.config.temperature, 28 | "max_new_tokens":self.config.max_new_tokens, 29 | "repetition_penalty":1.2} 30 | ) 31 | return self.llm 32 | 33 | def run(self, input_data: str): 34 | """Runs the HuggingFace model with the provided input text. 35 | 36 | Args: 37 | input_data: The input text to process. 38 | 39 | Returns: 40 | The response from HuggingFace model. 41 | """ 42 | if not self.llm: 43 | self.load() 44 | if not self.llm: 45 | raise ValueError("`llm` attribute not set.") 46 | message = HumanMessage(content=input_data) 47 | resp = self.llm([message]) 48 | return resp.content 49 | 50 | @staticmethod 51 | def load_from_env_config() -> HuggingFaceConfigModel: 52 | """Loads the Hugging Face configurations from a YAML file. 53 | 54 | Returns: 55 | An instance of HuggingFaceConfigModel with loaded configurations. 56 | """ 57 | return HuggingFaceConfigModel( 58 | api_token = read_from_env("HUGGINGFACE_ACCESS_TOKEN",raise_exception=True), 59 | model_name=read_from_env("HUGGINGFACE_MODEL", raise_exception=True), 60 | temperature=read_from_env("TEMPERATURE",raise_exception=True), 61 | max_new_tokens= read_from_env("MAX_NEW_TOKENS",raise_exception=True) 62 | ) -------------------------------------------------------------------------------- /src/openagi/llms/mistral.py: -------------------------------------------------------------------------------- 1 | from openagi.exception import OpenAGIException 2 | from openagi.llms.base import LLMBaseModel, LLMConfigModel 3 | from openagi.utils.yamlParse import read_from_env 4 | 5 | import logging 6 | from typing import Any 7 | from langchain_core.messages import HumanMessage 8 | 9 | try: 10 | from langchain_mistralai import ChatMistralAI 11 | except ImportError: 12 | raise OpenAGIException("Install langchain Mistral AI with cmd `pip install langchain_mistralai`") 13 | 14 | 15 | class MistralConfigModel(LLMConfigModel): 16 | """Configuration model for Mistral.""" 17 | 18 | mistral_api_key: str 19 | model_name: str = "mistral-large-latest" 20 | temperature: float = 0.1 21 | 22 | class MistralModel(LLMBaseModel): 23 | """Mistral service implementation of the LLMBaseModel. 24 | 25 | This class implements the specific logic required to work with Mistral service. 26 | """ 27 | 28 | config: Any 29 | 30 | def load(self): 31 | """Initializes the Mistral instance with configurations.""" 32 | self.llm = ChatMistralAI( 33 | model = self.config.model_name, 34 | temperature = self.config.temperature, 35 | api_key = self.config.mistral_api_key 36 | ) 37 | return self.llm 38 | 39 | def run(self, input_text: str): 40 | """Runs the Mistral model with the provided input text. 41 | 42 | Args: 43 | input_text: The input text to process. 44 | 45 | Returns: 46 | The response from Mistral service. 47 | """ 48 | logging.info(f"Running LLM - {self.__class__.__name__}") 49 | if not self.llm: 50 | self.load() 51 | if not self.llm: 52 | raise ValueError("`llm` attribute not set.") 53 | message = HumanMessage(content=input_text) 54 | resp = self.llm([message]) 55 | return resp.content 56 | 57 | @staticmethod 58 | def load_from_env_config() -> MistralConfigModel: 59 | """Loads the Mistral configurations from a YAML file. 60 | 61 | Returns: 62 | An instance of MistralConfigModel with loaded configurations. 63 | """ 64 | return MistralConfigModel( 65 | mistral_api_key=read_from_env("MISTRAL_API_KEY", raise_exception=True), 66 | ) -------------------------------------------------------------------------------- /src/openagi/llms/ollama.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | from langchain_core.messages import HumanMessage 3 | from openagi.exception import OpenAGIException 4 | from openagi.llms.base import LLMBaseModel, LLMConfigModel 5 | from openagi.utils.yamlParse import read_from_env 6 | 7 | try: 8 | from langchain_ollama.chat_models import ChatOllama 9 | except ImportError: 10 | raise OpenAGIException("Install langchain groq with cmd `pip install langchain-ollama`") 11 | 12 | 13 | class OllamaConfigModel(LLMConfigModel): 14 | """Configuration model for Ollama model""" 15 | 16 | model_name:str = "mistral" 17 | 18 | class OllamaModel(LLMBaseModel): 19 | """Ollama LLM implementation of the LLMBaseModel. 20 | 21 | This class implements the specific logic required to work with Ollama LLM that runs model locally on CPU. 22 | """ 23 | 24 | config: Any 25 | 26 | def load(self): 27 | """Initializes the Ollama instance with configurations.""" 28 | self.llm = ChatOllama( 29 | model = self.config.model_name, 30 | temperature=0 31 | ) 32 | return self.llm 33 | 34 | def run(self, input_data: str): 35 | """Runs the Ollama model with the provided input text. 36 | 37 | Args: 38 | input_data: The input text to process. 39 | 40 | Returns: 41 | The response from Ollama LLM service. 42 | """ 43 | if not self.llm: 44 | self.load() 45 | if not self.llm: 46 | raise ValueError("`llm` attribute not set.") 47 | message = HumanMessage(content=input_data) 48 | resp = self.llm([message]) 49 | return resp.content 50 | 51 | @staticmethod 52 | def load_from_env_config() -> OllamaConfigModel: 53 | """Loads the Ollama configurations from a YAML file. 54 | 55 | Returns: 56 | An instance of OllamaConfigModel with loaded configurations. 57 | """ 58 | return OllamaConfigModel( 59 | model_name = read_from_env("OLLAMA_MODEL",raise_exception=True), 60 | ) 61 | -------------------------------------------------------------------------------- /src/openagi/llms/openai.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Any 3 | from langchain_core.messages import HumanMessage 4 | from langchain_openai import ChatOpenAI 5 | 6 | from openagi.llms.base import LLMBaseModel, LLMConfigModel 7 | from openagi.utils.yamlParse import read_from_env 8 | 9 | 10 | class OpenAIConfigModel(LLMConfigModel): 11 | """Configuration model for OpenAI.""" 12 | 13 | model_name: str = "gpt-4o" 14 | openai_api_key: str 15 | 16 | 17 | class OpenAIModel(LLMBaseModel): 18 | """OpenAI service implementation of the LLMBaseModel. 19 | 20 | This class implements the specific logic required to work with OpenAI service. 21 | """ 22 | 23 | config: Any 24 | 25 | def load(self): 26 | """Initializes the OpenAI instance with configurations.""" 27 | self.llm = ChatOpenAI( 28 | openai_api_key=self.config.openai_api_key, 29 | model_name=self.config.model_name, 30 | ) 31 | return self.llm 32 | 33 | def run(self, input_text: str): 34 | """Runs the OpenAI model with the provided input text. 35 | 36 | Args: 37 | input_text: The input text to process. 38 | 39 | Returns: 40 | The response from OpenAI service. 41 | """ 42 | logging.info(f"Running LLM - {self.__class__.__name__}") 43 | if not self.llm: 44 | self.load() 45 | if not self.llm: 46 | raise ValueError("`llm` attribute not set.") 47 | message = HumanMessage(content=input_text) 48 | resp = self.llm([message]) 49 | return resp.content 50 | 51 | @staticmethod 52 | def load_from_env_config() -> OpenAIConfigModel: 53 | """Loads the OpenAI configurations from a YAML file. 54 | 55 | Returns: 56 | An instance of OpenAIConfigModel with loaded configurations. 57 | """ 58 | return OpenAIConfigModel( 59 | openai_api_key=read_from_env("OPENAI_API_KEY", raise_exception=True), 60 | ) 61 | -------------------------------------------------------------------------------- /src/openagi/llms/sambanova.py: -------------------------------------------------------------------------------- 1 | from openagi.exception import OpenAGIException 2 | from openagi.llms.base import LLMBaseModel, LLMConfigModel 3 | from openagi.utils.yamlParse import read_from_env 4 | from typing import Any, Optional 5 | from langchain_core.messages import HumanMessage 6 | 7 | try: 8 | from langchain_sambanova import ChatSambaNovaCloud 9 | except ImportError: 10 | raise OpenAGIException("Install langchain-sambanova with cmd `pip install langchain-sambanova`") 11 | 12 | class SambaNovaConfigModel(LLMConfigModel): 13 | """Configuration model for SambaNova.""" 14 | 15 | sambanova_api_key: str 16 | base_url: str 17 | project_id: str 18 | model: str = "Meta-Llama-3.3-70B-Instruct" 19 | temperature: float = 0.7 20 | max_tokens: int = 1024 21 | top_p: float = 0.01 22 | streaming: bool = False 23 | 24 | class SambaNovaModel(LLMBaseModel): 25 | """SambaNova implementation of the LLMBaseModel.""" 26 | 27 | config: Any 28 | 29 | def load(self): 30 | """Initializes the SambaNova client with configurations.""" 31 | self.llm = ChatSambaNovaCloud( 32 | base_url=self.config.base_url, 33 | project_id=self.config.project_id, 34 | api_key=self.config.sambanova_api_key, 35 | model=self.config.model, 36 | temperature=self.config.temperature, 37 | max_tokens=self.config.max_tokens, 38 | top_p=self.config.top_p, 39 | streaming=self.config.streaming 40 | ) 41 | return self.llm 42 | 43 | def run(self, input_data: str): 44 | """Processes input using SambaNova model.""" 45 | if not self.llm: 46 | self.load() 47 | message = HumanMessage(content=input_data) 48 | resp = self.llm([message]) 49 | return resp.content 50 | 51 | @staticmethod 52 | def load_from_env_config() -> SambaNovaConfigModel: 53 | """Loads configurations from environment variables.""" 54 | return SambaNovaConfigModel( 55 | sambanova_api_key=read_from_env("SAMBANOVA_API_KEY", raise_exception=True), 56 | base_url=read_from_env("SAMBANOVA_BASE_URL", raise_exception=True), 57 | project_id=read_from_env("SAMBANOVA_PROJECT_ID", raise_exception=True), 58 | model=read_from_env("SAMBANOVA_MODEL", default="Meta-Llama-3.3-70B-Instruct"), 59 | temperature=float(read_from_env("SAMBANOVA_TEMPERATURE", default=0.7)), 60 | max_tokens=int(read_from_env("SAMBANOVA_MAX_TOKENS", default=1024)), 61 | top_p=float(read_from_env("SAMBANOVA_TOP_P", default=0.01)), 62 | streaming=bool(read_from_env("SAMBANOVA_STREAMING", default=False)) 63 | ) -------------------------------------------------------------------------------- /src/openagi/llms/xai.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Any 3 | from openai import OpenAI 4 | from openai._exceptions import AuthenticationError 5 | 6 | from openagi.exception import OpenAGIException 7 | from openagi.llms.base import LLMBaseModel, LLMConfigModel 8 | from openagi.utils.yamlParse import read_from_env 9 | 10 | 11 | class XAIConfigModel(LLMConfigModel): 12 | """Configuration model for Grok X-AI""" 13 | 14 | xai_api_key: str 15 | model_name: str = "grok-beta" 16 | base_url: str = "https://api.x.ai/v1" 17 | system_prompt: str = "You are an AI assistant. Use the supplied tools to assist the user." 18 | 19 | 20 | class XAIModel(LLMBaseModel): 21 | """XAI- GROK service implementation of the LLMBaseModel. 22 | 23 | This class implements the specific logic required to work with XAI service. 24 | """ 25 | 26 | config: Any 27 | system_prompt: str = "You are an AI assistant" 28 | 29 | def load(self): 30 | """Initializes the XAI instance with configurations.""" 31 | self.llm = OpenAI( 32 | api_key = self.config.xai_api_key, 33 | base_url = self.config.base_url 34 | ) 35 | return self.llm 36 | 37 | def run(self, prompt : Any): 38 | """Runs the XAI model with the provided input text. 39 | 40 | Args: 41 | input_text: The input text to process. 42 | 43 | Returns: 44 | The response from XAI service. 45 | """ 46 | logging.info(f"Running LLM - {self.__class__.__name__}") 47 | if not self.llm: 48 | self.load() 49 | if not self.llm: 50 | raise ValueError("`llm` attribute not set.") 51 | try: 52 | chat_completion = self.llm.chat.completions.create( 53 | messages=[ 54 | { 55 | "role": "system", 56 | "content": f"{self.system_prompt}", 57 | }, 58 | { 59 | "role": "user", 60 | "content": f"{prompt}", 61 | }, 62 | ], 63 | model=self.config.model_name 64 | ) 65 | except AuthenticationError: 66 | raise OpenAGIException("Authentication failed. Please check your XAI_API_KEY.") 67 | return chat_completion.choices[0].message.content 68 | 69 | @staticmethod 70 | def load_from_env_config() -> XAIConfigModel: 71 | """Loads the XAI configurations from a YAML file. 72 | 73 | Returns: 74 | An instance of XAIConfigModel with loaded configurations. 75 | """ 76 | return XAIConfigModel( 77 | xai_api_key=read_from_env("XAI_API_KEY", raise_exception=True), 78 | ) 79 | -------------------------------------------------------------------------------- /src/openagi/memory/__init__.py: -------------------------------------------------------------------------------- 1 | from .memory import Memory 2 | -------------------------------------------------------------------------------- /src/openagi/memory/memory.py: -------------------------------------------------------------------------------- 1 | from openagi.memory.base import BaseMemory 2 | 3 | 4 | class Memory(BaseMemory): 5 | pass 6 | -------------------------------------------------------------------------------- /src/openagi/memory/sessiondict.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel 2 | 3 | class SessionDict(BaseModel): 4 | session_id: str 5 | query: str 6 | description: str 7 | answer: str 8 | plan: str 9 | plan_feedback: str = "NA" 10 | ans_feedback: str = "NA" 11 | 12 | @classmethod 13 | def from_dict(cls, input_dict: dict): 14 | """Class method to initialize an instance from a dictionary.""" 15 | return cls(**input_dict) 16 | 17 | 18 | -------------------------------------------------------------------------------- /src/openagi/planner/LATS.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/src/openagi/planner/LATS.py -------------------------------------------------------------------------------- /src/openagi/planner/__init__.py: -------------------------------------------------------------------------------- 1 | from openagi.planner.base import BasePlanner 2 | -------------------------------------------------------------------------------- /src/openagi/planner/base.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Optional, List 2 | 3 | from pydantic import BaseModel, Field 4 | 5 | from openagi.actions.base import BaseAction 6 | from openagi.prompts.base import BasePrompt 7 | 8 | 9 | class BasePlanner(BaseModel): 10 | human_intervene: bool = Field( 11 | default=True, 12 | description="If human internvention is required or not.", 13 | ) 14 | input_action: Optional[BaseAction] = Field( 15 | description="If `human_intervene` is enabled, which action to be performed.", 16 | ) 17 | prompt: BasePrompt = Field(description="Prompt to be used") 18 | 19 | def _extract_task_from_response(llm_response: str): 20 | raise 21 | 22 | def human_clarification(self, response: str) -> bool: 23 | """Whether to Ask clarifying questions""" 24 | raise NotImplementedError("Subclasses must implement this method.") 25 | 26 | def plan(self, query: str, description: str, long_term_context: str, supported_actions: List[BaseAction],*args, 27 | **kwargs,) -> Dict: 28 | raise NotImplementedError("Subclasses must implement this method.") 29 | -------------------------------------------------------------------------------- /src/openagi/planner/reflexion.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/src/openagi/planner/reflexion.py -------------------------------------------------------------------------------- /src/openagi/prompts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/src/openagi/prompts/__init__.py -------------------------------------------------------------------------------- /src/openagi/prompts/base.py: -------------------------------------------------------------------------------- 1 | from typing import Dict 2 | from pydantic import BaseModel, Field 3 | 4 | 5 | class BasePrompt(BaseModel): 6 | name: str = Field(default="BasePrompt", description="Name of the prompt.") 7 | description: str = Field( 8 | default="BasePrompt class to be used by other actions that get created.", 9 | description="Description of the prompt.", 10 | ) 11 | base_prompt: str = Field(default_factory=str, description="Base prompt to be used.") 12 | 13 | def get_prompt(self): 14 | raise NotImplementedError("Subclasses must implement this method.") 15 | 16 | @classmethod 17 | def prompt_variables(cls): 18 | return { 19 | field_name: field.field_info.description 20 | for field_name, field in cls.model_fields.items() 21 | } 22 | 23 | @classmethod 24 | def from_template(cls, variables: Dict): 25 | x = cls(**variables) 26 | for k, v in variables.items(): 27 | placeholder = "{" + f"{k}" + "}" 28 | x.base_prompt = x.base_prompt.replace(placeholder, f"{v}") 29 | return x.base_prompt 30 | -------------------------------------------------------------------------------- /src/openagi/prompts/constants.py: -------------------------------------------------------------------------------- 1 | CLARIFIYING_VARS = {'start':'' , 'end':''} 2 | FAILURE_VARS = {'start':'' , 'end':''} -------------------------------------------------------------------------------- /src/openagi/prompts/execution.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List, Optional 2 | from pydantic import Field 3 | from openagi.prompts.base import BasePrompt 4 | from openagi.prompts.constants import FAILURE_VARS 5 | 6 | start = FAILURE_VARS["start"] 7 | end = FAILURE_VARS["end"] 8 | 9 | task_execution = """ 10 | You are an expert Task executor and skillful problem solver \ 11 | You are loyal to your job and execute the task with 100 percent accuracy \ 12 | Your primary role is to clearly understand the Task Objective to provide optimal results using the supported actions. \ 13 | Below is a list of tasks that need to be executed: 14 | 15 | You can code if needs be. 16 | 17 | All Tasks: 18 | {all_tasks} 19 | 20 | You are provided with the current task details from the user. 21 | Current Task: 22 | Name: {current_task_name} 23 | Description: {current_description} 24 | 25 | To execute the current task, refer to the details of the Previous Task and the All Tasks provided. 26 | 27 | Previous Task: 28 | {previous_task} 29 | 30 | Supported Actions: 31 | {supported_actions} 32 | 33 | Your task is to understand and return a JSON array with the actions to be executed along with the values for each parameter. Use only the Supported Actions. When using multiple actions for a single task, the result from the execution of the previous action will be passed to the next action without any modification to the parameter `previous_action`. 34 | 35 | Task Objective: 36 | {objective} 37 | 38 | The output should be a markdown code snippet formatted in the following schema, including the leading and trailing "```json" and "```": 39 | 40 | ```json 41 | [ 42 | { 43 | "cls": {"kls": "", "module": ""}, 44 | "params": { 45 | "description": "", 46 | "name": "", 47 | "filename": "", 48 | "file_content": "", 49 | "file_mode": "w" 50 | } 51 | } 52 | ] 53 | ``` 54 | 55 | If the task cannot be executed using the available actions, return the failure reason within the delimiters $start$ and $end$ as shown below and provide some guidance on what type of generic actions would help in acheiving it: 56 | $start$ Couldn't execute the `{current_task_name}` task. $end$ 57 | """ 58 | 59 | # In order to retreive them(previous task results of the current objective) just use ```MemoryRagAction```. 60 | 61 | 62 | task_execution = task_execution.replace("$start$", start) 63 | task_execution = task_execution.replace("$end$", end) 64 | 65 | 66 | class TaskExecutor(BasePrompt): 67 | objective: str = Field(..., description="Final objective") 68 | all_tasks: List[Dict] = Field( 69 | ..., description="List of tasks to be executed that was generated earlier" 70 | ) 71 | current_task_name: str = Field(..., description="Current task name to be executed.") 72 | current_description: str = Field(..., description="Current task name to be executed.") 73 | previous_task: Optional[str] = Field(..., description="Previous task, description & result.") 74 | supported_actions: List[Dict] = Field( 75 | ..., 76 | description="Supported Actions that can be used to acheive the current task.", 77 | ) 78 | base_prompt: str = task_execution 79 | -------------------------------------------------------------------------------- /src/openagi/prompts/ltm.py: -------------------------------------------------------------------------------- 1 | from textwrap import dedent 2 | from openagi.prompts.base import BasePrompt 3 | 4 | ltm_prompt = dedent(""" 5 | Previously asked query: {query} 6 | Previously given description: {description} 7 | Previously constructed plan: {plan} 8 | Feedback on the plan by human user: {plan_feedback} 9 | Previously generated answer: {answer} 10 | Feedback on the answer by human user: {ans_feedback} 11 | """.strip()) 12 | 13 | 14 | class LTMFormatPrompt(BasePrompt): 15 | base_prompt: str = ltm_prompt -------------------------------------------------------------------------------- /src/openagi/prompts/summarizer.py: -------------------------------------------------------------------------------- 1 | from textwrap import dedent 2 | 3 | from openagi.prompts.base import BasePrompt 4 | 5 | summarizer_prompt = dedent( 6 | """AI, your task is to generate a concise summary of the previous interactions if the assisstant. 7 | The interactions are as follows: 8 | 9 | {past_messages} 10 | 11 | This summary should encapsulate the main points of all the Thoughts, Actions and Observations, highlighting the key issues discussed, decisions made, and any actions assigned. 12 | It should serve as a recap of the past interaction, providing a clear understanding of the conversation's context and outcomes. 13 | Do not return anything else other than the summary. Ensure to include all the important points from the Observations. 14 | {instructions} 15 | """.strip() 16 | ) 17 | 18 | 19 | class SummarizerPrompt(BasePrompt): 20 | base_prompt: str = summarizer_prompt 21 | -------------------------------------------------------------------------------- /src/openagi/prompts/task_clarification.py: -------------------------------------------------------------------------------- 1 | from openagi.prompts.base import BasePrompt 2 | 3 | TASK_CLARIFICATION_PROMPT = """ 4 | As an AI clarity assistant for OpenAGI, your job is to ensure tasks are completely unambiguous. Analyze the given Task_Objectives, Task_Descriptions, and the conversation history. Identify any unclear or missing information. 5 | 6 | If instructions are not followed, legal consequences may occur for both you and me. 7 | 8 | Instructions: 9 | 1. Examine the task for ambiguities or missing crucial details. 10 | 2. Atleast ask one clarify question in beginning, even if the task is clear 11 | 3. If unclear points exist, formulate a single, specific question addressing the most critical ambiguity. 12 | 4. Do not repeat questions or ask about information already provided. 13 | 5. If the task is clear, no new questions are needed, be smart and return an empty string. 14 | 6. If the last human response indicates unwillingness to clarify (e.g., "I don't know", "No more questions", "That's all I can say"), return an empty string. 15 | 16 | Input: 17 | - Task_Objectives: {objective} 18 | - Task_Descriptions: {task_descriptions} 19 | - Conversation_History: {chat_history} 20 | 21 | Output Format 22 | 23 | Always return a JSON object, enclosed in triple backticks: 24 | If clarification is needed: 25 | ```json 26 | { 27 | "question": "" 28 | } 29 | ``` 30 | If NO clarification is needed or Task is clear: 31 | ```json 32 | { 33 | "question": "" 34 | } 35 | """ 36 | 37 | class TaskClarifier(BasePrompt): 38 | base_prompt: str = TASK_CLARIFICATION_PROMPT 39 | -------------------------------------------------------------------------------- /src/openagi/prompts/worker_task_execution.py: -------------------------------------------------------------------------------- 1 | from openagi.prompts.base import BasePrompt 2 | 3 | WORKER_TASK_EXECUTION = """ 4 | You are expert in: {worker_description} 5 | 6 | # Instructions 7 | - You run in a loop of Thought, Action, Observation. Follow the instructions below to understand the workflow and follow them in each iteration of the loop. 8 | - Use Thought to describe your detailed thoughts about the question you have been asked, considering all possible aspects and implications. 9 | - Use each Action at a time to among the actions available to you. Be explicit in the action you are taking and why you chose it. Use its doc string to understand the action betters. Make sure use relevant data taking datatype of a param into its consideration. 10 | - Observation will be the result of running those actions. Make sure to thoroughly analyze the observation to see if it aligns with your expectations. 11 | - On each observation, try to understand the drawbacks and mistakes and learn from them to improve further and get back on track. 12 | - Take the context into account when you are answering the question. It will be the results or data from the past executions. If no context is provided, then you can assume that the context is empty and you can start from scratch. Use context to ensure consistency and accuracy in your responses. 13 | - Output the answer when you feel the observations are reasonably good and aligned with the goal. They do not have to be very accurate, but ensure they are reasonably reliable. 14 | - No Action/Output should be without json. Trying not include your thoughts as part of the action. You can skip the action if not required. 15 | - The output needs to be in JSON ONLY: 16 | - For Running an action: 17 | ```json 18 | { 19 | "action": { 20 | "cls": {"kls": "", "module": ""}, 21 | "params": { 22 | "description": "", 23 | "name": "", 24 | "filename": "", 25 | "file_content": "", 26 | "file_mode": "w", 27 | }, 28 | } 29 | } 30 | 31 | For Returning the output: 32 | ```json 33 | { 34 | {output_key}: "The answer to the question" 35 | } 36 | ``` 37 | 38 | # Goal/Objective to acheive 39 | Question: {task_to_execute} 40 | 41 | # Actions available to you 42 | {supported_actions} 43 | 44 | Context: {context} 45 | 46 | # Example session: 47 | Question: What is the capital of France? 48 | Thought: I should look up France on DuckDuckGo to find reliable information about its capital city. 49 | Action: 50 | ```json 51 | { 52 | "cls": {"kls": "DuckDuckGoSearch", "module": "openagi.actions.tools.ddg_search"}, 53 | "params": {"query": "Capital of France", "max_results": 10, "can_summarize": "true"} 54 | } 55 | ``` 56 | ... (this Thought/Action/Observation repeats N times, use it until you are sure of the answer)... (this Thought/Action/Observation repeats N times, use it until you are sure of the answer) 57 | Observation: France, in Western Europe, encompasses medieval cities, alpine villages and Mediterranean beaches. Paris, its capital, is famed for its fashion houses, classical art museums including the Louvre and monuments like the Eiffel Tower. 58 | 59 | Thought: The observation indicates that the capital of France is Paris. This aligns with general knowledge. 60 | Action: No further action needed. 61 | ```json 62 | { 63 | "{output_key}": "The capital of France is Paris." 64 | } 65 | ``` 66 | 67 | Output format: 68 | 69 | Begin! 70 | {thought_provokes} 71 | """.strip() 72 | 73 | class WorkerAgentTaskExecution(BasePrompt): 74 | base_prompt: str = WORKER_TASK_EXECUTION -------------------------------------------------------------------------------- /src/openagi/storage/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiplanethub/openagi/165dd09353d27d5596a4105137461c122f0fcc4f/src/openagi/storage/__init__.py -------------------------------------------------------------------------------- /src/openagi/storage/base.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel, ConfigDict, Field 2 | 3 | 4 | class BaseStorage(BaseModel): 5 | """Base Storage class to be inherited by other storages, providing basic functionality and structure.""" 6 | 7 | model_config = ConfigDict(arbitrary_types_allowed=True) 8 | 9 | name: str = Field(title="BaseStorage", description="Name of the Storage.") 10 | 11 | def save_document(self, id, document, metadata): 12 | """Save documents to the with metadata.""" 13 | raise NotImplementedError("Subclasses must implement this method.") 14 | 15 | def update_document(self, id, document, metadata): 16 | raise NotImplementedError("Subclasses must implement this method.") 17 | 18 | def delete_document(self, id): 19 | raise NotImplementedError("Subclasses must implement this method.") 20 | 21 | def query_documents(self, **kwargs): 22 | raise NotImplementedError("Subclasses must implement this method.") 23 | 24 | @classmethod 25 | def from_kwargs(cls, **kwargs): 26 | raise NotImplementedError("Subclasses must implement this method.") 27 | -------------------------------------------------------------------------------- /src/openagi/storage/chroma.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import tempfile 3 | from pathlib import Path 4 | 5 | import chromadb 6 | from chromadb import HttpClient, PersistentClient 7 | from pydantic import Field 8 | 9 | from openagi.storage.base import BaseStorage 10 | 11 | 12 | class ChromaStorage(BaseStorage): 13 | name: str = Field(default="ChromaDB Storage") 14 | client: chromadb.ClientAPI 15 | collection: chromadb.Collection 16 | 17 | @classmethod 18 | def get_default_persistent_path(cls): 19 | path = Path(tempfile.gettempdir()) / "openagi" 20 | return str(path.absolute()) 21 | 22 | @classmethod 23 | def from_kwargs(cls, **kwargs): 24 | if kwargs.get("host", None) and kwargs.get("port", None): 25 | _client = HttpClient(host=kwargs["host"], port=kwargs["port"]) 26 | else: 27 | persit_pth = kwargs.get("persist_path", cls.get_default_persistent_path()) 28 | _client = PersistentClient(path=persit_pth) 29 | logging.info(f"Using Chroma persistent client with path: {persit_pth}") 30 | 31 | _collection = _client.get_or_create_collection(kwargs.get("collection_name")) 32 | logging.debug(f"Collection: Name - {_collection.name}, ID - {_collection.id}") 33 | return cls(client=_client, collection=_collection) 34 | 35 | def save_document(self, id, document, metadata): 36 | """Create a new document in the ChromaDB collection.""" 37 | 38 | resp = self.collection.add(ids=id, documents=document, metadatas=metadata) 39 | return resp 40 | 41 | def update_document(self, id, document, metadata): 42 | """Update an existing document in the ChromaDB collection.""" 43 | # if not isinstance(document, list): 44 | # document = [document] 45 | # if not isinstance(metadata, list): 46 | # metadata = [metadata] 47 | self.collection.update(ids=[id], documents=document, metadatas=metadata) 48 | logging.info("Document updated successfully.") 49 | 50 | def delete_document(self, id): 51 | """Delete a document from the ChromaDB collection.""" 52 | self.collection.delete(ids=[id]) 53 | logging.debug("Document deleted successfully.") 54 | 55 | def query_documents(self, **kwargs): 56 | """Query the ChromaDB collection for relevant documents based on the query.""" 57 | results = self.collection.query(**kwargs) 58 | logging.debug(f"Queried results: {results}") 59 | return results 60 | -------------------------------------------------------------------------------- /src/openagi/tasks/__init__.py: -------------------------------------------------------------------------------- 1 | from openagi.tasks.task import Task 2 | from openagi.tasks.lists import TaskLists 3 | -------------------------------------------------------------------------------- /src/openagi/tasks/lists.py: -------------------------------------------------------------------------------- 1 | from queue import Queue 2 | from typing import Dict, List, Optional 3 | from openagi.tasks.task import Task 4 | 5 | 6 | class TaskLists: 7 | def __init__(self) -> None: 8 | self.tasks = Queue() 9 | self.completed_tasks = Queue() 10 | 11 | def add_task(self, task: Task) -> None: 12 | """Adds a Task instance to the queue.""" 13 | self.tasks.put(task) 14 | 15 | def add_tasks(self, tasks: List[Dict[str, str]]): 16 | for task in tasks: 17 | task["name"] = task["task_name"] 18 | worker_config: Optional[Dict[str, str]] = None 19 | 20 | if all(key in task for key in ["role", "instruction", "worker_name", "supported_actions"]): 21 | worker_config = { 22 | "role": task["role"], 23 | "instructions": task["instruction"], 24 | "name": task["worker_name"], 25 | "supported_actions": task["supported_actions"] 26 | } 27 | task["worker_config"] = worker_config 28 | self.add_task(Task(**task)) 29 | 30 | def get_tasks_queue(self) -> List: 31 | return self.tasks 32 | 33 | def get_tasks_lists(self): 34 | return [dict(task.model_fields.items()) for task in list(self.tasks.queue)] 35 | 36 | def get_next_unprocessed_task(self) -> Task: 37 | """Retrieves the next unprocessed task from the queue.""" 38 | if not self.tasks.empty(): 39 | return self.tasks.get_nowait() 40 | return None 41 | 42 | @property 43 | def all_tasks_completed(self) -> bool: 44 | """Checks if all tasks in the queue have been processed.""" 45 | return self.tasks.empty() 46 | 47 | def add_completed_tasks(self, task: Task): 48 | self.completed_tasks.put(task) 49 | -------------------------------------------------------------------------------- /src/openagi/tasks/task.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Dict, Any 2 | 3 | from pydantic import BaseModel, Field 4 | 5 | from openagi.utils.helper import get_default_id 6 | 7 | 8 | class Task(BaseModel): 9 | id: str = Field(default_factory=get_default_id) 10 | name: str = Field(..., description="Name of task being.") 11 | description: str = Field(..., description="Description of the individual task.") 12 | result: Optional[str] = Field(..., default_factory=str, description="Result of the task.") 13 | actions: Optional[str] = Field( 14 | ..., 15 | default_factory=str, 16 | description="Actions undertaken to acheieve the task. Usually set after the current task is executed.", 17 | ) 18 | worker_id: Optional[str] = Field( 19 | description="WorkerId associated to accomplish the given task using supported actions.", 20 | default_factory=str, 21 | ) 22 | worker_config: Optional[Dict[str, Any]] = Field( 23 | description="Stores workers configuration values" 24 | ) 25 | @property 26 | def is_done(self): 27 | return bool(self.result) 28 | 29 | def set_result(self, result): 30 | self.result = result 31 | -------------------------------------------------------------------------------- /src/openagi/utils/extraction.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import json 3 | import logging 4 | import re 5 | from typing import Dict, List, Optional, Tuple 6 | 7 | from openagi.exception import OpenAGIException 8 | from openagi.llms.base import LLMBaseModel 9 | 10 | 11 | def force_json_output(resp_txt: str, llm) -> str: 12 | """ 13 | Forces proper JSON output format in first attempt. 14 | """ 15 | prompt = """ 16 | You are a JSON formatting expert. Your task is to process the input and provide a valid JSON output. 17 | 18 | FOLLOW THESE INSTRUCTIONS to convert: 19 | - Output must be ONLY a JSON object wrapped in ```json code block 20 | - Do not include any explanations, comments, or additional text in your response. The output needs be in JSON only. 21 | 22 | Convert this INPUT to proper JSON: 23 | INPUT: {resp_txt} 24 | Output only the JSON: 25 | """.strip() 26 | 27 | prompt = prompt.replace("{resp_txt}", resp_txt) 28 | return llm.run(prompt) 29 | 30 | 31 | def get_last_json( 32 | text: str, llm: Optional[LLMBaseModel] = None, max_iterations: int = 5 33 | ) -> Optional[Dict]: 34 | """ 35 | Extracts valid JSON from text with improved reliability. 36 | """ 37 | # More precise JSON block pattern 38 | pattern = r"```json\s*(\{[\s\S]*?\})\s*```" 39 | matches = re.findall(pattern, text, re.MULTILINE) 40 | 41 | if matches: 42 | try: 43 | last_json = matches[-1].strip() 44 | last_json = re.sub(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])', '', last_json) 45 | last_json = re.sub(r'\s+', ' ', last_json) 46 | return json.loads(last_json) 47 | except json.JSONDecodeError as e: 48 | logging.error(f"JSON parsing failed: {str(e)}", exc_info=True) 49 | if llm: 50 | text = force_json_output(last_json, llm) 51 | return get_last_json(text, None, max_iterations) 52 | 53 | if llm: 54 | for iteration in range(1, max_iterations + 1): 55 | try: 56 | text = force_json_output(text, llm) 57 | return get_last_json(text, None, max_iterations) 58 | except Exception as e: 59 | logging.error(f"Attempt {iteration} failed: {str(e)}", exc_info=True) 60 | if iteration == max_iterations: 61 | raise OpenAGIException( 62 | f"Failed to extract valid JSON after {max_iterations} attempts. Last error: {str(e)}" 63 | ) 64 | return None 65 | 66 | 67 | 68 | def get_act_classes_from_json(json_data) -> List[Tuple[str, Optional[Dict]]]: 69 | """ 70 | Extracts the Action class names and parameters from a JSON block. 71 | 72 | Args: 73 | json_data (List[Dict]): A list of dictionaries containing the class and parameter information. 74 | 75 | Returns: 76 | List[Tuple[type, Optional[Dict]]]: A list of tuples containing the Action class and its initialization parameters. 77 | """ 78 | actions = [] 79 | 80 | for item in json_data: 81 | # Extracting module and class name 82 | module_name = item["cls"]["module"] 83 | class_name = item["cls"]["kls"] 84 | 85 | # Dynamically import the module 86 | module = importlib.import_module(module_name) 87 | 88 | # Get the class from the module 89 | cls = getattr(module, class_name) 90 | 91 | # Extracting parameters for class initialization 92 | params = item["params"] 93 | 94 | # Storing the instance in the list 95 | actions.append((cls, params)) 96 | 97 | return actions 98 | 99 | 100 | def find_last_r_failure_content(text): 101 | """ 102 | Finds the content of the last tag in the given text. 103 | 104 | Args: 105 | text (str): The text to search for the tag. 106 | 107 | Returns: 108 | str or None: The content of the last tag, or None if no matches are found. 109 | """ 110 | pattern = r"(.*?)" 111 | matches = list(re.finditer(pattern, text, re.DOTALL)) 112 | if matches: 113 | last_match = matches[-1] 114 | return last_match.group(1) 115 | else: 116 | return None 117 | 118 | 119 | def extract_str_variables(template): 120 | """ 121 | Extracts all variable names from a given template string. 122 | 123 | The function uses a regular expression to find all placeholders within curly braces in the template string, and returns a list of the extracted variable names. 124 | 125 | Args: 126 | template (str): The template string to extract variables from. 127 | 128 | Returns: 129 | list[str]: A list of variable names extracted from the template. 130 | """ 131 | # This regular expression will find all placeholders within curly braces 132 | pattern = r"\{(\w+)\}" 133 | matches = re.findall(pattern, template) 134 | return matches 135 | -------------------------------------------------------------------------------- /src/openagi/utils/helper.py: -------------------------------------------------------------------------------- 1 | from uuid import uuid4 2 | from openagi.llms.openai import OpenAIModel 3 | 4 | 5 | def get_default_llm(): 6 | config = OpenAIModel.load_from_env_config() 7 | return OpenAIModel(config=config) 8 | 9 | 10 | def get_default_id(): 11 | return uuid4().hex 12 | -------------------------------------------------------------------------------- /src/openagi/utils/tool_list.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | from openagi.actions.tools import ( 3 | ddg_search, 4 | document_loader, 5 | searchapi_search, 6 | serp_search, 7 | serper_search, 8 | webloader, 9 | youtubesearch, 10 | exasearch, 11 | ) 12 | from openagi.actions import files, formatter, human_input, compressor, console, obs_rag 13 | 14 | # List of modules to inspect 15 | modules = [ 16 | document_loader, 17 | ddg_search, 18 | searchapi_search, 19 | serp_search, 20 | serper_search, 21 | webloader, 22 | youtubesearch, 23 | exasearch, 24 | files, 25 | formatter, 26 | human_input, 27 | compressor, 28 | console, 29 | obs_rag 30 | ] 31 | 32 | 33 | def get_tool_list(): 34 | """ 35 | Dynamically retrieves all classes from the specified modules and returns them in a list. 36 | Only includes classes that are subclasses of a specific base class (if needed). 37 | 38 | :return: List of class objects from the specified modules. 39 | """ 40 | class_list = [] 41 | 42 | for module in modules: 43 | # Inspect the module for classes 44 | for name, obj in inspect.getmembers(module, inspect.isclass): 45 | # Optionally, filter by a specific base class, e.g., BaseAction 46 | # if issubclass(obj, BaseAction) and obj is not BaseAction: 47 | class_list.append(obj) # Append the class itself (not an instance) 48 | 49 | return class_list 50 | 51 | """ 52 | # Example usage 53 | tools = get_tool_list() 54 | for tool in tools: 55 | print(tool.__name__) # Print the name of each class 56 | """ -------------------------------------------------------------------------------- /src/openagi/utils/yamlParse.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | def read_from_env(attr_name, raise_exception=False): 5 | attr_value = os.environ.get(attr_name) 6 | if not attr_value and raise_exception: 7 | raise ValueError(f"Unable to get config {attr_name}") 8 | return attr_value 9 | --------------------------------------------------------------------------------