├── .env.local
├── .gitignore
├── README.md
├── autogen_scenarios.py
├── autogen_worker
│   ├── codereviewassistant.py
│   ├── meetingscheduler.py
│   └── researchpaperqa.py
├── crewai_scenarios.py
├── crewai_worker
│   ├── blogpostgen.py
│   ├── codedocumentationassistant.py
│   └── marketresearchanalyzer.py
├── devika_worker
│   ├── blog.py
│   ├── datacollection.py
│   └── debugpython.py
├── langchain_scenarios.py
├── langchain_worker
│   ├── meetingscheduler.py
│   ├── trivia.py
│   └── writingassistant.py
├── memgpt_scenarios.py
├── memgpt_worker
│   ├── historicalfigure.py
│   ├── personalassistant.py
│   └── textdiscussion.py
├── proxy.py
├── requirements.txt
└── worker.py

/.env.local:
--------------------------------------------------------------------------------
1 | OPENAI_API_KEY=your key here
2 | OPENAI_ENDPOINT=https://api.openai.com/v1
3 | OPENAI_MODEL=gpt-4o
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | .env
3 | logs.jsonl
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Agentic Worker
2 |
3 | Gathers data for an agentic dataset from various frameworks:
4 | - AutoGen
5 | - CrewAI
6 | - MemGPT
7 | - LangChain Agents
8 | - princeton-nlp/SWE-agent
9 | - Devika
10 | - Open-Devin
11 | - llama-agentic-system
12 |
13 | ## Start the logging proxy
14 |
15 | `python proxy.py`
16 |
17 | ## Run a scenario
18 |
19 | `python worker.py autogen scenario1`
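20 |
21 | ## Point a framework at the proxy
22 |
23 | The proxy listens on port 5001 by default (override with `PORT`) and appends each request/response pair to `logs.jsonl`. To capture a framework's traffic, point its OpenAI base URL at `http://localhost:5001/v1` before running a scenario, e.g. `export OPENAI_API_BASE=http://localhost:5001/v1` (the exact variable name varies by framework and version).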
--------------------------------------------------------------------------------
/autogen_scenarios.py:
--------------------------------------------------------------------------------
1 | def scenario1():
2 |     print("Running AutoGen scenario1")
3 |
4 | def scenario2():
5 |     print("Running AutoGen scenario2")
6 |
7 | def run_autogen_scenario(scenario_name):
8 |     if scenario_name == 'scenario1':
9 |         scenario1()
10 |     elif scenario_name == 'scenario2':
11 |         scenario2()
12 |     else:
13 |         raise ValueError(f"Invalid scenario: autogen, {scenario_name}")
--------------------------------------------------------------------------------
/autogen_worker/codereviewassistant.py:
--------------------------------------------------------------------------------
1 | import os
2 | from autogen import AssistantAgent, UserProxyAgent
3 | from autogen.coding import LocalCommandLineCodeExecutor
4 |
5 | # Configure the LLM
6 | config_list = [{"model": "gpt-4", "api_key": os.environ["OPENAI_API_KEY"]}]
7 |
8 | # Create a local command line code executor
9 | code_executor = LocalCommandLineCodeExecutor(work_dir="./workspace")
10 |
11 | # Code Writer Agent
12 | code_writer = AssistantAgent(
13 |     name="CodeWriter",
14 |     description="Writes code based on user's task description",
15 |     system_message="You are a coding assistant. Write code based on the user's task description.",
16 |     llm_config={"config_list": config_list},
17 | )
18 |
19 | # Code Reviewer Agent
20 | code_reviewer = AssistantAgent(
21 |     name="CodeReviewer",
22 |     description="Reviews code and provides feedback and suggestions",
23 |     system_message="You are a code reviewer. Analyze the given code, provide feedback and suggestions for improvements.",
24 |     llm_config={"config_list": config_list},
25 | )
26 |
27 | # User Proxy Agent
28 | user_proxy = UserProxyAgent(
29 |     name="UserProxy",
30 |     code_execution_config={
31 |         "last_n_messages": 2,  # Only pass last 2 messages to code executor
32 |         "executor": code_executor,
33 |     },
34 |     human_input_mode="TERMINATE",  # Ask for human input only when a termination message arrives
35 |     is_termination_msg=lambda x: "TERMINATE" in x.get("content", ""),
36 | )
37 |
38 | def review_code(task_description):
39 |     code_writer.initiate_chat(
40 |         user_proxy, message=f"Write code to: {task_description}"
41 |     )
42 |
43 |     for _ in range(3):  # 3 rounds of code review
44 |         code_reviewer.initiate_chat(
45 |             code_writer, message="Please review the code and provide feedback."
46 |         )
47 |         code_writer.initiate_chat(
48 |             code_reviewer, message="Please update the code based on the feedback."
49 |         )
50 |
51 | # Example usage
52 | review_code("Create a function that takes a list of integers and returns the sum of all even numbers in the list.")
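53 |
54 | # Note: each initiate_chat() call above starts a fresh conversation by default; to let
55 | # the writer and reviewer keep context across the three review rounds, you could pass
56 | # clear_history=False to initiate_chat (supported by autogen 0.2).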
--------------------------------------------------------------------------------
/autogen_worker/meetingscheduler.py:
--------------------------------------------------------------------------------
1 | import os
2 | from datetime import datetime, timedelta
3 | from autogen import AssistantAgent, UserProxyAgent
4 |
5 | # Configure the LLM
6 | config_list = [{"model": "gpt-4", "api_key": os.environ["OPENAI_API_KEY"]}]
7 |
8 | # Simulated Calendar API
9 | calendar = {
10 |     "John": [
11 |         {"start": "2023-06-13T13:00:00", "end": "2023-06-13T14:00:00"},
12 |         {"start": "2023-06-13T15:30:00", "end": "2023-06-13T16:30:00"},
13 |     ]
14 | }
15 |
16 | def find_available_slot(attendee, date, duration):
17 |     slots = calendar.get(attendee, [])
18 |     start_time = datetime.strptime(f"{date}T12:00:00", "%Y-%m-%dT%H:%M:%S")
19 |     end_time = datetime.strptime(f"{date}T18:00:00", "%Y-%m-%dT%H:%M:%S")
20 |
21 |     while start_time + timedelta(minutes=duration) <= end_time:
22 |         if not any(datetime.strptime(slot["start"], "%Y-%m-%dT%H:%M:%S") < start_time + timedelta(minutes=duration) and
23 |                    start_time < datetime.strptime(slot["end"], "%Y-%m-%dT%H:%M:%S") for slot in slots):
24 |             return start_time.strftime("%Y-%m-%dT%H:%M:%S")
25 |         start_time += timedelta(minutes=30)
26 |     return None
27 |
28 | def schedule_meeting(attendee, start_time, end_time):
29 |     calendar.setdefault(attendee, []).append({"start": start_time, "end": end_time})
30 |     return f"Meeting scheduled with {attendee} from {start_time} to {end_time}."
31 |
32 | # User Assistant Agent
33 | user_assistant = AssistantAgent(
34 |     name="UserAssistant",
35 |     system_message="You are an assistant that helps the user schedule meetings.",
36 |     llm_config={"config_list": config_list},
37 | )
38 |
39 | # Scheduling Assistant Agent
40 | scheduling_assistant = AssistantAgent(
41 |     name="SchedulingAssistant",
42 |     system_message="You are an assistant that interprets meeting requests and proposes available time slots.",
43 |     llm_config={"config_list": config_list},
44 | )
45 |
46 | # User Proxy Agent
47 | user_proxy = UserProxyAgent(
48 |     name="UserProxy",
49 |     human_input_mode="TERMINATE",
50 |     is_termination_msg=lambda x: "TERMINATE" in x.get("content", ""),
51 | )
52 |
53 | def schedule(meeting_request):
54 |     user_proxy.initiate_chat(user_assistant, message=meeting_request)
55 |     user_assistant.initiate_chat(scheduling_assistant, message=user_proxy.chat_messages[user_assistant][-1]['content'])
56 |
57 |     attendee, date, duration = "John", "2023-06-13", 30  # Parse from scheduling_assistant's response
58 |     proposed_slot = find_available_slot(attendee, date, duration)
59 |
60 |     if proposed_slot:
61 |         user_assistant.initiate_chat(user_proxy, message=f"Proposed time slot: {proposed_slot}. Do you confirm?")
62 |         if "yes" in user_proxy.chat_messages[user_assistant][-1]['content'].lower():
63 |             end_time = (datetime.strptime(proposed_slot, "%Y-%m-%dT%H:%M:%S") + timedelta(minutes=duration)).strftime("%Y-%m-%dT%H:%M:%S")
64 |             result = schedule_meeting(attendee, proposed_slot, end_time)
65 |             user_assistant.initiate_chat(user_proxy, message=result)
66 |     else:
67 |         user_assistant.initiate_chat(user_proxy, message="No available time slots found.")
68 |
69 | # Example usage
70 | schedule("Schedule a 30 min meeting with John to discuss the Q3 budget sometime next Tue afternoon.")
--------------------------------------------------------------------------------
/autogen_worker/researchpaperqa.py:
--------------------------------------------------------------------------------
1 | import os
2 | # Retrieval agents live in autogen's contrib package (backed by chromadb, which is in requirements.txt)
3 | from autogen.agentchat.contrib.retrieve_assistant_agent import RetrieveAssistantAgent
4 | from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent
5 |
6 | # Configure the LLM
7 | config_list = [{"model": "gpt-4", "api_key": os.environ["OPENAI_API_KEY"]}]
8 |
9 | # Folder containing research papers; the retrieve agent chunks and indexes them itself
10 | papers_folder = "./research_papers"
11 |
12 | # Chatbot Assistant Agent
13 | chatbot = RetrieveAssistantAgent(
14 |     name="Chatbot",
15 |     system_message="You are a helpful assistant who answers questions based on the provided research papers.",
16 |     llm_config={"config_list": config_list},
17 | )
18 |
19 | # User Proxy Agent; retrieval is configured here rather than via a separate retriever object
20 | user_proxy = RetrieveUserProxyAgent(
21 |     name="UserProxy",
22 |     retrieve_config={
23 |         "task": "qa",
24 |         "docs_path": papers_folder,
25 |         "chunk_token_size": 200,
26 |         "embedding_model": "all-mpnet-base-v2",
27 |     },
28 |     human_input_mode="TERMINATE",  # Ask for human input only when a termination message arrives
29 |     is_termination_msg=lambda x: "TERMINATE" in x.get("content", ""),
30 | )
31 |
32 | def ask_question(question):
33 |     # The retrieve proxy builds the prompt from the top-matching chunks for the question
34 |     user_proxy.initiate_chat(chatbot, message=user_proxy.message_generator, problem=question)
35 |
36 | # Example usage
37 | ask_question("What are the main challenges in developing robust language models?")
--------------------------------------------------------------------------------
/crewai_scenarios.py:
--------------------------------------------------------------------------------
1 | def scenario1():
2 |     print("Running CrewAI scenario1")
3 |
4 | def scenario2():
5 |     print("Running CrewAI scenario2")
6 |
7 | def run_crewai_scenario(scenario_name):
8 |     if scenario_name == 'scenario1':
9 |         scenario1()
10 |     elif scenario_name == 'scenario2':
11 |         scenario2()
12 |     else:
13 |         raise ValueError(f"Invalid scenario: crewai, {scenario_name}")
--------------------------------------------------------------------------------
/crewai_worker/blogpostgen.py:
--------------------------------------------------------------------------------
1 | import os
2 | from crewai import Crew, Agent, Task, Process
3 | from crewai_tools import WebsiteSearchTool, CSVSearchTool, DirectoryReadTool, FileReadTool
4 |
5 | # Set up the necessary environment variables
6 | os.environ["SERPER_API_KEY"] = "your_serper_api_key"
7 | os.environ["OPENAI_API_KEY"] = "your_openai_api_key"
8 |
9 | # Create the researcher agent
10 | researcher = Agent(
11 |     role="Researcher",
12 |     goal="Gather information on the topic: The Future of Artificial Intelligence",
13 |     backstory="A skilled researcher with a background in AI and technology.",
14 |     tools=[WebsiteSearchTool(), CSVSearchTool()],
15 |     memory=True,
16 |     verbose=True,
17 | )
18 |
19 | # Create the writer agent
20 | writer = Agent(
21 |     role="Writer",
22 |     goal="Generate a well-structured and engaging blog post based on the researcher's findings",
23 |     backstory="An experienced writer with a knack for creating captivating content.",
24 |     tools=[DirectoryReadTool(directory="previous_blog_posts"), FileReadTool()],
25 |     memory=True,
26 |     verbose=True,
27 | )
28 |
29 | # Define the researcher's task
30 | research_task = Task(
31 |     description="Gather information on the topic: The Future of Artificial Intelligence",
32 |     expected_output="A collection of relevant information and key points about the future of AI.",
33 |     agent=researcher,
34 | )
35 |
36 | # Define the writer's task
37 | writing_task = Task(
38 |     description="Generate a blog post based on the researcher's findings",
39 |     expected_output="A well-structured and engaging blog post about the future of AI.",
40 |     agent=writer,
41 |     output_file="generated_blog_post.md",
42 | )
43 |
44 | # Create the crew
45 | blog_post_crew = Crew(
46 |     agents=[researcher, writer],
47 |     tasks=[research_task, writing_task],
48 |     process=Process.sequential,
49 | )
50 |
51 | # Kick off the crew
52 | result = blog_post_crew.kickoff()
53 | print(result)
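54 |
55 | # Note: with Process.sequential, tasks execute in the listed order and each task's
56 | # output is passed along as context for the next, so the writer builds on the
57 | # researcher's findings.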
goal="Help developers find relevant information within code documentation", 21 | backstory="An AI-powered assistant specializing in code documentation search and retrieval.", 22 | tools=[ 23 | CodeDocsSearchTool(), 24 | GithubSearchTool(content_types=["code", "issue"]), 25 | extract_code_snippet, 26 | ], 27 | verbose=True, 28 | max_iter=5, 29 | step_callback=agent_step_callback, 30 | ) 31 | 32 | # Define the code documentation search task 33 | search_task = Task( 34 | description="Search code documentation for information on 'Python list comprehension'", 35 | expected_output="Relevant information and code examples related to Python list comprehension.", 36 | agent=code_assistant, 37 | ) 38 | 39 | # Create the crew 40 | documentation_crew = Crew( 41 | agents=[code_assistant], 42 | tasks=[search_task], 43 | ) 44 | 45 | # Kick off the crew 46 | result = documentation_crew.kickoff() 47 | print(result) -------------------------------------------------------------------------------- /crewai_worker/marketresearchanalyzer.py: -------------------------------------------------------------------------------- 1 | import os 2 | from crewai_scenarios import Crew, Agent, Task, Process 3 | from crewai_tools import CSVSearchTool, WebsiteSearchTool, tool 4 | from langchain_openai import ChatOpenAI 5 | 6 | # Set up the necessary environment variables 7 | os.environ["SERPER_API_KEY"] = "your_serper_api_key" 8 | os.environ["OPENAI_API_KEY"] = "your_openai_api_key" 9 | 10 | @tool("Market Research API") 11 | def market_research_api(query: str) -> str: 12 | # Implementation of the market research API access logic 13 | # This is a placeholder and should be replaced with the actual implementation 14 | return "Market research data" 15 | 16 | # Create the data collector agent 17 | data_collector = Agent( 18 | role="Data Collector", 19 | goal="Gather market research data on the topic: Smartphone Market Trends", 20 | backstory="An efficient data collector with expertise in market research.", 21 | tools=[CSVSearchTool(), WebsiteSearchTool(), market_research_api], 22 | memory=True, 23 | verbose=True, 24 | cache=True, 25 | ) 26 | 27 | # Create the analyst agent 28 | analyst = Agent( 29 | role="Analyst", 30 | goal="Analyze the collected market research data to identify trends, patterns, and insights", 31 | backstory="A skilled analyst with a keen eye for data interpretation.", 32 | memory=True, 33 | verbose=True, 34 | cache=True, 35 | ) 36 | 37 | # Create the report generator agent 38 | report_generator = Agent( 39 | role="Report Generator", 40 | goal="Create a comprehensive market research report based on the analyst's findings", 41 | backstory="An experienced report writer with a talent for creating compelling market research reports.", 42 | memory=True, 43 | verbose=True, 44 | cache=True, 45 | ) 46 | 47 | # Define the data collection task 48 | data_collection_task = Task( 49 | description="Gather market research data on the topic: Smartphone Market Trends", 50 | expected_output="A collection of relevant market research data on smartphone market trends.", 51 | agent=data_collector, 52 | ) 53 | 54 | # Define the data analysis task 55 | data_analysis_task = Task( 56 | description="Analyze the collected market research data to identify trends, patterns, and insights", 57 | expected_output="A summary of key trends, patterns, and insights from the market research data.", 58 | agent=analyst, 59 | ) 60 | 61 | # Define the report generation task 62 | report_generation_task = Task( 63 | description="Create a comprehensive market 
--------------------------------------------------------------------------------
/crewai_worker/marketresearchanalyzer.py:
--------------------------------------------------------------------------------
1 | import os
2 | from crewai import Crew, Agent, Task, Process
3 | from crewai_tools import CSVSearchTool, WebsiteSearchTool, tool
4 | from langchain_openai import ChatOpenAI
5 |
6 | # Set up the necessary environment variables
7 | os.environ["SERPER_API_KEY"] = "your_serper_api_key"
8 | os.environ["OPENAI_API_KEY"] = "your_openai_api_key"
9 |
10 | @tool("Market Research API")
11 | def market_research_api(query: str) -> str:
12 |     """Fetch market research data for the given query."""
13 |     # Implementation of the market research API access logic
14 |     # This is a placeholder and should be replaced with the actual implementation
15 |     return "Market research data"
16 |
17 | # Create the data collector agent
18 | data_collector = Agent(
19 |     role="Data Collector",
20 |     goal="Gather market research data on the topic: Smartphone Market Trends",
21 |     backstory="An efficient data collector with expertise in market research.",
22 |     tools=[CSVSearchTool(), WebsiteSearchTool(), market_research_api],
23 |     memory=True,
24 |     verbose=True,
25 |     cache=True,
26 | )
27 |
28 | # Create the analyst agent
29 | analyst = Agent(
30 |     role="Analyst",
31 |     goal="Analyze the collected market research data to identify trends, patterns, and insights",
32 |     backstory="A skilled analyst with a keen eye for data interpretation.",
33 |     memory=True,
34 |     verbose=True,
35 |     cache=True,
36 | )
37 |
38 | # Create the report generator agent
39 | report_generator = Agent(
40 |     role="Report Generator",
41 |     goal="Create a comprehensive market research report based on the analyst's findings",
42 |     backstory="An experienced report writer with a talent for creating compelling market research reports.",
43 |     memory=True,
44 |     verbose=True,
45 |     cache=True,
46 | )
47 |
48 | # Define the data collection task
49 | data_collection_task = Task(
50 |     description="Gather market research data on the topic: Smartphone Market Trends",
51 |     expected_output="A collection of relevant market research data on smartphone market trends.",
52 |     agent=data_collector,
53 | )
54 |
55 | # Define the data analysis task
56 | data_analysis_task = Task(
57 |     description="Analyze the collected market research data to identify trends, patterns, and insights",
58 |     expected_output="A summary of key trends, patterns, and insights from the market research data.",
59 |     agent=analyst,
60 | )
61 |
62 | # Define the report generation task
63 | report_generation_task = Task(
64 |     description="Create a comprehensive market research report based on the analyst's findings",
65 |     expected_output="A well-structured and informative market research report on smartphone market trends.",
66 |     agent=report_generator,
67 |     output_file="market_research_report.md",
68 | )
69 |
70 | # Create the crew
71 | market_research_crew = Crew(
72 |     agents=[data_collector, analyst, report_generator],
73 |     tasks=[data_collection_task, data_analysis_task, report_generation_task],
74 |     process=Process.hierarchical,
75 |     manager_llm=ChatOpenAI(temperature=0, model="gpt-4"),
76 | )
77 |
78 | # Kick off the crew
79 | result = market_research_crew.kickoff()
80 | print(result)
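81 |
82 | # Note: Process.hierarchical puts the manager LLM (gpt-4 here) in charge of delegating
83 | # tasks to the agents and validating their results, instead of running the task list
84 | # strictly in order.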
--------------------------------------------------------------------------------
/devika_worker/blog.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | from devika import Devika
4 |
5 | def automate_scenario_1():
6 |     # Initialize Devika
7 |     devika = Devika(base_model='claude-3-haiku-20240307', search_engine='google')
8 |
9 |     # User prompt
10 |     user_prompt = "Devika, I'd like to create a personal blog website. It should have a modern design, an about me page, and the ability for me to easily write and publish blog posts. Can you build this for me?"
11 |
12 |     # Create a new project
13 |     project_name = "Personal Blog Website"
14 |     devika.project_manager.create_project(project_name)
15 |
16 |     # Add user message
17 |     devika.project_manager.add_message_from_user(project_name, user_prompt)
18 |
19 |     # Execute Devika
20 |     devika.execute(user_prompt, project_name)
21 |
22 |     # Wait for Devika to complete the task
23 |     while not devika.agent_state.is_agent_completed(project_name):
24 |         time.sleep(1)  # Poll once a second instead of busy-waiting
25 |
26 |     # Get the latest message from Devika
27 |     latest_message = devika.project_manager.get_latest_message_from_devika(project_name)
28 |
29 |     # Extract the blog URL from the latest message
30 |     blog_url = extract_url_from_message(latest_message['message'])
31 |
32 |     # Print the blog URL
33 |     print(f"Personal blog website created successfully! URL: {blog_url}")
34 |
35 | def extract_url_from_message(message):
36 |     # Extract the URL from the message using regular expressions or string manipulation
37 |     # This function should be implemented based on the expected format of the URL in the message
38 |     # For simplicity, let's assume the URL is the last word in the message
39 |     return message.split()[-1]
40 |
41 | # Run the automation
42 | automate_scenario_1()
--------------------------------------------------------------------------------
/devika_worker/datacollection.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | from devika import Devika
4 |
5 | def automate_scenario_3():
6 |     # Initialize Devika
7 |     devika = Devika(base_model='claude-3-haiku-20240307', search_engine='google')
8 |
9 |     # User prompt
10 |     user_prompt = "Devika, I need to collect data on all 500 companies in the Fortune 500 - their revenues, # of employees, headquarters location, and main industry. Can you collect this data for me and compile it into a spreadsheet?"
11 |
12 |     # Create a new project
13 |     project_name = "Fortune 500 Data Collection"
14 |     devika.project_manager.create_project(project_name)
15 |
16 |     # Add user message
17 |     devika.project_manager.add_message_from_user(project_name, user_prompt)
18 |
19 |     # Execute Devika
20 |     devika.execute(user_prompt, project_name)
21 |
22 |     # Wait for Devika to complete the task
23 |     while not devika.agent_state.is_agent_completed(project_name):
24 |         time.sleep(1)  # Poll once a second instead of busy-waiting
25 |
26 |     # Get the latest message from Devika
27 |     latest_message = devika.project_manager.get_latest_message_from_devika(project_name)
28 |
29 |     # Extract the spreadsheet URL from the latest message
30 |     spreadsheet_url = extract_url_from_message(latest_message['message'])
31 |
32 |     # Print the spreadsheet URL
33 |     print(f"Fortune 500 data collected successfully! Spreadsheet URL: {spreadsheet_url}")
34 |
35 | def extract_url_from_message(message):
36 |     # Extract the URL from the message using regular expressions or string manipulation
37 |     # This function should be implemented based on the expected format of the URL in the message
38 |     # For simplicity, let's assume the URL is the last word in the message
39 |     return message.split()[-1]
40 |
41 | # Run the automation
42 | automate_scenario_3()
--------------------------------------------------------------------------------
/devika_worker/debugpython.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | from devika import Devika
4 |
5 | def automate_scenario_2():
6 |     # Initialize Devika
7 |     devika = Devika(base_model='claude-3-haiku-20240307', search_engine='google')
8 |
9 |     # Sample Python code with errors (left intentionally broken for Devika to fix)
10 |     code_with_errors = '''
11 | def analyze_sales_data(data):
12 |     total_sales = 0
13 |     for sale in data
14 |         total_sales += sale['amount']
15 |     average_sales = total_sales / len(data)
16 |     print("Total Sales:", total_sale)
17 |     print("Average Sales:", average_sales)
18 |
19 | data = [
20 |     {'id': 1, 'amount': 1000},
21 |     {'id': 2, 'amount': 1500},
22 |     {'id': 3, 'amount': 2000},
23 |     {'id': 4, 'amount': 1200}
24 | ]
25 |
26 | analyze_sales_data(data)
27 | '''
28 |
29 |     # User prompt
30 |     user_prompt = f"Hi Devika, I'm working on this Python script to analyze some sales data but I'm getting errors when I run it. Here's my code:\n\n{code_with_errors}\n\nCan you debug this and get it working?"
31 |
32 |     # Create a new project
33 |     project_name = "Sales Data Analysis"
34 |     devika.project_manager.create_project(project_name)
35 |
36 |     # Add user message
37 |     devika.project_manager.add_message_from_user(project_name, user_prompt)
38 |
39 |     # Execute Devika
40 |     devika.execute(user_prompt, project_name)
41 |
42 |     # Wait for Devika to complete the task
43 |     while not devika.agent_state.is_agent_completed(project_name):
44 |         time.sleep(1)  # Poll once a second instead of busy-waiting
45 |
46 |     # Get the latest message from Devika
47 |     latest_message = devika.project_manager.get_latest_message_from_devika(project_name)
48 |
49 |     # Extract the debugged code from the latest message
50 |     debugged_code = extract_code_from_message(latest_message['message'])
51 |
52 |     # Print the debugged code
53 |     print("Debugged Code:")
54 |     print(debugged_code)
55 |
56 | def extract_code_from_message(message):
57 |     # Extract the code block from the message using string manipulation
58 |     # Assuming the code block is enclosed in triple backticks (```)
59 |     start_index = message.find("```python") + len("```python")
60 |     end_index = message.find("```", start_index)
61 |     code_block = message[start_index:end_index].strip()
62 |     return code_block
63 |
64 | # Run the automation
65 | automate_scenario_2()
--------------------------------------------------------------------------------
/langchain_scenarios.py:
--------------------------------------------------------------------------------
1 | def scenario1():
2 |     print("Running LangChain scenario1")
3 |
4 | def scenario2():
5 |     print("Running LangChain scenario2")
6 |
7 | def run_langchain_scenario(scenario_name):
8 |     if scenario_name == 'scenario1':
9 |         scenario1()
10 |     elif scenario_name == 'scenario2':
11 |         scenario2()
12 |     else:
13 |         raise ValueError(f"Invalid scenario: langchain, {scenario_name}")
--------------------------------------------------------------------------------
/langchain_worker/meetingscheduler.py:
--------------------------------------------------------------------------------
1 | import os
2 | from langchain.agents import initialize_agent, Tool
3 | from langchain.llms import OpenAI
4 | from datetime import datetime, timedelta
5 |
6 | # Set up OpenAI API key
7 | os.environ["OPENAI_API_KEY"] = "your_openai_api_key"
8 |
9 | # Dummy calendar data
10 | calendars = {
11 |     "Alice": [
12 |         {"start": "2023-06-01T09:00:00", "end": "2023-06-01T10:30:00"},
13 |         {"start": "2023-06-01T14:00:00", "end": "2023-06-01T15:00:00"}
14 |     ],
15 |     "Bob": [
16 |         {"start": "2023-06-01T11:00:00", "end": "2023-06-01T12:30:00"},
17 |         {"start": "2023-06-01T14:30:00", "end": "2023-06-01T16:00:00"}
18 |     ],
19 |     "Charlie": [
20 |         {"start": "2023-06-01T10:00:00", "end": "2023-06-01T11:30:00"},
21 |         {"start": "2023-06-01T13:00:00", "end": "2023-06-01T14:30:00"}
22 |     ]
23 | }
24 |
25 | # Zero-shot agent tools receive a single string, so each tool parses its own input.
26 | def parse_attendees(attendees_str):
27 |     return [attendee.strip() for attendee in attendees_str.split(",")]
28 |
29 | # Calendar API tool; input: comma-separated attendee names
30 | def get_availability(attendees_str):
31 |     availability = []
32 |     for attendee in parse_attendees(attendees_str):
33 |         if attendee in calendars:
34 |             availability.append(f"{attendee}'s availability:")
35 |             for event in calendars[attendee]:
36 |                 availability.append(f" - From: {event['start']} To: {event['end']}")
37 |         else:
38 |             availability.append(f"No calendar data found for {attendee}")
39 |     return "\n".join(availability)
40 |
41 | # Time slot finder tool; input: "name1, name2 | duration_minutes"
42 | def find_time_slot(tool_input):
43 |     attendees_part, duration_part = tool_input.split("|")
44 |     attendees = parse_attendees(attendees_part)
45 |     duration = int(duration_part.strip())
46 |     time_slots = []
47 |
48 |     start_time = datetime(2023, 6, 1, 9, 0)
49 |     end_time = datetime(2023, 6, 1, 17, 0)
50 |     time_step = timedelta(minutes=30)
51 |
52 |     while start_time + timedelta(minutes=duration) <= end_time:
53 |         available = True
54 |         for attendee in attendees:
55 |             if attendee not in calendars:
56 |                 available = False
57 |                 break
58 |             for event in calendars[attendee]:
59 |                 event_start = datetime.fromisoformat(event["start"])
60 |                 event_end = datetime.fromisoformat(event["end"])
61 |                 if start_time < event_end and start_time + timedelta(minutes=duration) > event_start:
62 |                     available = False
63 |                     break
64 |             if not available:
65 |                 break
66 |         if available:
67 |             time_slots.append(f"{start_time.strftime('%Y-%m-%dT%H:%M:%S')} - {(start_time + timedelta(minutes=duration)).strftime('%Y-%m-%dT%H:%M:%S')}")
68 |         start_time += time_step
69 |
70 |     if time_slots:
71 |         return "\n".join(time_slots)
72 |     else:
73 |         return "No available time slots found for the given attendees and duration."
74 |
75 | # Email tool; input: "name1, name2 | time_slot"
76 | def send_invite(tool_input):
77 |     attendees_part, time_slot = tool_input.split("|", 1)
78 |     attendees = parse_attendees(attendees_part)
79 |     return f"Sending meeting invite to {', '.join(attendees)} for time slot {time_slot.strip()}."
80 |
81 | # Create tools list
82 | tools = [
83 |     Tool(name="Get Availability", func=get_availability, description="Retrieves attendees' availability from their calendars. Input: comma-separated attendee names."),
84 |     Tool(name="Find Time Slot", func=find_time_slot, description="Finds available time slots for the meeting. Input: comma-separated attendee names, then '|', then the duration in minutes."),
85 |     Tool(name="Send Invite", func=send_invite, description="Sends meeting invites to attendees. Input: comma-separated attendee names, then '|', then the chosen time slot.")
86 | ]
87 |
88 | # Initialize agent
89 | agent = initialize_agent(tools, OpenAI(temperature=0), agent="zero-shot-react-description", verbose=True)
90 |
91 | # Main loop
92 | while True:
93 |     print("\nWelcome to the Meeting Scheduler Agent!")
94 |     attendees_input = input("Please enter the list of attendees (comma-separated) or type 'exit' to quit: ")
95 |
96 |     if attendees_input.lower() == "exit":
97 |         print("Thank you for using the Meeting Scheduler Agent. Goodbye!")
98 |         break
99 |
100 |     attendees = parse_attendees(attendees_input)
101 |     duration = input("Please enter the meeting duration in minutes: ")
102 |
103 |     result = agent.run(f"Schedule a meeting for {', '.join(attendees)} lasting {duration} minutes: check their availability, find a time slot, and send the invite.")
104 |     print(f"\nResult: {result}")
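105 |
106 | # Note: packing several values into one string is a workaround for single-input tools;
107 | # langchain's StructuredTool supports multi-argument schemas if you'd rather pass
108 | # attendees and duration as separate fields.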
Goodbye!") 92 | break 93 | 94 | attendees = [attendee.strip() for attendee in attendees_input.split(",")] 95 | duration = input("Please enter the meeting duration in minutes: ") 96 | 97 | result = agent.run(f"Attendees: {', '.join(attendees)}\nDuration: {duration} minutes") 98 | print(f"\nResult: {result}") -------------------------------------------------------------------------------- /langchain_worker/trivia.py: -------------------------------------------------------------------------------- 1 | import os 2 | from langchain.agents import initialize_agent, Tool 3 | from langchain.chains import LLMChain 4 | from langchain.llms import OpenAI 5 | from langchain.prompts import PromptTemplate 6 | from langchain.embeddings import OpenAIEmbeddings 7 | from langchain.vectorstores import FAISS 8 | from langchain.document_loaders import WikipediaLoader 9 | 10 | # Set up OpenAI API key 11 | os.environ["OPENAI_API_KEY"] = "your_openai_api_key" 12 | 13 | # Load Wikipedia data 14 | doc_path = "path_to_your_wikipedia_data" 15 | loader = WikipediaLoader(doc_path) 16 | docs = loader.load() 17 | 18 | # Create embeddings and vectorstore 19 | embeddings = OpenAIEmbeddings() 20 | vectorstore = FAISS.from_documents(docs, embeddings) 21 | 22 | # Wikipedia search tool 23 | def wikipedia_search(query): 24 | docs = vectorstore.similarity_search(query) 25 | return "\n".join([doc.page_content for doc in docs][:3]) 26 | 27 | # Multiple choice tool 28 | def multiple_choice(query, choices): 29 | prompt = f"""Given the following question and choices, select the most appropriate answer: 30 | 31 | Question: {query} 32 | Choices: {choices} 33 | 34 | Answer:""" 35 | chain = LLMChain(llm=OpenAI(temperature=0), prompt=PromptTemplate.from_template(prompt)) 36 | return chain.predict(query=query, choices=choices) 37 | 38 | # Create tools list 39 | tools = [ 40 | Tool(name="Wikipedia Search", func=wikipedia_search, description="Useful for searching Wikipedia for relevant information."), 41 | Tool(name="Multiple Choice", func=multiple_choice, description="Useful for selecting the most appropriate answer from given choices.") 42 | ] 43 | 44 | # Initialize agent 45 | agent = initialize_agent(tools, OpenAI(temperature=0), agent="zero-shot-react-description", verbose=True) 46 | 47 | # Main loop 48 | while True: 49 | print("\nWelcome to the Trivia Game Agent!") 50 | question = input("Please enter your trivia question or type 'exit' to quit: ") 51 | 52 | if question.lower() == "exit": 53 | print("Thank you for playing the Trivia Game Agent. 
Goodbye!") 54 | break 55 | 56 | choices_input = input("Please enter the answer choices separated by commas: ") 57 | choices = [choice.strip() for choice in choices_input.split(",")] 58 | 59 | result = agent.run(f"Question: {question}\nChoices: {', '.join(choices)}") 60 | print(f"\nResult: {result}") -------------------------------------------------------------------------------- /langchain_worker/writingassistant.py: -------------------------------------------------------------------------------- 1 | import os 2 | from langchain.agents import initialize_agent, Tool 3 | from langchain.chains import LLMChain 4 | from langchain.llms import OpenAI 5 | from langchain.prompts import PromptTemplate 6 | 7 | # Set up OpenAI API key 8 | os.environ["OPENAI_API_KEY"] = "your_openai_api_key" 9 | 10 | # Grammar/spelling checker tool 11 | def grammar_checker(text): 12 | prompt = f"""Please correct the grammar and spelling in the following text: 13 | 14 | Text: {text} 15 | 16 | Corrected text:""" 17 | chain = LLMChain(llm=OpenAI(temperature=0), prompt=PromptTemplate.from_template(prompt)) 18 | return chain.predict(text=text) 19 | 20 | # Summarizer tool 21 | def summarizer(text): 22 | prompt = f"""Please provide a concise summary of the following text: 23 | 24 | Text: {text} 25 | 26 | Summary:""" 27 | chain = LLMChain(llm=OpenAI(temperature=0), prompt=PromptTemplate.from_template(prompt)) 28 | return chain.predict(text=text) 29 | 30 | # Tone analyzer tool 31 | def tone_analyzer(text): 32 | prompt = f"""Analyze the tone of the following text and suggest ways to improve it: 33 | 34 | Text: {text} 35 | 36 | Tone analysis and suggestions:""" 37 | chain = LLMChain(llm=OpenAI(temperature=0), prompt=PromptTemplate.from_template(prompt)) 38 | return chain.predict(text=text) 39 | 40 | # Thesaurus tool 41 | def thesaurus(word): 42 | prompt = f"""Please provide synonyms for the word '{word}':""" 43 | chain = LLMChain(llm=OpenAI(temperature=0), prompt=PromptTemplate.from_template(prompt)) 44 | return chain.predict(word=word) 45 | 46 | # Create tools list 47 | tools = [ 48 | Tool(name="Grammar Checker", func=grammar_checker, description="Useful for correcting grammar and spelling."), 49 | Tool(name="Summarizer", func=summarizer, description="Useful for summarizing text."), 50 | Tool(name="Tone Analyzer", func=tone_analyzer, description="Useful for analyzing and improving the tone of text."), 51 | Tool(name="Thesaurus", func=thesaurus, description="Useful for finding synonyms.") 52 | ] 53 | 54 | # Initialize agent 55 | agent = initialize_agent(tools, OpenAI(temperature=0), agent="zero-shot-react-description", verbose=True) 56 | 57 | # Main loop 58 | while True: 59 | print("\nWelcome to the Writing Assistant Agent!") 60 | user_input = input("Please enter your text or type 'exit' to quit: ") 61 | 62 | if user_input.lower() == "exit": 63 | print("Thank you for using the Writing Assistant Agent. 
Goodbye!") 64 | break 65 | 66 | result = agent.run(user_input) 67 | print(f"\nResult: {result}") -------------------------------------------------------------------------------- /memgpt_scenarios.py: -------------------------------------------------------------------------------- 1 | def scenario1(): 2 | print("Running MemGPT scenario1") 3 | 4 | def scenario2(): 5 | print("Running MemGPT scenario2") 6 | 7 | def run_memgpt_scenario(scenario_name): 8 | if scenario_name == 'scenario1': 9 | scenario1() 10 | elif scenario_name == 'scenario2': 11 | scenario2() 12 | else: 13 | raise ValueError(f"Invalid scenario: memgpt, {scenario_name}") -------------------------------------------------------------------------------- /memgpt_worker/historicalfigure.py: -------------------------------------------------------------------------------- 1 | import os 2 | from memgpt_scenarios import create_client, configure 3 | 4 | # Configure MemGPT 5 | configure( 6 | model_endpoint_type="openai", 7 | model_endpoint="https://api.openai.com/v1", # Replace with your OpenAI endpoint 8 | openai_key=os.environ["OPENAI_API_KEY"], # Set your OpenAI API key as an environment variable 9 | storage_backend="local", 10 | db_uri="sqlite:///:memory:", # Use an in-memory SQLite database 11 | ) 12 | 13 | # Create a MemGPT client 14 | client = create_client() 15 | 16 | # Define the historical figure's name and background information file path 17 | historical_figure_name = "Albert Einstein" 18 | background_file_path = "path/to/albert_einstein_background.txt" # Replace with the path to the background file 19 | 20 | # Create a data source and load the background information into it 21 | source = client.create_source(name=f"{historical_figure_name} Background") 22 | client.load_file_into_source(filename=background_file_path, source_id=source.id) 23 | 24 | # Define the historical figure's persona 25 | persona = f""" 26 | I am {historical_figure_name}, the famous historical figure. 27 | I will engage in conversation with users, drawing upon my life experiences, views, and accomplishments. 28 | I aim to provide authentic and insightful responses based on the knowledge provided about me. 29 | Users can ask me about my life, debate ideas with me, and get my perspective on various topics. 30 | """ 31 | 32 | # Create the MemGPT agent with the historical figure's persona 33 | agent = client.create_agent(name=historical_figure_name, persona=persona) 34 | 35 | # Attach the background information data source to the agent's memory 36 | client.attach_source_to_agent(source_name=f"{historical_figure_name} Background", agent_id=agent.id) 37 | 38 | print(f"Welcome to the MemGPT Historical Figure Roleplaying Chatbot demo!") 39 | print(f"You are now chatting with {historical_figure_name}.") 40 | print(f"Ask about their life, debate ideas, and get their perspective on various topics.") 41 | print("Type 'quit' to exit.\n") 42 | 43 | while True: 44 | user_input = input("User: ") 45 | 46 | if user_input.lower() == "quit": 47 | print(f"\nThank you for chatting with {historical_figure_name}. 
Goodbye!") 48 | break 49 | 50 | messages = client.user_message(agent_id=agent.id, message=user_input) 51 | 52 | for msg in messages: 53 | if "assistant_message" in msg: 54 | print(f"{historical_figure_name}: {msg['assistant_message']}") 55 | elif "internal_monologue" in msg: 56 | print(f"{historical_figure_name}'s Thoughts: {msg['internal_monologue']}") -------------------------------------------------------------------------------- /memgpt_worker/personalassistant.py: -------------------------------------------------------------------------------- 1 | import os 2 | from memgpt_scenarios import create_client, configure 3 | 4 | # Configure MemGPT 5 | configure( 6 | model_endpoint_type="openai", 7 | model_endpoint="https://api.openai.com/v1", # Replace with your OpenAI endpoint 8 | openai_key=os.environ["OPENAI_API_KEY"], # Set your OpenAI API key as an environment variable 9 | storage_backend="local", 10 | db_uri="sqlite:///:memory:", # Use an in-memory SQLite database 11 | ) 12 | 13 | # Create a MemGPT client 14 | client = create_client() 15 | 16 | # Define the assistant's persona 17 | persona = """ 18 | I am MemGPT Assistant, a friendly and intelligent AI assistant created by MemGPT. 19 | My goal is to help users with a wide variety of tasks to the best of my abilities. 20 | I have broad knowledge spanning many subjects, and I'm always eager to learn more. 21 | I communicate in a warm, empathetic, and thoughtful manner. 22 | I'm here to assist with things like writing, analysis, math, coding, creative projects, and answering questions. 23 | Please let me know how I can help! 24 | """ 25 | 26 | # Create the MemGPT agent with the assistant persona 27 | agent = client.create_agent(name="MemGPT Assistant", persona=persona) 28 | 29 | print("Welcome to the MemGPT Personal Assistant demo!") 30 | print("You are now chatting with an AI assistant. Type 'quit' to exit.\n") 31 | 32 | while True: 33 | user_input = input("User: ") 34 | 35 | if user_input.lower() == "quit": 36 | print("\nThank you for chatting with the MemGPT Assistant. 
Goodbye!") 37 | break 38 | 39 | messages = client.user_message(agent_id=agent.id, message=user_input) 40 | 41 | for msg in messages: 42 | if "assistant_message" in msg: 43 | print(f"Assistant: {msg['assistant_message']}") 44 | elif "internal_monologue" in msg: 45 | print(f"Assistant's Thoughts: {msg['internal_monologue']}") -------------------------------------------------------------------------------- /memgpt_worker/textdiscussion.py: -------------------------------------------------------------------------------- 1 | import os 2 | from memgpt_scenarios import create_client, configure 3 | 4 | # Configure MemGPT 5 | configure( 6 | model_endpoint_type="openai", 7 | model_endpoint="https://api.openai.com/v1", # Replace with your OpenAI endpoint 8 | openai_key=os.environ["OPENAI_API_KEY"], # Set your OpenAI API key as an environment variable 9 | storage_backend="local", 10 | db_uri="sqlite:///:memory:", # Use an in-memory SQLite database 11 | ) 12 | 13 | # Create a MemGPT client 14 | client = create_client() 15 | 16 | # Define the file path for the book/article 17 | file_path = "path/to/your/book_or_article.txt" # Replace with the path to your book or article file 18 | 19 | # Create a data source and load the book/article into it 20 | source = client.create_source(name="Book/Article") 21 | client.load_file_into_source(filename=file_path, source_id=source.id) 22 | 23 | # Define the discussion partner's persona 24 | persona = """ 25 | I am an AI book/article discussion partner created by MemGPT. 26 | My role is to engage in in-depth discussions about the content of the book or article provided to me. 27 | I can answer questions, share my perspective, and relate ideas to other concepts I've learned. 28 | I aim to provide thoughtful and insightful responses to help the user better understand and analyze the material. 29 | """ 30 | 31 | # Create the MemGPT agent with the discussion partner persona 32 | agent = client.create_agent(name="MemGPT Discussion Partner", persona=persona) 33 | 34 | # Attach the book/article data source to the agent's memory 35 | client.attach_source_to_agent(source_name="Book/Article", agent_id=agent.id) 36 | 37 | print("Welcome to the MemGPT Book/Article Discussion Partner demo!") 38 | print("You are now chatting with an AI that has read the provided book/article.") 39 | print("Ask questions, share your thoughts, and engage in a discussion about the content.") 40 | print("Type 'quit' to exit.\n") 41 | 42 | while True: 43 | user_input = input("User: ") 44 | 45 | if user_input.lower() == "quit": 46 | print("\nThank you for discussing the book/article with the MemGPT Discussion Partner. 
Goodbye!") 47 | break 48 | 49 | messages = client.user_message(agent_id=agent.id, message=user_input) 50 | 51 | for msg in messages: 52 | if "assistant_message" in msg: 53 | print(f"Assistant: {msg['assistant_message']}") 54 | elif "internal_monologue" in msg: 55 | print(f"Assistant's Thoughts: {msg['internal_monologue']}") -------------------------------------------------------------------------------- /proxy.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import requests 4 | from dotenv import load_dotenv 5 | from flask import Flask, request, Response, stream_with_context 6 | from threading import Lock 7 | 8 | log_lock = Lock() 9 | app = Flask(__name__) 10 | LOG_FILE = 'logs.jsonl' 11 | 12 | load_dotenv() 13 | OPENAI_API_URL = os.getenv('OPENAI_ENDPOINT') 14 | OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') 15 | OPENAI_MODEL = os.getenv('OPENAI_MODEL') 16 | 17 | @app.route('/', defaults={'path': ''}, methods=['GET', 'POST', 'PUT', 'DELETE']) 18 | @app.route('/', methods=['GET', 'POST', 'PUT', 'DELETE']) 19 | def proxy(path): 20 | 21 | if path.endswith('/models'): 22 | return Response( 23 | json.dumps([{ 24 | "id": OPENAI_MODEL, 25 | "object": "model", 26 | "created": 1686935002, 27 | "owned_by": "organization-owner" 28 | }]), 29 | content_type='application/json' 30 | ) 31 | 32 | print("\n=== Incoming Request ===") 33 | print(f"Method: {request.method}") 34 | print(f"Path: {path}") 35 | print(f"Headers: {dict(request.headers)}") 36 | print(f"Raw Data: {request.get_data().decode('utf-8')}") 37 | base_path = '/' + OPENAI_API_URL.rstrip('/').split('/')[-1] 38 | 39 | # Strip out the first path component and replace with the base_path 40 | path_parts = path.split('/', 1) 41 | actual_path = path_parts[1] if len(path_parts) > 1 else '' 42 | 43 | # Remove the base_path from OPENAI_API_URL if it exists 44 | base_url = OPENAI_API_URL.rstrip('/').rsplit(base_path, 1)[0] 45 | url = f"{base_url}{base_path}/{actual_path}" 46 | print(f"Proxying request to: {url}") 47 | 48 | headers = {k: v for k, v in request.headers.items() if k != 'Host'} 49 | headers['Host'] = url.split('//')[-1].split('/')[0] 50 | headers['Authorization'] = f'Bearer {OPENAI_API_KEY}' 51 | 52 | data = request.get_data() 53 | json_data = json.loads(data.decode('utf-8')) if data else None 54 | 55 | if request.method == 'POST': 56 | json_data['model'] = OPENAI_MODEL 57 | data = json.dumps(json_data).encode('utf-8') 58 | 59 | is_stream = json_data.get('stream', False) if json_data else False 60 | 61 | print("\n=== Outgoing Request ===") 62 | print(f"URL: {url}") 63 | print(f"Headers: {headers}") 64 | print(f"Data: {data.decode('utf-8') if data else None}") 65 | 66 | try: 67 | response = requests.request( 68 | method=request.method, 69 | url=url, 70 | headers=headers, 71 | data=data, 72 | stream=is_stream, 73 | ) 74 | 75 | response.raise_for_status() 76 | 77 | if request.method == 'POST': 78 | if is_stream: 79 | def generate(): 80 | response_content = '' 81 | for line in response.iter_lines(): 82 | if line: 83 | if line.startswith(b'data: '): 84 | yield line + b'\n\n' # Added extra newline for SSE 85 | line_data = line.decode('utf-8')[6:] 86 | if line_data != '[DONE]': 87 | response_content += json.loads(line_data)['choices'][0]['delta'].get('content', '') 88 | 89 | with log_lock: 90 | with open(LOG_FILE, 'a') as log_file: 91 | log_file.write(json.dumps({ 92 | 'request': json_data, 93 | 'response': response_content 94 | }) + '\n') 95 | 96 | return 
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | flask
2 | requests
3 | python-dotenv
4 | crewai[tools]
5 | langchain
6 | letta
7 | autogen-agentchat~=0.2
8 | chromadb
9 | openai
--------------------------------------------------------------------------------
/worker.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import logging
3 | from autogen_scenarios import run_autogen_scenario
4 | from memgpt_scenarios import run_memgpt_scenario
5 | from langchain_scenarios import run_langchain_scenario
6 | from crewai_scenarios import run_crewai_scenario
7 |
8 | logging.basicConfig(level=logging.INFO)
9 | logger = logging.getLogger(__name__)
10 |
11 | def main():
12 |     try:
13 |         parser = argparse.ArgumentParser(description='Worker Node')
14 |         parser.add_argument('framework', choices=['autogen', 'memgpt', 'langchain', 'crewai'], help='The framework to use')
15 |         parser.add_argument('scenario', help='The scenario to run')
16 |         args = parser.parse_args()
17 |
18 |         scenario_runner = {
19 |             'autogen': run_autogen_scenario,
20 |             'memgpt': run_memgpt_scenario,
21 |             'langchain': run_langchain_scenario,
22 |             'crewai': run_crewai_scenario
23 |         }
24 |         scenario_runner[args.framework](args.scenario)
25 |     except KeyError:
26 |         logger.error(f"Invalid framework: {args.framework}")
27 |         raise
28 |     except Exception as e:
29 |         logger.error(f"Error running scenario: {e}")
30 |         raise
31 |
32 | if __name__ == '__main__':
33 |     main()
--------------------------------------------------------------------------------