├── .gitignore ├── 01_how_to_langgraph_example_01.py ├── 01_how_to_use_crew_ai_for_image_understanding.py ├── 01_how_to_use_crewai_to_solve_math.py ├── 02_how_to_create_stories_for_kids.py ├── 03_how_to_persist_shared_state_lg.py ├── 05_agent_of_agents.py ├── 08_how_to_do_memory_in_crew_01.py ├── 09_triaging_crew_ai_01.py ├── 11.twenty_question_game.py ├── LICENSE ├── README.md ├── chkpt_client ├── __init__.py └── sqllite_client.py ├── crew_ai_crews ├── __init__.py ├── agents.py ├── master_chef.py ├── tasks.py └── tools.py ├── grandma_story ├── __init__.py ├── grandma_agents.py ├── grandma_stories.py └── story_tasks.py ├── requirements.txt └── society_of_minds ├── __init__.py └── mind_creator.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 160 | .idea/ 161 | checkpoint 162 | logs 163 | chkpt.db 164 | -------------------------------------------------------------------------------- /01_how_to_langgraph_example_01.py: -------------------------------------------------------------------------------- 1 | # Author: Rajib Deb 2 | # A simple example showing how langgraph works 3 | import operator 4 | from typing import TypedDict, Annotated, Sequence 5 | 6 | from langchain_core.messages import BaseMessage, HumanMessage 7 | from langgraph.graph import StateGraph 8 | 9 | 10 | def call_bbc_agent(state): 11 | messages = state['messages'] 12 | print("bbc ", messages) 13 | # I have hard coded the below. But I can very well call a open ai model to get the response 14 | response = "Here is the India news from BBC" 15 | # We return a list, because this will get added to the existing list 16 | return {"messages": [response]} 17 | 18 | 19 | def call_cnn_agent(state): 20 | messages = state['messages'] 21 | print("cnn ", messages) 22 | # I have hard coded the below. But I can very well call a bedrock model to get the response 23 | response = "Here is the India news from CNN" 24 | # We return a list, because this will get added to the existing list 25 | return {"messages": [response]} 26 | 27 | 28 | def call_ndtv_agent(state): 29 | messages = state['messages'] 30 | print("ndtv ", messages) 31 | # I have hard coded the below. But I can very well call a gemini model to get the response 32 | response = "Here is the India news from NDTV" 33 | # We return a list, because this will get added to the existing list 34 | return {"messages": [response]} 35 | 36 | 37 | def call_fox_agent(state): 38 | messages = state['messages'] 39 | print("fox ", messages) 40 | # I have hard coded the below. 
But I can very well call a LlaMa model to get the response 41 | response = "Here is the India news from FOX" 42 | # We return a list, because this will get added to the existing list 43 | return {"messages": [response]} 44 | 45 | 46 | class AgentState(TypedDict): 47 | messages: Annotated[Sequence[BaseMessage], operator.add] 48 | 49 | 50 | workflow = StateGraph(AgentState) 51 | 52 | # Define the two nodes we will cycle between 53 | workflow.add_node("bbc_agent", call_bbc_agent) 54 | workflow.add_node("cnn_agent", call_cnn_agent) 55 | workflow.add_node("ndtv_agent", call_ndtv_agent) 56 | workflow.add_node("fox_agent", call_fox_agent) 57 | 58 | workflow.set_entry_point("bbc_agent") 59 | # bbc_agent->cnn_agent 60 | workflow.add_edge('bbc_agent', 'cnn_agent') 61 | # bbc_agent->cnn_agent->ndtv_agent 62 | workflow.add_edge('cnn_agent', 'ndtv_agent') 63 | # bbc_agent->cnn_agent->ndtv_agent->fox_agent 64 | workflow.add_edge('ndtv_agent', 'fox_agent') 65 | workflow.set_finish_point("fox_agent") 66 | 67 | app = workflow.compile() 68 | 69 | inputs = {"messages": [HumanMessage(content="What is India news")]} 70 | response = app.invoke(inputs) 71 | messages = response["messages"] 72 | for message in messages: 73 | if isinstance(message, HumanMessage): 74 | print("Question :", message.content) 75 | else: 76 | print(message) 77 | -------------------------------------------------------------------------------- /01_how_to_use_crew_ai_for_image_understanding.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Rajib Deb 3 | Date: 02/10/2024 4 | Description: This is the driver program that starts the MasterChef crew. 5 | """ 6 | from crew_ai_crews.master_chef import MasterChefCrew 7 | 8 | if __name__ == "__main__": 9 | # url="https://rumkisgoldenspoon.com/wp-content/uploads/2022/02/Aar-macher-jhol.jpg" 10 | url="https://m.media-amazon.com/images/I/51dFvTRE3iL.__AC_SX300_SY300_QL70_FMwebp_.jpg" 11 | crew = MasterChefCrew(url=url) 12 | print(crew.kickoff()) -------------------------------------------------------------------------------- /01_how_to_use_crewai_to_solve_math.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from crewai import Agent, Task, Crew 4 | from dotenv import load_dotenv 5 | from langchain_community.utilities.wolfram_alpha import WolframAlphaAPIWrapper 6 | from langchain_community.tools.wolfram_alpha import WolframAlphaQueryRun 7 | 8 | load_dotenv() 9 | OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY') 10 | WOLFRAM_ALPHA_APPID = os.environ.get('WOLFRAM_ALPHA_APPID') 11 | 12 | math_tool = WolframAlphaQueryRun(api_wrapper =WolframAlphaAPIWrapper()) 13 | # search_tool = DuckDuckGoSearchRun() 14 | math_student = Agent( 15 | role = "Math Student at Georgia Tech University", 16 | goal = "You are an expert in solving mathematics problems related to differential and integral calculus", 17 | backstory = """You are one of the best students in the Maths department of the university. You can solve 18 | complex mathematics problem related to differential and integral calculus. 19 | """, 20 | verbose=True, 21 | allow_delegation=False, 22 | tools=[math_tool] 23 | 24 | ) 25 | 26 | math_professor = Agent( 27 | role = "Math professor at Georgia Tech University", 28 | goal = "You correct and provide the final right answers of math problems submitted by your students", 29 | backstory = """You are a professor at the university and have a PH.D in Mathematics. 
You are expert in grading and correcting 30 | solutions to math problems related to Calculus. 31 | """, 32 | verbose=True, 33 | allow_delegation=True 34 | 35 | ) 36 | 37 | solving_task = Task(description="""Approximate the integral of function 2 + square(x) using five equal intervals between -1 and +1. 38 | Please provide detailed steps of the solution. 39 | """,agent=math_student) 40 | 41 | grading_task = Task(description="""For the solution provided, review the solution and correct if required. Please provide detailed steps of the solution. 42 | """,agent=math_professor) 43 | 44 | crew = Crew( 45 | agents=[math_student, math_professor], 46 | tasks=[solving_task, grading_task], 47 | verbose=2, # You can set it to 1 or 2 to different logging levels 48 | ) 49 | 50 | # Get your crew to work! 51 | result = crew.kickoff() 52 | 53 | print("######################") 54 | print(result) -------------------------------------------------------------------------------- /02_how_to_create_stories_for_kids.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Rajib Deb 3 | Date: 02/10/2024 4 | Description: This is the driver program that starts the MasterChef crew. 5 | """ 6 | # from grandma_story.grandma_stories import GrandmaCrew 7 | # 8 | # if __name__ == "__main__": 9 | # idea = "Unity is strength" 10 | # crew = GrandmaCrew(idea=idea) 11 | # result = crew.kickoff() 12 | # print(result) 13 | 14 | # Author: Rajib Deb 15 | # A simple example showing how langgraph works 16 | 17 | from langgraph.graph import MessageGraph 18 | from openai import OpenAI 19 | 20 | from grandma_story.grandma_stories import GrandmaCrew 21 | 22 | 23 | def craft_story(state): 24 | messages = state[-1] 25 | crew = GrandmaCrew(idea=messages) 26 | response = crew.kickoff() 27 | return response 28 | 29 | 30 | def convert_to_speech(state): 31 | client = OpenAI() 32 | messages = state[-1] 33 | 34 | with client.audio.speech.with_streaming_response.create( 35 | model="tts-1", 36 | voice="alloy", 37 | input=messages, 38 | ) as response: 39 | response.stream_to_file("story.mp3") 40 | 41 | return response 42 | 43 | 44 | workflow = MessageGraph() 45 | 46 | # Define the nodes we will cycle between 47 | workflow.add_node("craft_story", craft_story) 48 | workflow.add_node("convert_to_speech", convert_to_speech) 49 | workflow.set_entry_point("craft_story") 50 | workflow.add_edge("craft_story", "convert_to_speech") 51 | workflow.set_finish_point("convert_to_speech") 52 | 53 | app = workflow.compile() 54 | 55 | story_idea = "Unity is strength" 56 | response = app.invoke(story_idea) 57 | print("-----") 58 | print(response[-1]) 59 | -------------------------------------------------------------------------------- /03_how_to_persist_shared_state_lg.py: -------------------------------------------------------------------------------- 1 | # Author: Rajib Deb 2 | # A simple example showing how langgraph persists state 3 | import operator 4 | import os 5 | import pickle 6 | from typing import TypedDict, Annotated, Sequence 7 | 8 | from dotenv import load_dotenv 9 | from langchain_core.messages import BaseMessage, HumanMessage 10 | from langchain_core.runnables import RunnableConfig, ConfigurableFieldSpec 11 | from langchain_openai import ChatOpenAI 12 | from langgraph.checkpoint import BaseCheckpointSaver, Checkpoint 13 | from langgraph.graph import MessageGraph 14 | 15 | from chkpt_client.sqllite_client import SQLLiteClient 16 | 17 | load_dotenv() 18 | OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY') 19 
| 20 | 21 | class DBCheckPointer(BaseCheckpointSaver): 22 | class Config: 23 | arbitrary_types_allowed = True 24 | 25 | client = SQLLiteClient() 26 | 27 | @property 28 | def config_specs(self) -> list[ConfigurableFieldSpec]: 29 | return [ 30 | ConfigurableFieldSpec( 31 | id="session_id", 32 | annotation=str, 33 | name="Session ID", 34 | description=None, 35 | default="", 36 | is_shared=True, 37 | ), 38 | ] 39 | 40 | def get(self, config: RunnableConfig) -> Checkpoint: 41 | checkpoint = self.client.select_chkpt(config["configurable"]["session_id"]) 42 | return checkpoint 43 | 44 | def put(self, config: RunnableConfig, checkpoint: Checkpoint) -> None: 45 | record = (config["configurable"]["session_id"], 46 | pickle.dumps(checkpoint),) 47 | try: 48 | self.client.insert_chkpt(record=record) 49 | except Exception as e: 50 | print(e) 51 | 52 | 53 | model = ChatOpenAI(temperature=0, streaming=False) 54 | 55 | 56 | def personal_assistant(messages): 57 | response = model.invoke(messages) 58 | # We return a list, because this will get added to the existing list 59 | return response 60 | 61 | 62 | class AgentState(TypedDict): 63 | messages: Annotated[Sequence[BaseMessage], operator.add] 64 | 65 | 66 | workflow = MessageGraph() 67 | workflow.add_node("assistant", personal_assistant) 68 | 69 | workflow.set_entry_point("assistant") 70 | workflow.set_finish_point("assistant") 71 | 72 | checkpoint = DBCheckPointer() 73 | app = workflow.compile(checkpointer=checkpoint) 74 | 75 | while True: 76 | content = input("Ask me a question \n") 77 | if content == "exit": 78 | exit(0) 79 | human_message = [HumanMessage(content=content)] 80 | response = app.invoke(human_message, {"configurable": {"session_id": "2"}}) 81 | print(response[-1].content) 82 | -------------------------------------------------------------------------------- /05_agent_of_agents.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from crewai import Crew, Process 4 | from dotenv import load_dotenv 5 | 6 | from society_of_minds.mind_creator import TheMind 7 | 8 | load_dotenv() 9 | OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY') 10 | 11 | # What task do you want to give to the mind 12 | task = "Create powerpoint explain the impact of technology on climate." 13 | print("Mind has got the task to perform: {task}".format(task = task)) 14 | 15 | # Instantiate the mind 16 | the_mind = TheMind() 17 | # Mind now creates the tiny agents which are mindless 18 | crew_agents, crew_tasks = the_mind.create_the_society(task) 19 | 20 | print("Society of mind now working on the task...") 21 | crew = Crew( 22 | agents=crew_agents, 23 | tasks=crew_tasks, 24 | verbose=False, 25 | process=Process.sequential # Optional: Sequential task execution is default 26 | ) 27 | 28 | # Society of minds at work 29 | result = crew.kickoff() 30 | print(result) 31 | -------------------------------------------------------------------------------- /08_how_to_do_memory_in_crew_01.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from crewai import Crew, Agent, Task, Process 4 | from dotenv import load_dotenv 5 | 6 | load_dotenv() 7 | OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY') 8 | 9 | father_agent = Agent( 10 | role='Father', 11 | goal='You are the father of a kid. The kid may ask you any question.' 12 | 'Your goal is to provide a satisfactory answer to the kid.', 13 | verbose=True, 14 | memory=True, 15 | backstory=( 16 | "You are a 40 year old male. 
You live in the city of San Jose with your wife and kid who is 10 years old." 17 | ), 18 | tools=[], 19 | allow_delegation=True 20 | ) 21 | 22 | father_task = Task( 23 | description=( 24 | "Your task is to answer the {question} of your kid in a satisfactory " 25 | "and legible way so that it makes sense to your kid. " 26 | ), 27 | expected_output='Answer to your kid question', 28 | tools=[], 29 | # human_input = True, 30 | agent=father_agent 31 | ) 32 | 33 | parent_crew = Crew( 34 | agents=[father_agent], 35 | tasks=[father_task], 36 | process=Process.sequential, 37 | memory=True, 38 | verbose=True, 39 | embedder={ 40 | "provider": "openai", 41 | "config": { 42 | "model": 'text-embedding-3-small' 43 | } 44 | } 45 | ) 46 | 47 | while True: 48 | question = input("Kid: \n") 49 | answer = parent_crew.kickoff({"question": question}) 50 | 51 | print("***********************") 52 | print(answer) 53 | -------------------------------------------------------------------------------- /09_triaging_crew_ai_01.py: -------------------------------------------------------------------------------- 1 | # imports 2 | import os 3 | from uuid import uuid4 4 | 5 | from crewai import Agent, Task, Crew, Process 6 | from dotenv import load_dotenv 7 | from langchain.tools import WikipediaQueryRun 8 | from langchain_community.utilities import WikipediaAPIWrapper 9 | from langchain_core.tracers.context import tracing_v2_enabled 10 | from langchain_groq import ChatGroq 11 | from crewai_tools import SerperDevTool 12 | 13 | load_dotenv() 14 | OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY') 15 | 16 | unique_id = uuid4().hex[0:8] 17 | os.environ["LANGCHAIN_TRACING_V2"] = "true" 18 | os.environ["LANGCHAIN_PROJECT"] = f"Crew AI - {unique_id}" 19 | os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com" 20 | os.environ["LANGCHAIN_API_KEY"] = os.environ.get('LANGSMITH_API_KEY') # Update to your API key 21 | 22 | 23 | # Keys 24 | os.environ["SERPER_API_KEY"] = os.environ.get('serapi_key') 25 | os.environ["GROQ_API_KEY"] = os.environ.get('groq_key') 26 | # Tools 27 | wikipedia=WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper()) 28 | serper = SerperDevTool() 29 | # Groq 30 | 31 | Groq = ChatGroq( 32 | temperature=0.7, 33 | model_name='llama3-70b-8192' 34 | ) 35 | 36 | # agents1 37 | 38 | 39 | 40 | jornalista = Agent( 41 | role='Historiador e jornalista Esportivo pesquisador sobre futebol Brasileiro', 42 | backstory='Você possue larga experiência em pesquisas sobre carreiras de jogadores do futebol brasileiro, possue vários livros escritos.', 43 | goal='Realiza pesquisas sobre o histórico da carreira dos jogadores de futebol do Brasil', 44 | llm=Groq, 45 | verbose=True 46 | ) 47 | 48 | # angents3 49 | editor = Agent( 50 | role='Editor e Crítico Esportivo', 51 | backstory='Você constrói críticas jornalísticas e editoriais', 52 | goal='Escrever matérias jornalísticas e críticas detalhada', 53 | llm=Groq, 54 | verbose=True 55 | 56 | ) 57 | 58 | # agenst4 59 | revisor = Agent( 60 | role='Revisor de textos', 61 | backstory='Você é um jornalista revisor de textos', 62 | goal='Revisa texto final para publicação', 63 | llm=Groq, 64 | verbose=True 65 | 66 | ) 67 | 68 | # agenst5 69 | tradutor = Agent( 70 | role='tradutor de textos', 71 | backstory='Você é um tradutor de textos para o português', 72 | goal='Traduzir o texto do revisor para a publicação', 73 | llm=Groq, 74 | verbose=True 75 | 76 | ) 77 | 78 | # task1 79 | carreira = Task( 80 | description='Pesquisa na página da Wikipedia em pt-BR sobre o histórico sobre a 
carreira, conquistas e polêmicas do jogador Gabriel Barbosa, o Gabigol.', 81 | expected_output='Através de uma pesquisa na página da Wikipedia em pt-BR, vai extrair uma descrição dos principais fatos do histórico da carreira do jogador Gabriel Barbosa, o Gabigol. Ênfase especial nas conquistas e polêmicas na carreira', 82 | agent=jornalista, 83 | tools=[wikipedia] 84 | ) 85 | 86 | # task3 87 | contexto_fraude_antidoping=Task( 88 | description='Pesquisar na internet nas fontes de notícias no Brasil e em português sobre a tentativa de fraude do antidoping do jogador Gabriel Barbosa, o Gabigol em 2024', 89 | expected_output='Resultado das notícias do contexto da tentativa de fraude ao antidoping do Gabigol e as Últimas notícias sobre o caso.', 90 | agent=jornalista, 91 | tools=[serper] 92 | ) 93 | 94 | # task4 95 | crítica=Task( 96 | description='Escrever uma matéria completa e uma crítica detalhada sobre o caso de fraude do antidoping do jogador Gabriel Barbosa, o Gabigol, em 2024', 97 | expected_output='Levantamento de questões sobre o caso e a criação de uma matéria e crítica detalhada sobre o caso da tentativa fraude do antidoping do jogador Gabriel Barbosa, o Gabigol', 98 | agent=editor, 99 | context=[carreira, contexto_fraude_antidoping] 100 | ) 101 | 102 | # task5 103 | revisão=Task( 104 | description='Fazer a revisão final do texto do crítico para publicação', 105 | expected_output='Entrega do texto final na íntegra revisado para publicação', 106 | agent=revisor, 107 | context=[carreira,contexto_fraude_antidoping,crítica] 108 | ) 109 | 110 | tradução=Task( 111 | description='Fazer a tradução do texto final para o português', 112 | expected_output='Receber o texto final na íntegra revisado em pt-br', 113 | agent=tradutor 114 | ) 115 | 116 | # Assemble a crew 117 | crew = Crew( 118 | 119 | agents=[jornalista, editor, revisor, tradutor], 120 | tasks=[carreira, contexto_fraude_antidoping, crítica, revisão, tradução], 121 | full_output=True, 122 | process=Process.sequential, 123 | verbose=2, 124 | output_log_file=True 125 | 126 | ) 127 | crew.kickoff() 128 | print(f""" 129 | Task completed! 130 | Task: {revisão.output.description} 131 | Output: {tradução.output.raw_output} 132 | """) 133 | with tracing_v2_enabled(project_name=f"Crew AI - {unique_id}"): 134 | result = crew.kickoff() 135 | 136 | print(result) 137 | -------------------------------------------------------------------------------- /11.twenty_question_game.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from crewai import Agent, Task, Crew 4 | from dotenv import load_dotenv 5 | from langchain_community.tools.ddg_search import DuckDuckGoSearchRun 6 | 7 | load_dotenv() 8 | OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY') 9 | 10 | player1 = Agent( 11 | role="Martin", 12 | goal="You will guess the name of a personality", 13 | backstory="""Your name is Martin. You are playing the 20 question game with your friend Ravi and Shamik. In this game 14 | you will think of a famous personality and share with Shamik. Ravi needs to guess the name, NEVER share the personality 15 | name with Ravi. 16 | """, 17 | verbose=True, 18 | allow_delegation=False, 19 | memory = True 20 | ) 21 | 22 | player2 = Agent( 23 | role="Shamik", 24 | goal="Answer only YES or NO to questions about about the personality", 25 | backstory="""Your name is Shamik. You are playing the 20 question game with your friend Martin and Ravi. Martin will share the name 26 | of the personality with you. 
Ravi will ask 20 questions to you one by one about the personality that Martin shared with you. You need to answer YES or NO to the questions. 27 | Through these 20 questions, Ravi will try to guess the name of the personality. DO NOT share the name of the 28 | personality with Ravi. 29 | """, 30 | verbose=True, 31 | allow_delegation=True, 32 | memory = True 33 | ) 34 | 35 | player3 = Agent( 36 | role="Ravi", 37 | goal="Ask 20 questions about the person and guess the name of the person", 38 | backstory="""Your name is Ravi. You are playing the 20 question game with your friend Shamik and Martin. 39 | In this game you will ask 20 closed questions about that personality which 40 | can be answered 'YES' or 'NO' . Only Shamik will answer YES or NO for those questions. 41 | Through these 20 questions, you should be able to identify the name of the personality. Ask the question one by one and wait 42 | for Shamik to answer before you ask next question. After all the questions are answered, you must try to 43 | guess the name of the persoanlity. 44 | """, 45 | verbose=True, 46 | allow_delegation=True, 47 | memory = True 48 | ) 49 | 50 | identification_task= Task(description="""You will identify the name of a personality 51 | """, expected_output="name of a personality",agent=player1) 52 | 53 | question_task = Task(description="""You will answer YES or NO when a question is asked about the personality 54 | """, expected_output="answer about the personality",agent=player2) 55 | 56 | answer_task = Task(description="""you will ask the question about the personality and guess the name of the personality 57 | through 20 questions only. 58 | """, expected_output="name of the personality",agent=player3) 59 | 60 | crew = Crew( 61 | agents=[player1, player2,player3], 62 | tasks=[identification_task,question_task, answer_task], 63 | verbose=2, # You can set it to 1 or 2 to different logging levels 64 | ) 65 | 66 | # Get your crew to work! 67 | result = crew.kickoff() 68 | 69 | print("######################") 70 | print(result) 71 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 rajib 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # multi_agent 2 | This repo contains examples from autogen, crewai and anything related to multi-agent conversation 3 | -------------------------------------------------------------------------------- /chkpt_client/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rajib76/multi_agent/f1dd4164582c0ea6830adc8e0b6bec01816135fd/chkpt_client/__init__.py -------------------------------------------------------------------------------- /chkpt_client/sqllite_client.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | import sqlite3 3 | from sqlite3 import Error 4 | 5 | create_table_sql = f""" 6 | CREATE TABLE IF NOT EXISTS CHKPT ( 7 | session_id TEXT PRIMARY KEY, 8 | chkpt BLOB 9 | ); 10 | """ 11 | 12 | 13 | class SQLLiteClient(): 14 | def __init__(self, db_file="/Users/joyeed/multi-agent/multi_agent/chkpt.db"): 15 | self.module = __name__ 16 | self.db_file = db_file 17 | 18 | def connect_to_db(self): 19 | conn = None 20 | try: 21 | conn = sqlite3.connect(self.db_file) 22 | except Error as e: 23 | print(e) 24 | 25 | return conn 26 | 27 | def create_chkpt_table(self, create_table_sql): 28 | conn = self.connect_to_db() 29 | try: 30 | c = conn.cursor() 31 | c.execute(create_table_sql) 32 | except Error as e: 33 | print(e) 34 | 35 | conn.close() 36 | 37 | def insert_chkpt(self, record): 38 | self.create_chkpt_table(create_table_sql=create_table_sql) 39 | conn = self.connect_to_db() 40 | 41 | sql = '''INSERT OR REPLACE INTO CHKPT(session_id,chkpt) VALUES (?,?)''' 42 | cur = conn.cursor() 43 | cur.execute(sql, record) 44 | conn.commit() 45 | conn.close() 46 | 47 | return cur.lastrowid 48 | 49 | def select_chkpt(self, session_id): 50 | conn = self.connect_to_db() 51 | cur = conn.cursor() 52 | try: 53 | cur.execute( 54 | "SELECT chkpt FROM CHKPT WHERE session_id = ?", 55 | (session_id,), 56 | ) 57 | if value := cur.fetchone(): 58 | return pickle.loads(value[0]) 59 | except Exception as e: 60 | print(e) 61 | 62 | 63 | if __name__ == "__main__": 64 | client = SQLLiteClient() 65 | record = ("1", pickle.dumps({"owner": "raj"}))  # (session_id, pickled checkpoint blob) to match the two-column CHKPT schema 66 | client.insert_chkpt(record=record) 67 | record = client.select_chkpt(session_id="1") 68 | print(record) 69 | -------------------------------------------------------------------------------- /crew_ai_crews/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rajib76/multi_agent/f1dd4164582c0ea6830adc8e0b6bec01816135fd/crew_ai_crews/__init__.py -------------------------------------------------------------------------------- /crew_ai_crews/agents.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Rajib Deb 3 | Date: 02/01/2024 4 | Description: This module wraps all the agents that we will use for our use case 5 | """ 6 | from textwrap import dedent 7 | 8 | from crewai import Agent 9 | 10 | from crew_ai_crews.tools import ExtractIngredients 11 | 12 | 13 | class MasterChef(): 14 | """ 15 | This class wraps all the agents that we will use for our use case 16 | """ 17 | def __init__(self,url): 18 | self.module = __name__ 19 | self.url = url 20 | 21 | def get_recipe_from_image(self): 22 | """ 23 | This creates the agent which can extract ingredients used to prepare a food item.
This image will 24 | use a tool to extract the information from the image. The tool uses gpt-4-v-preview model 25 | :return: Returns the agent object 26 | """ 27 | agent = Agent( 28 | role='Food Connoisseur', 29 | goal='Extract ingredients of a food item looking at the image.', 30 | backstory=dedent("""As a food connoisseur, you have extensive experience is exploring delicious food items. 31 | You are adept at identifying the food ingredients by looking at the provided url of the image of food items. 32 | 33 | {url} 34 | """.format(url=self.url) 35 | ), 36 | tools=[ExtractIngredients.extract_ingredient], 37 | verbose=True, 38 | allow_delegation=False) 39 | 40 | return agent 41 | 42 | def instruct_to_cook_dish(self): 43 | """ 44 | This creates the agent which can instruct how to cook the food item based on the ingredients extracted. 45 | This uses its parametric knowledge to prepare the dish. 46 | :return: Returns the agent object 47 | """ 48 | agent = Agent( 49 | role='Master Chef', 50 | goal='Instruct how to cook a dish based on the provided ingredients of the food', 51 | backstory=dedent("""As a famous chef, you have extensive experience is making delicious food items. 52 | You are adept at providing instructions on how to cook a healthy dish with the ingredients provided.""" 53 | ), 54 | verbose=True, 55 | allow_delegation=False) 56 | 57 | return agent 58 | -------------------------------------------------------------------------------- /crew_ai_crews/master_chef.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Rajib Deb 3 | Date: 02/10/2024 4 | Description: This module creates the crew objects with all the required agents and tasks 5 | """ 6 | import os 7 | 8 | from crewai import Crew 9 | from dotenv import load_dotenv 10 | 11 | from crew_ai_crews.agents import MasterChef 12 | from crew_ai_crews.tasks import ExtractIngredientsFromImage 13 | 14 | load_dotenv() 15 | OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY') 16 | 17 | 18 | class MasterChefCrew(): 19 | """ 20 | The master chef crew consisting of the food connoisseur and the chef 21 | """ 22 | def __init__(self, url): 23 | agents = MasterChef(url=url) 24 | self.extract_agent = agents.get_recipe_from_image() 25 | self.instruct_agent = agents.instruct_to_cook_dish() 26 | 27 | def kickoff(self): 28 | """ 29 | Creating the crew with the two agents 30 | :return: 31 | """ 32 | tasks = ExtractIngredientsFromImage() 33 | crew = Crew( 34 | agents=[self.extract_agent, self.instruct_agent], 35 | tasks=[tasks.get_ingredients(self.extract_agent), tasks.cooking_instruction(self.instruct_agent)], 36 | verbose=True 37 | ) 38 | result = crew.kickoff() 39 | return result 40 | -------------------------------------------------------------------------------- /crew_ai_crews/tasks.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Rajib Deb 3 | Descriptions: Defines all the tasks that will be used by the agents 4 | """ 5 | from crewai import Task 6 | from textwrap import dedent 7 | 8 | 9 | class ExtractIngredientsFromImage: 10 | def get_ingredients(self, agent): 11 | """ 12 | This task is for the food connoisseur agent which will analyze the image of the food item 13 | and extract ingredients out of it. 
14 | :param agent: The food connoisseur agent 15 | :return: Returns the task object 16 | """ 17 | task = Task( 18 | description=dedent(f""" 19 | Analyze an image of a food item and extract ingredients of the food 20 | """), agent=agent 21 | ) 22 | 23 | return task 24 | 25 | def cooking_instruction(self, agent): 26 | """ 27 | The task to be used by the chef 28 | :param agent: The master chef agent 29 | :return: Returns the task of the chef 30 | """ 31 | task = Task( 32 | description=dedent(f""" 33 | Instruct how to cook a healthy food item based on the provided ingredients 34 | """), agent=agent 35 | ) 36 | 37 | return task 38 | -------------------------------------------------------------------------------- /crew_ai_crews/tools.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Rajib Deb 3 | Date: 02/10/2024 4 | Description: Defines the tool to be used to understand the image of the food item 5 | """ 6 | import json 7 | 8 | from langchain.tools import tool 9 | from openai import OpenAI 10 | 11 | 12 | class ExtractIngredients(): 13 | @tool("Extract ingredients") 14 | def extract_ingredient(url: str): 15 | """ 16 | Useful to extract ingredients from a food item image 17 | :param url:The image url 18 | :return:returns the response after extracting information from the image 19 | """ 20 | print("url is ", url) 21 | # Sometimes the REACT agent extracts the url as {'url':} and sometimes ar 22 | # below check is to handle both situations 23 | try: 24 | image_url_json=json.loads(url) 25 | print("url keys are ", image_url_json.keys()) 26 | image_url = image_url_json["url"] 27 | except ValueError as e: 28 | image_url =url 29 | client = OpenAI() 30 | response = client.chat.completions.create( 31 | model="gpt-4-vision-preview", 32 | messages=[ 33 | { 34 | "role": "user", 35 | "content": [ 36 | {"type": "text", "text": "Extract ingredients from the food item image"}, 37 | { 38 | "type": "image_url", 39 | "image_url": { 40 | "url":image_url, 41 | }, 42 | }, 43 | ], 44 | } 45 | ], 46 | max_tokens=300, 47 | ) 48 | 49 | return response.choices[0] 50 | -------------------------------------------------------------------------------- /grandma_story/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rajib76/multi_agent/f1dd4164582c0ea6830adc8e0b6bec01816135fd/grandma_story/__init__.py -------------------------------------------------------------------------------- /grandma_story/grandma_agents.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Rajib Deb 3 | Date: 02/25/2024 4 | Description: This module wraps all the agents that we will use for our use case 5 | """ 6 | from textwrap import dedent 7 | 8 | from crewai import Agent 9 | 10 | 11 | class GrandmaAgents(): 12 | """ 13 | This class wraps all the agents that we will use for our use case 14 | """ 15 | def __init__(self,idea): 16 | self.module = __name__ 17 | self.idea=idea 18 | 19 | def craft_the_story(self): 20 | """ 21 | This is the grandma agent who can craft a story based on an idea 22 | :return: Returns the agent object 23 | """ 24 | agent = Agent( 25 | role='Grandmother', 26 | goal='Create short stories for kids to teach them good values', 27 | backstory=dedent("""As a grandmother, your role is to create short stories 28 | suitable for kids, emphasizing good values and humanity based on initial ideas provided by the user. 
29 | As a creative collaborator,Grandma's Stories takes the initial and elaborates on it with minimal guidance, crafting stories that 30 | are engaging, educational, and full of imagination. The stories will convey morals such as kindness, honesty, 31 | bravery, and empathy in a manner that's accessible and entertaining to children. You should avoid complex 32 | language and inappropriate themes, ensuring content is inclusive and respectful. The interaction style with 33 | users is creative and autonomous, taking the seed of an idea and developing it into a full narrative that 34 | captivates young readers and teaches important life lessons. 35 | 36 | {idea} 37 | """.format(idea=self.idea) 38 | ), 39 | verbose=False, 40 | allow_delegation=False) 41 | 42 | return agent 43 | -------------------------------------------------------------------------------- /grandma_story/grandma_stories.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Rajib Deb 3 | Date: 02/10/2024 4 | Description: This module creates the crew objects with all the required agents and tasks 5 | """ 6 | import os 7 | 8 | from crewai import Crew 9 | from dotenv import load_dotenv 10 | 11 | from crew_ai_crews.agents import MasterChef 12 | from crew_ai_crews.tasks import ExtractIngredientsFromImage 13 | from grandma_story.grandma_agents import GrandmaAgents 14 | from grandma_story.story_tasks import CreateStories 15 | 16 | load_dotenv() 17 | OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY') 18 | 19 | 20 | class GrandmaCrew(): 21 | """ 22 | The crew which crafts the story. It is a crew of a single grandma agent 23 | """ 24 | def __init__(self, idea): 25 | agents = GrandmaAgents(idea=idea) 26 | self.craft_story = agents.craft_the_story() 27 | 28 | def kickoff(self): 29 | """ 30 | Creating the crew with grandma 31 | :return: 32 | """ 33 | tasks = CreateStories() 34 | crew = Crew( 35 | agents=[self.craft_story], 36 | tasks=[tasks.craft_story(self.craft_story)], 37 | verbose=True 38 | ) 39 | result = crew.kickoff() 40 | return result 41 | -------------------------------------------------------------------------------- /grandma_story/story_tasks.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Rajib Deb 3 | Descriptions: Defines all the tasks that will be used by the agents 4 | """ 5 | from crewai import Task 6 | from textwrap import dedent 7 | 8 | 9 | class CreateStories: 10 | def craft_story(self, agent): 11 | """ 12 | This task is for grandma to craft the story 13 | :param agent: The grandma agent 14 | :return: Returns the task object 15 | """ 16 | task = Task( 17 | description=dedent(f""" 18 | Craft a kid story based on an initial idea that will teach the kids good values and make them a better human 19 | """), agent=agent 20 | ) 21 | 22 | return task 23 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | langroid 2 | langroid[hf-embeddings] 3 | python-dotenv 4 | crewai 5 | duckduckgo-search 6 | wolframalpha -------------------------------------------------------------------------------- /society_of_minds/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rajib76/multi_agent/f1dd4164582c0ea6830adc8e0b6bec01816135fd/society_of_minds/__init__.py 
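A version note, and an assumption on my part rather than something stated in this repo: recent crewai releases make expected_output a required field on Task, while the MasterChef and Grandma tasks above construct Task with only a description and an agent. If those examples fail validation on a newer crewai, a minimal sketch of the adjustment, applied to CreateStories.craft_story (the expected_output wording below is invented for illustration), would be:

from textwrap import dedent

from crewai import Task


def craft_story(self, agent):
    # Same task as in grandma_story/story_tasks.py, with an explicit
    # expected_output added for crewai versions that require it.
    task = Task(
        description=dedent("""
            Craft a kid story based on an initial idea that will teach the kids good values and make them a better human
            """),
        expected_output="A short, age-appropriate story with a clear moral lesson",
        agent=agent
    )

    return task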
-------------------------------------------------------------------------------- /society_of_minds/mind_creator.py: -------------------------------------------------------------------------------- 1 | import ast 2 | import os 3 | from typing import List 4 | 5 | from crewai import Agent, Task, Crew, Process 6 | from dotenv import load_dotenv 7 | from pydantic import BaseModel 8 | 9 | load_dotenv() 10 | OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY') 11 | 12 | 13 | class Agents(BaseModel): 14 | agent_name: str 15 | role: str 16 | goal: str 17 | backstory: str 18 | task: str 19 | taskoutput: str 20 | 21 | 22 | class AgentList(BaseModel): 23 | agents: List[Agents] 24 | 25 | class Config: 26 | arbitrary_types_allowed = True 27 | 28 | 29 | class TheMind(BaseModel): 30 | 31 | def create_agent_tasks(self, agent_info): 32 | crew_agents = [] 33 | crew_tasks = [] 34 | for agent in agent_info: 35 | agent_name = agent["agent_name"] 36 | print(f"Creating agent {agent_name}".format(agent_name=agent_name)) 37 | role_name = agent["role"] 38 | goal = agent["goal"] 39 | backstory = agent["backstory"] 40 | task = agent["task"] 41 | task_output = agent["taskoutput"] 42 | crew_agent = Agent( 43 | role=role_name, 44 | goal=goal, 45 | verbose=False, 46 | memory=True, 47 | backstory=backstory, 48 | tools=[], 49 | allow_delegation=False 50 | ) 51 | crew_agents.append(crew_agent) 52 | print("Creating task for the agent {task} :".format(task=task)) 53 | crew_task = Task( 54 | description=( 55 | task 56 | ), 57 | expected_output=task_output, 58 | tools=[], 59 | agent=crew_agent 60 | ) 61 | crew_tasks.append(crew_task) 62 | # for agent in crew_agents: 63 | # print(agent.role) 64 | print("---------------------------------") 65 | return crew_agents, crew_tasks 66 | 67 | def create_the_society(self, task: str): 68 | # creates the smaller agents in the Mind 69 | society_of_minds = Agent( 70 | role='Mind Creator', 71 | goal='Create smaller process to achieve a complex task', 72 | verbose=False, 73 | memory=True, 74 | backstory=( 75 | "You are the creator of Mind. Each mind is made of many smaller processes which we call agents. " 76 | "You know that each mental agent by itself can only do some simple thing that needs no mind or thought at all." 77 | "Yet when we join these agents in societies, in certain very special ways, this leads to intelligence. Your " 78 | "job is to create a series of agents based on a provided complex task."), 79 | tools=[], 80 | allow_delegation=True 81 | ) 82 | 83 | # creates the tasks for the agents 84 | society_of_minds_task = Task( 85 | description=( 86 | "{task}" 87 | ), 88 | expected_output='A list of agents with their name, role, goal, backstory, task and task output in a JSON ' 89 | 'format.Only output the JSON. Keys in the JSON must be lower case and named as agent_name,role,' 90 | 'goal, backstory,task and taskoutput Do not add anything else.', 91 | tools=[], 92 | agent=society_of_minds, 93 | output_json=AgentList 94 | ) 95 | 96 | crew = Crew( 97 | agents=[society_of_minds], 98 | tasks=[society_of_minds_task], 99 | verbose=False, 100 | process=Process.sequential # Optional: Sequential task execution is default 101 | ) 102 | agents = crew.kickoff(inputs={'task': task}) 103 | agents = ast.literal_eval(agents) 104 | print(agents) 105 | crew_agents, crew_tasks = self.create_agent_tasks(agents['agents']) 106 | return crew_agents, crew_tasks 107 | 108 | # if __name__ == "__main__": 109 | # mymind = TheMind() 110 | # task = "Create a powerpoint to explain the impact of technology on climate." 
111 | # agents = mymind.create_the_society(task) 112 | # print(agents) 113 | --------------------------------------------------------------------------------
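For reference, a minimal sketch (not part of the repo) of the JSON contract that society_of_minds_task asks the model to emit and that create_agent_tasks() then turns into crewai Agents and Tasks. Only the key names come from the prompt in mind_creator.py; the field values below are invented for illustration, and constructing the Agents assumes OPENAI_API_KEY is set, since crewai agents default to an OpenAI-backed LLM.

from society_of_minds.mind_creator import TheMind

# Shape expected by create_agent_tasks(): a list of dicts under "agents",
# each carrying the keys named in society_of_minds_task's expected_output.
sample_society = {
    "agents": [
        {
            "agent_name": "climate_researcher",
            "role": "Climate impact researcher",
            "goal": "Collect facts on how technology affects the climate",
            "backstory": "You research climate and technology topics.",
            "task": "Gather the key facts needed for the presentation",
            "taskoutput": "A bullet list of facts with sources"
        }
    ]
}

the_mind = TheMind()
crew_agents, crew_tasks = the_mind.create_agent_tasks(sample_society["agents"])
print(crew_agents[0].role, "->", crew_tasks[0].expected_output)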