├── .gitignore
├── 1_basic_agent.py
├── 2_tools.py
├── 3_state.py
├── 3a_tools_and_state.py
├── 4_streaming.py
├── 5_human_in_the_loop.py
├── 6_multi_agent.py
├── data
│   └── 2023_canadian_budget.pdf
└── pyproject.toml

/.gitignore:
--------------------------------------------------------------------------------
.env
.DS_Store

/1_basic_agent.py:
--------------------------------------------------------------------------------
# Minimal agent example: an AgentWorkflow built from two plain Python
# functions that the LLM can call as tools.
from dotenv import load_dotenv
load_dotenv()

from llama_index.llms.openai import OpenAI
from llama_index.core.agent.workflow import AgentWorkflow

def multiply(a: float, b: float) -> float:
    """Multiply two numbers and returns the product"""
    return a * b

def add(a: float, b: float) -> float:
    """Add two numbers and returns the sum"""
    return a + b

llm = OpenAI(model="gpt-4o-mini")

workflow = AgentWorkflow.from_tools_or_functions(
    [multiply, add],
    llm=llm,
    system_prompt="You are an agent that can perform basic mathematical operations using tools.",
)

async def main():
    response = await workflow.run(user_msg="What is 20+(2*4)?")
    print(response)

if __name__ == "__main__":
    import asyncio
    asyncio.run(main())

/2_tools.py:
--------------------------------------------------------------------------------
# Extends the basic agent with a pre-built tool spec (Yahoo Finance) in
# addition to the two local math functions.
from dotenv import load_dotenv
load_dotenv()

from llama_index.llms.openai import OpenAI
from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.tools.yahoo_finance import YahooFinanceToolSpec

def multiply(a: float, b: float) -> float:
    """Multiply two numbers and returns the product"""
    return a * b

def add(a: float, b: float) -> float:
    """Add two numbers and returns the sum"""
    return a + b

llm = OpenAI(model="gpt-4o-mini")

finance_tools = YahooFinanceToolSpec().to_tool_list()
finance_tools.extend([multiply, add])

workflow = AgentWorkflow.from_tools_or_functions(
    finance_tools,
    llm=llm,
    # NOTE: prompt updated to reflect the finance tools registered above;
    # the previous prompt only mentioned mathematical operations.
    system_prompt="You are an agent that can look up stock information and perform basic mathematical operations using tools.",
)

async def main():
    response = await workflow.run(user_msg="What's the current stock price of NVIDIA?")
    print(response)

if __name__ == "__main__":
    import asyncio
    asyncio.run(main())

/3_state.py:
--------------------------------------------------------------------------------
# Demonstrates conversational state: a Context carries chat history across
# workflow.run() calls and can be serialized to a dict and restored.
from dotenv import load_dotenv
load_dotenv()

from llama_index.llms.openai import OpenAI
from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.tools.yahoo_finance import YahooFinanceToolSpec
from llama_index.core.workflow import Context
# JsonPickleSerializer is an alternative serializer for non-JSON-safe state;
# imported for reference, only JsonSerializer is used below.
from llama_index.core.workflow import JsonPickleSerializer, JsonSerializer

def multiply(a: float, b: float) -> float:
    """Multiply two numbers and returns the product"""
    return a * b

def add(a: float, b: float) -> float:
    """Add two numbers and returns the sum"""
    return a + b

llm = OpenAI(model="gpt-4o-mini")

finance_tools = YahooFinanceToolSpec().to_tool_list()
finance_tools.extend([multiply, add])

workflow = AgentWorkflow.from_tools_or_functions(
    finance_tools,
    llm=llm,
    # NOTE: prompt updated to reflect the finance tools registered above;
    # the previous prompt only mentioned mathematical operations.
    system_prompt="You are an agent that can look up stock information and perform basic mathematical operations using tools.",
)

# configure a context to work with our workflow
ctx = Context(workflow)

async def main():
    response = await workflow.run(user_msg="Hi, my name is Laurie!", ctx=ctx)
    print(response)

    # the same ctx remembers the earlier message
    response2 = await workflow.run(user_msg="What's my name?", ctx=ctx)
    print(response2)

    # convert our Context to a dictionary object
    ctx_dict = ctx.to_dict(serializer=JsonSerializer())

    # create a new Context from the dictionary
    restored_ctx = Context.from_dict(
        workflow, ctx_dict, serializer=JsonSerializer()
    )

    # the restored context retains the conversation history
    response3 = await workflow.run(user_msg="What's my name?", ctx=restored_ctx)
    print(response3)

if __name__ == "__main__":
    import asyncio
    asyncio.run(main())

/3a_tools_and_state.py:
--------------------------------------------------------------------------------
# Demonstrates tools that read and write the workflow's shared "state"
# dict through the Context, seeded via initial_state.
from dotenv import load_dotenv
load_dotenv()

from llama_index.llms.openai import OpenAI
from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.tools.yahoo_finance import YahooFinanceToolSpec
from llama_index.core.workflow import Context

llm = OpenAI(model="gpt-4o-mini")

async def set_name(ctx: Context, name: str) -> str:
    """Store the user's name in the workflow state."""
    state = await ctx.get("state")
    state["name"] = name
    await ctx.set("state", state)
    return f"Name set to {name}"

workflow = AgentWorkflow.from_tools_or_functions(
    [set_name],
    llm=llm,
    system_prompt="You are a helpful assistant that can set a name.",
    initial_state={"name": "unset"},
)

async def main():
    ctx = Context(workflow)

    # check if it knows a name before setting it
    response = await workflow.run(user_msg="What's my name?", ctx=ctx)
    print(str(response))

    # set the name using a tool
    response2 = await workflow.run(user_msg="My name is Laurie", ctx=ctx)
    print(str(response2))

    # retrieve the value from the state directly
    state = await ctx.get("state")
    print("Name as stored in state: ", state["name"])

if __name__ == "__main__":
    import asyncio
    asyncio.run(main())

/4_streaming.py:
--------------------------------------------------------------------------------
# Demonstrates streaming: iterate handler.stream_events() to observe token
# deltas, agent inputs/outputs, and tool calls as they happen.
from dotenv import load_dotenv
load_dotenv()

from llama_index.llms.openai import OpenAI
from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.core.workflow import Context  # NOTE(review): unused here; kept for parity with other examples
from llama_index.tools.tavily_research import TavilyToolSpec
import os
from llama_index.core.agent.workflow import (
    AgentInput,
    AgentOutput,
    ToolCall,
    ToolCallResult,
    AgentStream,
)

llm = OpenAI(model="gpt-4o-mini")

tavily_tool = TavilyToolSpec(api_key=os.getenv("TAVILY_API_KEY"))

workflow = AgentWorkflow.from_tools_or_functions(
    tavily_tool.to_tool_list(),
    llm=llm,
    system_prompt="You're a helpful assistant that can search the web for information.",
)

async def main():
    handler = workflow.run(user_msg="What's the weather like in San Francisco?")

    # handle streaming output
    async for event in handler.stream_events():
        if isinstance(event, AgentStream):
            print(event.delta, end="", flush=True)
        elif isinstance(event, AgentInput):
            print("Agent input: ", event.input)  # the current input messages
            print("Agent name:", event.current_agent_name)  # the current agent name
        elif isinstance(event, AgentOutput):
            print("Agent output: ", event.response)  # the current full response
            print("Tool calls made: ", event.tool_calls)  # the selected tool calls, if any
            print("Raw LLM response: ", event.raw)  # the raw llm api response
        elif isinstance(event, ToolCallResult):
            print("Tool called: ", event.tool_name)  # the tool name
            print("Arguments to the tool: ", event.tool_kwargs)  # the tool kwargs
            print("Tool output: ", event.tool_output)  # the tool output

    # print final output
    print(str(await handler))

if __name__ == "__main__":
    import asyncio
    asyncio.run(main())

/5_human_in_the_loop.py:
--------------------------------------------------------------------------------
# Demonstrates human-in-the-loop: a tool emits an InputRequiredEvent to the
# stream and blocks until a matching HumanResponseEvent is sent back.
from dotenv import load_dotenv
load_dotenv()

from llama_index.llms.openai import OpenAI
from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.core.workflow import Context
from llama_index.core.workflow import (
    InputRequiredEvent,
    HumanResponseEvent,
)

llm = OpenAI(model="gpt-4o-mini")

# a tool that performs a dangerous task
async def dangerous_task(ctx: Context) -> str:
    """A dangerous task that requires human confirmation."""

    # emit an event to the external stream to be captured
    ctx.write_event_to_stream(
        InputRequiredEvent(
            prefix="Are you sure you want to proceed? ",
            user_name="Laurie",
        )
    )

    # wait until we see a HumanResponseEvent
    response = await ctx.wait_for_event(
        HumanResponseEvent, requirements={"user_name": "Laurie"}
    )

    # act on the input from the event
    if response.response.strip().lower() == "yes":
        return "Dangerous task completed successfully."
    else:
        return "Dangerous task aborted."

workflow = AgentWorkflow.from_tools_or_functions(
    [dangerous_task],
    llm=llm,
    system_prompt="You are a helpful assistant that can perform dangerous tasks.",
)

async def main():
    handler = workflow.run(user_msg="I want to proceed with the dangerous task.")

    async for event in handler.stream_events():
        # capture InputRequiredEvent
        if isinstance(event, InputRequiredEvent):
            # capture keyboard input
            response = input(event.prefix)
            # send our response back
            handler.ctx.send_event(
                HumanResponseEvent(
                    response=response,
                    user_name=event.user_name,
                )
            )

    response = await handler
    print(str(response))


if __name__ == "__main__":
    import asyncio
    asyncio.run(main())

/6_multi_agent.py:
--------------------------------------------------------------------------------
# Demonstrates a three-agent workflow (research -> write -> review) sharing
# state through the Context, with explicit hand-off permissions.
from dotenv import load_dotenv
load_dotenv()

from llama_index.llms.openai import OpenAI
from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.core.workflow import Context
from llama_index.core.agent.workflow import (
    AgentOutput,
    ToolCall,
    ToolCallResult,
)
from llama_index.tools.tavily_research import TavilyToolSpec
from llama_index.core.agent.workflow import FunctionAgent
import os

llm = OpenAI(model="gpt-4o-mini")

tavily_tool = TavilyToolSpec(api_key=os.getenv("TAVILY_API_KEY"))
search_web = tavily_tool.to_tool_list()[0]

async def record_notes(ctx: Context, notes: str, notes_title: str) -> str:
    """Useful for recording notes on a given topic."""
    current_state = await ctx.get("state")
    if "research_notes" not in current_state:
        current_state["research_notes"] = {}
    current_state["research_notes"][notes_title] = notes
    await ctx.set("state", current_state)
    return "Notes recorded."


async def write_report(ctx: Context, report_content: str) -> str:
    """Useful for writing a report on a given topic."""
    current_state = await ctx.get("state")
    current_state["report_content"] = report_content
    await ctx.set("state", current_state)
    return "Report written."


async def review_report(ctx: Context, review: str) -> str:
    """Useful for reviewing a report and providing feedback."""
    current_state = await ctx.get("state")
    current_state["review"] = review
    await ctx.set("state", current_state)
    return "Report reviewed."

research_agent = FunctionAgent(
    name="ResearchAgent",
    description="Useful for searching the web for information on a given topic and recording notes on the topic.",
    system_prompt=(
        "You are the ResearchAgent that can search the web for information on a given topic and record notes on the topic. "
        "Once notes are recorded and you are satisfied, you should hand off control to the WriteAgent to write a report on the topic."
    ),
    llm=llm,
    tools=[search_web, record_notes],
    can_handoff_to=["WriteAgent"],
)

write_agent = FunctionAgent(
    name="WriteAgent",
    description="Useful for writing a report on a given topic.",
    system_prompt=(
        "You are the WriteAgent that can write a report on a given topic. "
        "Your report should be in a markdown format. The content should be grounded in the research notes. "
        "Once the report is written, you should get feedback at least once from the ReviewAgent."
    ),
    llm=llm,
    tools=[write_report],
    can_handoff_to=["ReviewAgent", "ResearchAgent"],
)

review_agent = FunctionAgent(
    name="ReviewAgent",
    description="Useful for reviewing a report and providing feedback.",
    system_prompt=(
        "You are the ReviewAgent that can review a report and provide feedback. "
        "Your feedback should either approve the current report or request changes for the WriteAgent to implement."
    ),
    llm=llm,
    tools=[review_report],
    can_handoff_to=["WriteAgent"],
)

agent_workflow = AgentWorkflow(
    agents=[research_agent, write_agent, review_agent],
    root_agent=research_agent.name,
    initial_state={
        "research_notes": {},
        "report_content": "Not written yet.",
        "review": "Review required.",
    },
)

async def main():
    handler = agent_workflow.run(user_msg="""
        Write me a report on the history of the web. Briefly describe the history
        of the world wide web, including the development of the internet and the
        development of the web, including 21st century developments.
    """)

    current_agent = None
    current_tool_calls = ""
    async for event in handler.stream_events():
        if (
            hasattr(event, "current_agent_name")
            and event.current_agent_name != current_agent
        ):
            current_agent = event.current_agent_name
            print(f"\n{'='*50}")
            print(f"🤖 Agent: {current_agent}")
            print(f"{'='*50}\n")
        elif isinstance(event, AgentOutput):
            if event.response.content:
                print("📤 Output:", event.response.content)
            if event.tool_calls:
                print(
                    "🛠️  Planning to use tools:",
                    [call.tool_name for call in event.tool_calls],
                )
        elif isinstance(event, ToolCallResult):
            print(f"🔧 Tool Result ({event.tool_name}):")
            print(f"  Arguments: {event.tool_kwargs}")
            print(f"  Output: {event.tool_output}")
        elif isinstance(event, ToolCall):
            print(f"🔨 Calling Tool: {event.tool_name}")
            print(f"  With arguments: {event.tool_kwargs}")

if __name__ == "__main__":
    import asyncio
    asyncio.run(main())

/data/2023_canadian_budget.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/run-llama/python-agents-tutorial/36a83eb6e940939f67f84dea33c8bf147f9d3a18/data/2023_canadian_budget.pdf

/pyproject.toml:
--------------------------------------------------------------------------------
[tool.poetry]
name = "agents-tutorial"
version = "0.1.0"
description = ""
authors = ["Laurie Voss "]
readme = "README.md"

[tool.poetry.dependencies]
python = "^3.11"


[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"